import spaces import gradio as gr import torch import os import traceback from diffusers import ZImagePipeline from huggingface_hub import list_repo_files from PIL import Image from huggingface_hub import HfApi import tempfile, shutil, uuid from datetime import datetime HF_MODEL = os.environ.get("HF_UPLOAD_REPO", "rahul7star/Zimg-Feb2026") def upload_image_and_prompt_cpu(input_image, prompt_text) -> str: api = HfApi() today_str = datetime.now().strftime("%Y-%m-%d") unique_subfolder = f"Upload-Image-{uuid.uuid4().hex[:8]}" hf_folder = f"{today_str}/{unique_subfolder}" # ---- save temp image ---- with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp_img: input_image.save(tmp_img.name, format="PNG") tmp_img_path = tmp_img.name api.upload_file( path_or_fileobj=tmp_img_path, path_in_repo=f"{hf_folder}/final_image.png", repo_id=HF_MODEL, repo_type="model", token=os.environ.get("HUGGINGFACE_HUB_TOKEN"), ) # ---- save prompt ---- summary_file = tempfile.NamedTemporaryFile(delete=False, suffix=".txt").name with open(summary_file, "w", encoding="utf-8") as f: f.write(prompt_text) api.upload_file( path_or_fileobj=summary_file, path_in_repo=f"{hf_folder}/summary.txt", repo_id=HF_MODEL, repo_type="model", token=os.environ.get("HUGGINGFACE_HUB_TOKEN"), ) os.remove(tmp_img_path) os.remove(summary_file) return hf_folder # ============================================================ # CONFIG # ============================================================ MODEL_ID = "Tongyi-MAI/Z-Image-Turbo" DEFAULT_LORA_REPO = "rahul7star/ZImageLora" DTYPE = torch.bfloat16 DEVICE = "cuda" if torch.cuda.is_available() else "cpu" # ============================================================ # GLOBAL STATE # ============================================================ pipe = None CURRENT_LORA_REPO = None CURRENT_LORA_FILE = None # ============================================================ # LOGGING # ============================================================ def log(msg): 
print(msg) return msg # ============================================================ # PIPELINE BUILD (ONCE) # ============================================================ try: pipe = ZImagePipeline.from_pretrained( MODEL_ID, torch_dtype=DTYPE, ) pipe.to(DEVICE) log("β Pipeline built successfully") except Exception as e: log("β Pipeline build failed") log(traceback.format_exc()) pipe = None # ============================================================ # HELPERS # ============================================================ def list_loras_from_repo(repo_id: str): try: files = list_repo_files(repo_id) return [f for f in files if f.endswith(".safetensors")] except Exception as e: log(f"β Failed to list LoRAs: {e}") return [] # ============================================================ # IMAGE GENERATION (SAFE LORA LOGIC) # ============================================================ @spaces.GPU() def generate_image(prompt, height, width, steps, seed, guidance_scale): LOGS = [] print(prompt) if pipe is None: return None, [], "β Pipeline not initialized" generator = torch.Generator().manual_seed(int(seed)) placeholder = Image.new("RGB", (width, height), (255, 255, 255)) previews = [] # ---- Always start clean ---- try: pipe.unload_lora_weights() except Exception: pass # ---- Load LoRA for this run only ---- if CURRENT_LORA_FILE: try: pipe.load_lora_weights( CURRENT_LORA_REPO, weight_name=CURRENT_LORA_FILE ) LOGS.append(f"π§© LoRA loaded: {CURRENT_LORA_FILE}") except Exception as e: LOGS.append(f"β LoRA load failed: {e}") # ---- Preview steps (lightweight) ---- try: num_previews = min(5, steps) for i in range(num_previews): out = pipe( prompt=prompt, height=height // 4, width=width // 4, num_inference_steps=i + 1, guidance_scale=guidance_scale, generator=generator, ) img = out.images[0].resize((width, height)) previews.append(img) yield None, previews, "\n".join(LOGS) except Exception as e: LOGS.append(f"β οΈ Preview failed: {e}") # ---- Final image ---- try: out = 
pipe( prompt=prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=guidance_scale, generator=generator, ) final_img = out.images[0] previews.append(final_img) LOGS.append("β Image generated") # ============================================ # HF UPLOAD (CPU SAFE) # ============================================ try: folder = upload_image_and_prompt_cpu(final_img, prompt) LOGS.append(f"βοΈ Uab") except Exception as upload_error: LOGS.append(f"β οΈ Uabc error") yield final_img, previews, "\n".join(LOGS) except Exception as e: LOGS.append(f"β Generation failed: {e}") yield placeholder, previews, "\n".join(LOGS) finally: # ---- CRITICAL: unload after run ---- try: pipe.unload_lora_weights() LOGS.append("π§Ή LoRA unloaded") except Exception: pass # ============================================================ # GRADIO UI # ============================================================ css = """ .gradio-container { max-width: 100% !important; padding: 16px 32px !important; } .section { margin-bottom: 12px; } .generate-btn { background: linear-gradient(90deg, #4b6cb7, #182848) !important; color: white !important; font-weight: 600; height: 46px; border-radius: 10px; } .secondary-btn { height: 42px; border-radius: 10px; } textarea, input { border-radius: 10px !important; } """ with gr.Blocks( title="Z-Image-Turbo (Runtime LoRA)", css=css, ) as demo: gr.Markdown( """
Get support • Share results • Discuss LoRAs • Report bugs
Join Discord