File size: 1,635 Bytes
82a49a3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
import gradio as gr
import torch
from diffusers import AuraFlowPipeline

# Initialize the AuraFlow pipeline at import time (downloads weights on first run).
# float16 halves model memory; NOTE(review): assumes a CUDA device is present —
# the script will fail at startup on CPU-only machines.
pipe = AuraFlowPipeline.from_pretrained("purplesmartai/pony-v7-base", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

def generate_image(prompt, negative_prompt, height, width, num_inference_steps, guidance_scale, seed):
    """
    Generate an image from a text prompt using the AuraFlow pipeline.

    Args:
        prompt: Text description of the desired image.
        negative_prompt: Concepts to steer generation away from.
        height: Output height in pixels (cast to int; Gradio sliders pass floats).
        width: Output width in pixels (cast to int).
        num_inference_steps: Number of denoising steps (cast to int).
        guidance_scale: Classifier-free guidance strength.
        seed: RNG seed for reproducible output (cast to int).

    Returns:
        The first generated PIL image.
    """
    # Bug fix: gr.Number delivers the seed as a float, but
    # torch.Generator.manual_seed requires an integer — cast like the
    # other numeric inputs below.
    generator = torch.Generator("cuda").manual_seed(int(seed))
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        height=int(height),
        width=int(width),
        num_inference_steps=int(num_inference_steps),
        guidance_scale=guidance_scale,
        generator=generator,
    ).images[0]
    return image

# Assemble the Gradio UI: one named component per model input, then wire
# them into an Interface around generate_image.
prompt_box = gr.Textbox(label="Prompt", value="A cat holding a sign that says hello world")
negative_box = gr.Textbox(label="Negative Prompt", placeholder="Enter prompts to exclude")
height_slider = gr.Slider(label="Height", minimum=256, maximum=2048, step=64, value=1024)
width_slider = gr.Slider(label="Width", minimum=256, maximum=2048, step=64, value=1024)
steps_slider = gr.Slider(label="Number of Inference Steps", minimum=1, maximum=100, step=1, value=50)
guidance_slider = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=20.0, step=0.1, value=5.0)
seed_number = gr.Number(label="Seed", value=42)

iface = gr.Interface(
    fn=generate_image,
    inputs=[
        prompt_box,
        negative_box,
        height_slider,
        width_slider,
        steps_slider,
        guidance_slider,
        seed_number,
    ],
    outputs=gr.Image(label="Generated Image"),
    title="AuraFlow Text-to-Image Generation",
    description="Generate images from text prompts using the AuraFlow model.",
)

# Start the local web server hosting the demo.
iface.launch()