import gradio as gr
import spaces
import torch
from diffusers import AuraFlowPipeline, Lumina2Pipeline
import random
import numpy as np
import warnings

warnings.filterwarnings("ignore")
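
# `spaces` is the Hugging Face ZeroGPU helper package: it provides the
# @spaces.GPU decorator applied to the generation functions below, so a GPU is
# attached only while a request is actually being processed.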

# Load Model 1: Pony v7
pipe_pony = AuraFlowPipeline.from_pretrained("purplesmartai/pony-v7-base", torch_dtype=torch.float16)
pipe_pony.to("cuda")

# Load Model 2: NetaYume
pipe_netayume = Lumina2Pipeline.from_pretrained(
    "duongve/NetaYume-Lumina-Image-2.0-Diffusers-v35-pretrained",
    torch_dtype=torch.bfloat16
)
pipe_netayume.to("cuda")
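
# Both pipelines stay resident on the GPU for the lifetime of the app. If GPU
# memory were tight, a common alternative (not used here) would be to replace
# the .to("cuda") calls above with pipe.enable_model_cpu_offload(), which keeps
# weights in CPU RAM and moves each submodule to the GPU only while it runs.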


@spaces.GPU
def generate_image_pony(prompt, negative_prompt, height, width, num_inference_steps, guidance_scale, sigmas_factor, seed, progress=gr.Progress(track_tqdm=True)):
    # A negative seed means "pick one at random"; the resolved seed is returned
    # so the UI can display and reuse it.
    if seed < 0:
        seed = random.randint(0, 2**32 - 1)
    generator = torch.Generator("cuda").manual_seed(int(seed))
    pipeline_args = {
        "prompt": prompt,
        "negative_prompt": negative_prompt,
        "height": int(height),
        "width": int(width),
        "num_inference_steps": int(num_inference_steps),
        "guidance_scale": guidance_scale,
        "generator": generator,
    }
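    # Custom sigma schedule: both pipelines accept an explicit `sigmas` list
    # that overrides the scheduler's default spacing. Multiplying the linear
    # schedule by sigmas_factor (e.g. 0.99) lowers every noise level slightly;
    # a factor of exactly 1.0 leaves the default schedule in place, so the
    # override is skipped.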
    if sigmas_factor != 1.0:
        steps = int(num_inference_steps)
        sigmas = np.linspace(1.0, 1 / steps, steps)
        sigmas = sigmas * sigmas_factor
        pipeline_args["sigmas"] = sigmas.tolist()
    image = pipe_pony(**pipeline_args).images[0]
    return image, seed
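
# Worked example: with the Pony tab defaults (30 steps, sigmas_factor=0.99) the
# schedule above is np.linspace(1.0, 1/30, 30) * 0.99, i.e. 30 evenly spaced
# sigmas running from 0.99 down to roughly 0.033.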


@spaces.GPU
def generate_image_netayume(prompt, negative_prompt, system_prompt, height, width, guidance_scale, num_inference_steps, cfg_trunc_ratio, cfg_normalization, seed, sigmas_factor, progress=gr.Progress(track_tqdm=True)):
    if seed < 0:
        seed = random.randint(0, 2**32 - 1)
    generator = torch.Generator("cuda").manual_seed(int(seed))
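    # The dict below forwards cfg_trunc_ratio and cfg_normalization, two
    # Lumina 2-specific classifier-free-guidance controls, unchanged to the
    # pipeline; the system prompt is likewise passed straight through.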
    pipeline_args = {
        "prompt": prompt,
        # An empty or whitespace-only negative prompt is treated as "none".
        "negative_prompt": negative_prompt if negative_prompt and negative_prompt.strip() else None,
        "system_prompt": system_prompt,
        "height": int(height),
        "width": int(width),
        "guidance_scale": guidance_scale,
        "num_inference_steps": int(num_inference_steps),
        "cfg_trunc_ratio": cfg_trunc_ratio,
        "cfg_normalization": cfg_normalization,
        "generator": generator,
    }
    if sigmas_factor != 1.0:
        steps = int(num_inference_steps)
        sigmas = np.linspace(1.0, 1 / steps, steps)
        sigmas = sigmas * sigmas_factor
        pipeline_args["sigmas"] = sigmas.tolist()
    image = pipe_netayume(**pipeline_args).images[0]
    return image, seed
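
# The generation functions can also be exercised directly, outside the Gradio
# UI, on any CUDA machine. A minimal, commented-out sketch (the prompt and
# settings are illustrative, not values from the app):
#
#   image, used_seed = generate_image_pony(
#       "Score_9, a scenic mountain lake", "low quality", 1024, 1024, 30, 3.5, 0.99, -1
#   )
#   image.save("test.png")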


with gr.Blocks(theme=gr.themes.Soft(), title="Image Generation Playground") as demo:
    gr.Markdown("# Image Generation Playground")
    with gr.Tabs():
        with gr.Tab(label="Pony v7"):
            gr.Markdown("## ✨ Pony v7 AuraFlow")
            gr.Markdown("Generate images from text prompts using the AuraFlow model.")
            with gr.Row(variant="panel"):
                with gr.Column(scale=2):
                    prompt_pony = gr.Textbox(label="Prompt", value="Score_9, ", lines=3)
                    neg_prompt_pony = gr.Textbox(
                        label="Negative Prompt",
                        value="score_6, score_5, score_4, worst quality, low quality, text, deformed, bad hand, blurry, (watermark), extra hands, long ears, ugly, deformed joints, deformed hands, empty background, big ears, narrow face, glowing eyes,",
                        lines=3
                    )
                    with gr.Row():
                        height_pony = gr.Slider(label="Height", minimum=512, maximum=1536, step=64, value=1024)
                        width_pony = gr.Slider(label="Width", minimum=512, maximum=1536, step=64, value=1024)
                    with gr.Row():
                        steps_pony = gr.Slider(label="Inference Steps", minimum=1, maximum=100, step=1, value=30)
                        cfg_pony = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=20.0, step=0.1, value=3.5)
                    with gr.Row():
                        sigmas_pony = gr.Slider(label="Sigmas Factor", minimum=0.95, maximum=1.05, step=0.01, value=0.99)
                        seed_pony = gr.Number(label="Seed (-1 for random)", value=-1, precision=0)
                    generate_btn_pony = gr.Button("Generate", variant="primary")
                with gr.Column(scale=1):
                    image_output_pony = gr.Image(label="Generated Image", format="png", interactive=False)
                    used_seed_pony = gr.Number(label="Used Seed", interactive=False)
| with gr.Tab(label="NetaYume v3.5"): | |
| gr.Markdown("## 🌌 NetaYume v3.5 Lumina") | |
| gr.Markdown("Generate images from text prompts using the Lumina 2 model with a focus on anime aesthetics.") | |
| with gr.Row(variant="panel"): | |
| with gr.Column(scale=2): | |
| prompt_neta = gr.Textbox( | |
| label="Prompt", | |
| value="kita ikuyo (Bocchi the Rock!), 1girl, anime style, vibrant colors, red hair, medium hair with one side up, green eyes, bangs, hair between eyes, school uniform (white shirt, grey serafuku sailor collar, red neckerchief, pleated skirt), sitting upper body close-up, holding bouquet with white lily & pink flowers, indoors with depth of field, cherry blossom-like light particles, soft sunlight backlighting, bloom, chromatic aberration & lens flare abuse, light smile, closed mouth, one side hair up, transparent blurry foreground, warm cozy atmosphere, masterpiece, best quality", | |
| lines=5 | |
| ) | |
| neg_prompt_neta = gr.Textbox(label="Negative Prompt", placeholder="Enter concepts to avoid...", lines=2) | |
| system_prompt_neta = gr.Dropdown( | |
| label="System Prompt", | |
| choices=[ | |
| "You are an advanced assistant designed to generate high-quality images from user prompts, utilizing danbooru tags to accurately guide the image creation process.", | |
| "You are an assistant designed to generate high-quality images based on user prompts and danbooru tags.", | |
| "You are an assistant designed to generate superior images with the superior degree of image-text alignment based on textual prompts or user prompts.", | |
| "You are an assistant designed to generate high-quality images with the highest degree of image-text alignment based on textual prompts." | |
| ], | |
| value="You are an advanced assistant designed to generate high-quality images from user prompts, utilizing danbooru tags to accurately guide the image creation process." | |
| ) | |
| with gr.Row(): | |
| height_neta = gr.Slider(label="Height", minimum=512, maximum=2048, step=64, value=1536) | |
| width_neta = gr.Slider(label="Width", minimum=512, maximum=2048, step=64, value=1024) | |
| with gr.Row(): | |
| cfg_neta = gr.Slider(label="Guidance Scale (CFG)", minimum=1.0, maximum=10.0, step=0.1, value=4.0) | |
| steps_neta = gr.Slider(label="Sampling Steps", minimum=10, maximum=100, step=1, value=50) | |
| with gr.Row(): | |
| cfg_trunc_neta = gr.Slider(label="CFG Truncation Ratio", minimum=0.0, maximum=10.0, step=0.1, value=6.0) | |
| sigmas_neta = gr.Slider(label="Sigmas Factor", minimum=0.9, maximum=1.1, step=0.01, value=1.0) | |
| with gr.Row(): | |
| cfg_norm_neta = gr.Checkbox(label="CFG Normalization", value=False) | |
| seed_neta = gr.Number(label="Seed (-1 for random)", value=-1, precision=0) | |
| generate_btn_neta = gr.Button("Generate", variant="primary") | |
| with gr.Column(scale=1): | |
| image_output_neta = gr.Image(label="Generated Image", format="png", interactive=False) | |
| used_seed_neta = gr.Number(label="Used Seed", interactive=False) | |
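
    # Wire each Generate button to its callback; the order of `inputs` must
    # match the positional parameters of the corresponding function.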
    generate_btn_pony.click(
        fn=generate_image_pony,
        inputs=[prompt_pony, neg_prompt_pony, height_pony, width_pony, steps_pony, cfg_pony, sigmas_pony, seed_pony],
        outputs=[image_output_pony, used_seed_pony]
    )
    generate_btn_neta.click(
        fn=generate_image_netayume,
        inputs=[prompt_neta, neg_prompt_neta, system_prompt_neta, height_neta, width_neta, cfg_neta, steps_neta, cfg_trunc_neta, cfg_norm_neta, seed_neta, sigmas_neta],
        outputs=[image_output_neta, used_seed_neta]
    )

demo.launch()