import gradio as gr
from diffusers import StableDiffusionPipeline
import torch

# Run on a CUDA GPU when available; otherwise fall back to the CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load Stable Diffusion v1.5; float16 halves memory use on the GPU, while the CPU needs float32.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
)
pipe = pipe.to(device)
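# Optional (not in the original script): if generation runs out of GPU memory,
# diffusers' built-in attention slicing trades a little speed for lower VRAM use.
# pipe.enable_attention_slicing()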

def generar(prompt):
    # Autocast only helps on CUDA; on CPU the pipeline already runs in float32.
    if device == "cuda":
        with torch.autocast("cuda"):
            image = pipe(prompt, num_inference_steps=25).images[0]
    else:
        image = pipe(prompt, num_inference_steps=25).images[0]
    return image
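# Sketch (assumption, not in the original): for reproducible outputs, pass a
# seeded generator to the pipeline call, e.g.
#   generator = torch.Generator(device).manual_seed(42)
#   pipe(prompt, num_inference_steps=25, generator=generator).images[0]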

# Minimal Gradio UI: a text prompt in, the generated image out.
iface = gr.Interface(
    fn=generar,
    inputs=gr.Textbox(label="Write your description", placeholder="Lo mejor de Blax / The Best of Blax"),
    outputs="image",
    title="Lo mejor de Blax / The Best of Blax",
    description="Generate images with AI (Stable Diffusion v1.5) in ~30 seconds",
)

iface.launch()
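# Gradio option (assumption about your setup): pass share=True to expose a
# temporary public link for the demo.
# iface.launch(share=True)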