Update app.py
app.py CHANGED

@@ -41,11 +41,23 @@ pipe.to("cuda")
 pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config, timestep_spacing ="trailing")
 
 with gr.Blocks() as demo:
+    block.load(
+        None,
+        None,
+        _js="""
+        () => {
+            const params = new URLSearchParams(window.location.search);
+            if (!params.has('__theme')) {
+                params.set('__theme', 'dark');
+                window.location.search = params.toString();
+            }
+        }""",
+    )
     with gr.Column():
         with gr.Row():
             with gr.Column():
                 # scribble = gr.Image(source="canvas", tool="color-sketch", shape=(512, 512), height=768, width=768, type="pil")
-                scribble = gr.ImageEditor(type="pil", image_mode="L", crop_size=(512, 512), sources=(), brush=gr.Brush(color_mode="fixed", colors=["#
+                scribble = gr.ImageEditor(type="pil", image_mode="L", crop_size=(512, 512), sources=(), brush=gr.Brush(color_mode="fixed", colors=["#FFFFFF"]))
                 # scribble_out = gr.Image(height=384, width=384)
                 num_images = gr.Slider(label="Number of Images", minimum=1, maximum=8, step=1, value=4, interactive=True)
                 steps = gr.Slider(label="Inference Steps", minimum=1, maximum=8, step=1, value=1, interactive=True)

@@ -62,12 +74,12 @@ with gr.Blocks() as demo:
     @spaces.GPU
     def process_image(steps, prompt, controlnet_scale, eta, seed, scribble, num_images):
         global pipe
-        if scribble:
+        if scribble:
             with torch.inference_mode(), torch.autocast("cuda", dtype=torch.float16), timer("inference"):
                 result = pipe(
                     prompt=[prompt]*num_images,
-
-                    image=[scribble['composite']]*num_images,
+                    image=[ImageOps.invert(scribble['composite'])]*num_images,
+                    # image=[scribble['composite']]*num_images,
                     generator=torch.Generator().manual_seed(int(seed)),
                     num_inference_steps=steps,
                     guidance_scale=0.,
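The `block.load(..., _js=...)` hook added in the first hunk forces Gradio's dark theme by appending `?__theme=dark` to the page URL. Below is a minimal, self-contained sketch of that pattern, assuming Gradio 3.x where `Blocks.load()` accepts the `_js` keyword (newer releases rename it to `js`); note that the sketch calls the hook on `demo`, the name the `with gr.Blocks() as demo:` context actually binds, whereas the diff calls it on `block`.

import gradio as gr

# JavaScript run once on page load: if ?__theme=dark is not already in the
# query string, add it and reload so Gradio renders its dark theme.
FORCE_DARK_JS = """
() => {
    const params = new URLSearchParams(window.location.search);
    if (!params.has('__theme')) {
        params.set('__theme', 'dark');
        window.location.search = params.toString();
    }
}
"""

with gr.Blocks() as demo:
    gr.Markdown("placeholder UI")  # hypothetical stand-in for the real controls
    # fn=None, inputs=None: no Python callback, only the client-side snippet.
    # Gradio 3.x keyword is `_js`; Gradio 4.x renames it to `js`.
    demo.load(None, None, _js=FORCE_DARK_JS)

if __name__ == "__main__":
    demo.launch()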
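The second hunk guards inference behind `if scribble:` and inverts the editor composite before passing it to the pipeline as the conditioning image. A sketch of that preprocessing step follows, under the assumption that the `gr.ImageEditor` value is a dict whose 'composite' entry is an "L"-mode PIL image (as configured above) and that the inversion flips stroke/background polarity to match what the ControlNet expects; the helper name `prepare_control_images` is illustrative, not from the Space.

from PIL import Image, ImageOps

def prepare_control_images(scribble: dict, num_images: int) -> list:
    """Invert the sketch and replicate it once per requested output image."""
    composite = scribble["composite"]      # "L"-mode canvas from gr.ImageEditor
    inverted = ImageOps.invert(composite)  # swap white strokes / black background
    return [inverted] * num_images

# Hypothetical stand-in for the editor output: a blank 512x512 canvas.
canvas = {"composite": Image.new("L", (512, 512), 0)}
control_images = prepare_control_images(canvas, num_images=4)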