AhmadFiaz commited on
Commit
99d985e
·
verified ·
1 Parent(s): 3d1dc91

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +66 -18
app.py CHANGED
@@ -26,16 +26,18 @@ else:
26
  power_device = "CPU"
27
  device = "cpu"
28
 
29
- huggingface_token = os.getenv("HUGGINGFACE_TOKEN") # fixed typo
 
30
 
31
  model_path = snapshot_download(
32
  repo_id="black-forest-labs/FLUX.1-dev",
33
  repo_type="model",
34
  ignore_patterns=["*.md", "*.gitattributes"],
35
  local_dir="FLUX.1-dev",
36
- token=huggingface_token,
37
  )
38
 
 
39
  # Load pipeline
40
  controlnet = FluxControlNetModel.from_pretrained(
41
  "jasperai/Flux.1-dev-Controlnet-Upscaler", torch_dtype=torch.bfloat16
@@ -48,6 +50,7 @@ pipe.to(device)
48
  MAX_SEED = 1000000
49
  MAX_PIXEL_BUDGET = 1024 * 1024
50
 
 
51
  def process_input(input_image, upscale_factor, **kwargs):
52
  w, h = input_image.size
53
  w_original, h_original = w, h
@@ -56,6 +59,12 @@ def process_input(input_image, upscale_factor, **kwargs):
56
  was_resized = False
57
 
58
  if w * h * upscale_factor**2 > MAX_PIXEL_BUDGET:
 
 
 
 
 
 
59
  input_image = input_image.resize(
60
  (
61
  int(aspect_ratio * MAX_PIXEL_BUDGET**0.5 // upscale_factor),
@@ -64,6 +73,7 @@ def process_input(input_image, upscale_factor, **kwargs):
64
  )
65
  was_resized = True
66
 
 
67
  w, h = input_image.size
68
  w = w - w % 8
69
  h = h - h % 8
@@ -71,7 +81,7 @@ def process_input(input_image, upscale_factor, **kwargs):
71
  return input_image.resize((w, h)), w_original, h_original, was_resized
72
 
73
 
74
- @spaces.GPU # leave decorator if using Spaces GPU
75
  def infer(
76
  seed,
77
  randomize_seed,
@@ -88,10 +98,13 @@ def infer(
88
  input_image, upscale_factor
89
  )
90
 
 
91
  w, h = input_image.size
92
  control_image = input_image.resize((w * upscale_factor, h * upscale_factor))
 
93
  generator = torch.Generator().manual_seed(seed)
94
 
 
95
  image = pipe(
96
  prompt="",
97
  control_image=control_image,
@@ -104,18 +117,25 @@ def infer(
104
  ).images[0]
105
 
106
  if was_resized:
107
- image = image.resize((w_original * upscale_factor, h_original * upscale_factor))
 
 
108
 
 
 
109
  image.save("output.jpg")
 
110
  return [true_input_image, image, seed]
111
 
112
 
113
  with gr.Blocks(css=css) as demo:
 
114
  gr.Markdown(
115
  f"""
116
  # ⚡ Flux.1-dev Upscaler ControlNet ⚡
117
  This is an interactive demo of [Flux.1-dev Upscaler ControlNet](https://huggingface.co/jasperai/Flux.1-dev-Controlnet-Upscaler) taking as input a low resolution image to generate a high resolution image.
118
  Currently running on {power_device}.
 
119
  """
120
  )
121
 
@@ -162,8 +182,12 @@ with gr.Blocks(css=css) as demo:
162
 
163
  examples = gr.Examples(
164
  examples=[
 
165
  [42, False, "examples/image_2.jpg", 28, 4, 0.6],
 
166
  [42, False, "examples/image_4.jpg", 28, 4, 0.6],
 
 
167
  ],
168
  inputs=[
169
  seed,
@@ -178,19 +202,43 @@ with gr.Blocks(css=css) as demo:
178
  cache_examples="lazy",
179
  )
180
 
181
- gr.on(
182
- [run_button.click],
183
- fn=infer,
184
- inputs=[
185
- seed,
186
- randomize_seed,
187
- input_im,
188
- num_inference_steps,
189
- upscale_factor,
190
- controlnet_conditioning_scale,
191
- ],
192
- outputs=result,
193
- show_api=True, # allow API usage
 
 
 
 
 
 
 
 
 
 
194
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
195
 
196
- demo.queue().launch(share=False, show_api=True) # API enabled
 
26
  power_device = "CPU"
27
  device = "cpu"
28
 
29
+
30
+ huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
31
 
32
  model_path = snapshot_download(
33
  repo_id="black-forest-labs/FLUX.1-dev",
34
  repo_type="model",
35
  ignore_patterns=["*.md", "*.gitattributes"],
36
  local_dir="FLUX.1-dev",
37
+ token=huggingface_token, # type a new token-id.
38
  )
39
 
40
+
41
  # Load pipeline
42
  controlnet = FluxControlNetModel.from_pretrained(
43
  "jasperai/Flux.1-dev-Controlnet-Upscaler", torch_dtype=torch.bfloat16
 
50
  MAX_SEED = 1000000
51
  MAX_PIXEL_BUDGET = 1024 * 1024
52
 
53
+
54
  def process_input(input_image, upscale_factor, **kwargs):
55
  w, h = input_image.size
56
  w_original, h_original = w, h
 
59
  was_resized = False
60
 
61
  if w * h * upscale_factor**2 > MAX_PIXEL_BUDGET:
62
+ warnings.warn(
63
+ f"Requested output image is too large ({w * upscale_factor}x{h * upscale_factor}). Resizing to ({int(aspect_ratio * MAX_PIXEL_BUDGET ** 0.5 // upscale_factor), int(MAX_PIXEL_BUDGET ** 0.5 // aspect_ratio // upscale_factor)}) pixels."
64
+ )
65
+ gr.Info(
66
+ f"Requested output image is too large ({w * upscale_factor}x{h * upscale_factor}). Resizing input to ({int(aspect_ratio * MAX_PIXEL_BUDGET ** 0.5 // upscale_factor), int(MAX_PIXEL_BUDGET ** 0.5 // aspect_ratio // upscale_factor)}) pixels budget."
67
+ )
68
  input_image = input_image.resize(
69
  (
70
  int(aspect_ratio * MAX_PIXEL_BUDGET**0.5 // upscale_factor),
 
73
  )
74
  was_resized = True
75
 
76
+ # resize to multiple of 8
77
  w, h = input_image.size
78
  w = w - w % 8
79
  h = h - h % 8
 
81
  return input_image.resize((w, h)), w_original, h_original, was_resized
82
 
83
 
84
+ @spaces.GPU#(duration=42)
85
  def infer(
86
  seed,
87
  randomize_seed,
 
98
  input_image, upscale_factor
99
  )
100
 
101
+ # rescale with upscale factor
102
  w, h = input_image.size
103
  control_image = input_image.resize((w * upscale_factor, h * upscale_factor))
104
+
105
  generator = torch.Generator().manual_seed(seed)
106
 
107
+ gr.Info("Upscaling image...")
108
  image = pipe(
109
  prompt="",
110
  control_image=control_image,
 
117
  ).images[0]
118
 
119
  if was_resized:
120
+ gr.Info(
121
+ f"Resizing output image to targeted {w_original * upscale_factor}x{h_original * upscale_factor} size."
122
+ )
123
 
124
+ # resize to target desired size
125
+ image = image.resize((w_original * upscale_factor, h_original * upscale_factor))
126
  image.save("output.jpg")
127
+ # convert to numpy
128
  return [true_input_image, image, seed]
129
 
130
 
131
  with gr.Blocks(css=css) as demo:
132
+ # with gr.Column(elem_id="col-container"):
133
  gr.Markdown(
134
  f"""
135
  # ⚡ Flux.1-dev Upscaler ControlNet ⚡
136
  This is an interactive demo of [Flux.1-dev Upscaler ControlNet](https://huggingface.co/jasperai/Flux.1-dev-Controlnet-Upscaler) taking as input a low resolution image to generate a high resolution image.
137
  Currently running on {power_device}.
138
+ *Note*: Even though the model can handle higher resolution images, due to GPU memory constraints, this demo was limited to a generated output not exceeding a pixel budget of 1024x1024. If the requested size exceeds that limit, the input will be first resized keeping the aspect ratio such that the output of the controlNet model does not exceed the allocated pixel budget. The output is then resized to the targeted shape using a simple resizing. This may explain some artifacts for high resolution input. To address this, run the demo locally or consider implementing a tiling strategy. Happy upscaling! 🚀
139
  """
140
  )
141
 
 
182
 
183
  examples = gr.Examples(
184
  examples=[
185
+ # [42, False, "examples/image_1.jpg", 28, 4, 0.6],
186
  [42, False, "examples/image_2.jpg", 28, 4, 0.6],
187
+ # [42, False, "examples/image_3.jpg", 28, 4, 0.6],
188
  [42, False, "examples/image_4.jpg", 28, 4, 0.6],
189
+ # [42, False, "examples/image_5.jpg", 28, 4, 0.6],
190
+ # [42, False, "examples/image_6.jpg", 28, 4, 0.6],
191
  ],
192
  inputs=[
193
  seed,
 
202
  cache_examples="lazy",
203
  )
204
 
205
+ # examples = gr.Examples(
206
+ # examples=[
207
+ # #[42, False, "examples/image_1.jpg", 28, 4, 0.6],
208
+ # [42, False, "examples/image_2.jpg", 28, 4, 0.6],
209
+ # #[42, False, "examples/image_3.jpg", 28, 4, 0.6],
210
+ # #[42, False, "examples/image_4.jpg", 28, 4, 0.6],
211
+ # [42, False, "examples/image_5.jpg", 28, 4, 0.6],
212
+ # [42, False, "examples/image_6.jpg", 28, 4, 0.6],
213
+ # [42, False, "examples/image_7.jpg", 28, 4, 0.6],
214
+ # ],
215
+ # inputs=[
216
+ # seed,
217
+ # randomize_seed,
218
+ # input_im,
219
+ # num_inference_steps,
220
+ # upscale_factor,
221
+ # controlnet_conditioning_scale,
222
+ # ],
223
+ # )
224
+
225
+ gr.Markdown("**Disclaimer:**")
226
+ gr.Markdown(
227
+ "This demo is only for research purpose. Jasper cannot be held responsible for the generation of NSFW (Not Safe For Work) content through the use of this demo. Users are solely responsible for any content they create, and it is their obligation to ensure that it adheres to appropriate and ethical standards. Jasper provides the tools, but the responsibility for their use lies with the individual user."
228
  )
229
+ gr.on(
230
+ [run_button.click],
231
+ fn=infer,
232
+ inputs=[
233
+ seed,
234
+ randomize_seed,
235
+ input_im,
236
+ num_inference_steps,
237
+ upscale_factor,
238
+ controlnet_conditioning_scale,
239
+ ],
240
+ outputs=result,
241
+ show_api=True, # changed from False to True
242
+ )
243
 
244
+ demo.queue().launch(share=False, show_api=True)