Upload 2 files
- app.py +7 -6
- requirements.txt +2 -0
app.py
CHANGED
@@ -7,6 +7,7 @@ from datetime import datetime
 import time
 import logging
 import traceback # For better error reporting
+from PIL import Image
 
 #os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
 #logger = logging.getLogger()

@@ -46,7 +47,7 @@ def load_fn(models)
     for model in models:
         if model not in models_load.keys():
             try:
-                m = gr.load(f'models/{model}')
+                m = gr.load(f'models/{model}', examples=None)
 
                 models_load.update({model: m})
                 models_load[model] = m # Store in dictionary

@@ -121,8 +122,8 @@ def gen_fn(model_str, prompt)
     # Execute the model's processing with a timeout
     #future = executor.submit(models_load[model_str], f"{prompt}")
     #response = future.result(timeout=150) # Wait for result with timeout
-    response = models_load.get(model_str, lambda txt: None)(f'{prompt}')
-    if isinstance(response, gr.Image):
+    response = models_load.get(model_str, lambda txt: None).fn(f'{prompt}')
+    if isinstance(response, gr.Image) or isinstance(response, Image.Image):
         return response
     elif isinstance(response, tuple):
         return None

@@ -219,9 +220,9 @@ textarea{ position: absolute; font-size: 1em !important; padding: 4px;
 
 
 demo.queue()
-demo.queue = False
-demo.config["queue"] = False
-demo.launch(max_threads=40)
+#demo.queue = False
+#demo.config["queue"] = False
+demo.launch(max_threads=40, ssr_mode=False)
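The substantive change in gen_fn is that the loaded object is no longer called directly but through its .fn attribute, the underlying prediction callable of the Interface that gr.load() returns. Note that the lambda txt: None fallback has no .fn attribute, so a missing key now raises AttributeError instead of returning None. Below is a minimal sketch of the pattern this commit moves to; load_model, gen_image, and the explicit membership guard are illustrative, not from the diff.

import gradio as gr
from PIL import Image

models_load = {}

def load_model(model):
    # gr.load() wraps the hosted model in a regular Gradio Interface
    if model not in models_load:
        models_load[model] = gr.load(f'models/{model}')

def gen_image(model_str, prompt):
    iface = models_load.get(model_str)
    if iface is None:  # explicit guard instead of the lambda fallback
        return None
    # .fn invokes the prediction function without going through the UI layer
    response = iface.fn(prompt)
    # accept either a Gradio image component or a PIL image, as in the diff
    if isinstance(response, (gr.Image, Image.Image)):
        return response
    return None

On the launch side, demo.queue() stays in effect and the old queue overrides are merely commented out, while ssr_mode=False opts out of the server-side rendering that recent Gradio versions can enable by default on Spaces.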
requirements.txt
ADDED
@@ -0,0 +1,2 @@
+pydantic==2.10.6
+huggingface_hub==0.25.2
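Both entries are exact pins, which locks the Space's dependency resolution to known releases. A quick sanity check, as a sketch (assuming it runs inside the Space's Python environment), that the installed packages match the pins:

# Sketch: confirm the versions pinned in requirements.txt are installed.
import pydantic
import huggingface_hub

assert pydantic.VERSION == "2.10.6", pydantic.VERSION
assert huggingface_hub.__version__ == "0.25.2", huggingface_hub.__version__
print("requirements pins satisfied")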