Spaces: Running on Zero

Commit b656b16 · Parent(s): 0ddac36
boosting chances for new model
- model/model_manager.py +16 -6
- model/models/__init__.py +2 -1
model/model_manager.py
CHANGED

@@ -5,7 +5,7 @@ import requests
 import io, base64, json, os
 import spaces
 from PIL import Image
-from .models import IMAGE_GENERATION_MODELS, IMAGE_EDITION_MODELS, VIDEO_GENERATION_MODELS, MUSEUM_UNSUPPORTED_MODELS, DESIRED_APPEAR_MODEL, load_pipeline
+from .models import IMAGE_GENERATION_MODELS, IMAGE_EDITION_MODELS, VIDEO_GENERATION_MODELS, MUSEUM_UNSUPPORTED_MODELS, DESIRED_APPEAR_MODEL, DESIRED_APPEAR_MODEL_CHANCE, load_pipeline
 from .fetch_museum_results import draw_from_imagen_museum, draw2_from_imagen_museum, draw_from_videogen_museum, draw2_from_videogen_museum
 from .pre_download import pre_download_all_models, pre_download_image_models_gen, pre_download_image_models_edit, pre_download_video_models_gen
 from transformers import AutoTokenizer, AutoModelForCausalLM

@@ -327,12 +327,22 @@ class ModelManager:
     def generate_video_vg_museum_parallel_anony(self, model_A, model_B):
         # Using list comprehension to get the difference between two lists
         picking_list = [item for item in self.model_vg_list if item not in self.excluding_model_list]
-
+
         if model_A == "" and model_B == "":
-            …
-            …
-            …
-            # …
+            # Filter desired_model_list to only include models that exist in picking_list
+            valid_desired_models = [m for m in self.desired_model_list if m in picking_list]
+
+            # 50% (or DESIRED_APPEAR_MODEL_CHANCE) chance to include exactly one model from valid desired_model_list
+            if valid_desired_models and random.random() < DESIRED_APPEAR_MODEL_CHANCE:
+                # Pick one model from valid desired list
+                desired_model = random.choice(valid_desired_models)
+                # Pick one model from regular list, excluding desired models
+                regular_model = random.choice([m for m in picking_list if m not in valid_desired_models])
+                # Randomly determine order
+                model_names = [desired_model, regular_model] if random.random() < 0.5 else [regular_model, desired_model]
+            else:
+                # Pick two models from the regular picking list
+                model_names = random.sample([model for model in picking_list], 2)
         else:
             model_names = [model_A, model_B]
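For reference, a minimal standalone sketch of the new anonymous-pairing logic (the pick_pair helper, the placeholder model names, and the pool size below are illustrative assumptions, not part of this commit). It mirrors the committed branch structure and estimates how often the boosted model lands in a pair:

import random

# Hypothetical stand-ins for the Space's real model lists (names are illustrative only)
PICKING_LIST = [f"videogenhub_Model{i}_text2video" for i in range(8)]
PICKING_LIST += ["videogenhub_Mochi1_text2video"]
DESIRED_APPEAR_MODEL = ["videogenhub_Mochi1_text2video"]
DESIRED_APPEAR_MODEL_CHANCE = 0.5

def pick_pair(picking_list, desired_list, chance):
    # Same branch structure as the committed generate_video_vg_museum_parallel_anony logic
    valid_desired = [m for m in desired_list if m in picking_list]
    if valid_desired and random.random() < chance:
        desired = random.choice(valid_desired)
        regular = random.choice([m for m in picking_list if m not in valid_desired])
        return [desired, regular] if random.random() < 0.5 else [regular, desired]
    return random.sample(picking_list, 2)

# Quick simulation: how often does the boosted model appear in an anonymous pair?
trials = 10_000
hits = sum(DESIRED_APPEAR_MODEL[0] in pick_pair(PICKING_LIST, DESIRED_APPEAR_MODEL, DESIRED_APPEAR_MODEL_CHANCE)
           for _ in range(trials))
print(f"boosted model appeared in {hits / trials:.1%} of pairs")
# Expected: ~0.5 from the boost branch plus 2/9 of the remaining 0.5 from the
# uniform random.sample branch (with 9 selectable models), i.e. roughly 61%.

With DESIRED_APPEAR_MODEL_CHANCE = 0.5 the boosted model is shown noticeably more often than under pure uniform sampling, which is the point of the commit.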
model/models/__init__.py
CHANGED

@@ -37,7 +37,8 @@ MAP_NAMES_IMAGENHUB = {}
 MAP_NAMES_VIDEOGENHUB = {"CogVideoX-2B": "CogVideoX", "CogVideoX-5B": "CogVideoX5B"}

 MUSEUM_UNSUPPORTED_MODELS = []
-DESIRED_APPEAR_MODEL = ['…
+DESIRED_APPEAR_MODEL = ['videogenhub_Mochi1_text2video']
+DESIRED_APPEAR_MODEL_CHANCE = 0.5

 ALL_MODELS = IMAGE_GENERATION_MODELS + IMAGE_EDITION_MODELS + VIDEO_GENERATION_MODELS
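As a sanity check on these constants, the overall chance that the single boosted model shows up in an anonymous pair can be written in closed form; the pool size used below is an assumed example value, not something defined in this repo:

def expected_appearance(chance: float, pool_size: int) -> float:
    # Boost branch always includes the boosted model; the uniform branch
    # includes it with probability 2 / pool_size (sampling 2 of pool_size models).
    return chance + (1 - chance) * (2 / pool_size)

print(expected_appearance(0.5, 9))  # ~0.61 with DESIRED_APPEAR_MODEL_CHANCE = 0.5 and 9 models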