with ref image
- App/Generate/Schema.py +14 -2
- App/Generate/database/Model.py +14 -8
- App/Generate/generatorRoutes.py +8 -4
App/Generate/Schema.py
CHANGED
@@ -3,11 +3,23 @@ from pydantic import BaseModel, HttpUrl
 from pydantic import validator
 
 
+class Scene(BaseModel):
+    narration: str
+    image_prompts: List[str]
+    voice: str
+
+
+class Story(BaseModel):
+    scenes: List[Scene]
+    reference_image_url: Optional[str] = None
+    ip_adapter_weight: Optional[float] = 0.4  # Add this line
+
+
 class GeneratorRequest(BaseModel):
     prompt: str
-    grok: Optional[bool] = True
     batch_size: int = 4
-
+    grok: bool = False
+    model: str = "command"
 
 
 class GeneratorBulkRequest(BaseModel):
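The new Scene and Story models give the JSON endpoint a typed request body. A minimal sketch of constructing one (the import path mirrors the file layout above; the field values, the voice id, and the Pydantic-v1-style .dict() call are assumptions for illustration, not part of this commit):

# Illustrative only: field names come from Schema.py above, values are placeholders.
from App.Generate.Schema import Scene, Story

story = Story(
    scenes=[
        Scene(
            narration="A short narration for the first scene.",
            image_prompts=["a misty mountain at dawn", "the same mountain at dusk"],
            voice="en_us_001",  # placeholder voice id
        )
    ],
    reference_image_url="https://example.com/reference.png",  # optional, defaults to None
    ip_adapter_weight=0.4,  # optional, defaults to 0.4
)
print(story.dict())  # .dict() assumes Pydantic v1, consistent with the `validator` import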
App/Generate/database/Model.py
CHANGED
@@ -227,9 +227,13 @@ class Scene(orm.Model):
         transcript = await self.tts._make_transcript(links=links, text=text)
         return transform_alignment_data(data=transcript, offset=offset)
 
-    async def generate_scene_data(self):
+    async def generate_scene_data(
+        self, reference_image_url: str = None, ip_adapter_weight: float = 0.4
+    ):
         # Run narrate() and generate_images() concurrently
-        await asyncio.gather(self.narrate(), self.generate_images())
+        await asyncio.gather(
+            self.narrate(), self.generate_images(reference_image_url, ip_adapter_weight)
+        )
         self.calculate_durations()
 
     async def narrate(self):
@@ -257,16 +261,18 @@ class Scene(orm.Model):
         self.narration_duration = int(len(audio_file) / 1000)
         self.image_duration = self.narration_duration / len(self.image_prompts)
 
-    async def generate_images(self):
+    async def generate_images(
+        self,
+        reference_image_url: str = "https://image.lexica.art/full_webp/d6ddd5c5-060c-4aba-b9d0-cf0e02dc65bd",
+        ip_adapter_weight: float = 0.4,
+    ):
         self.images = []
         async with aiohttp.ClientSession() as session:
             image_generator = ModalImageGenerator(session)
             for prompt in self.image_prompts:
-                # You may want to define a method to get a suitable reference image URL
-                reference_image_url = "https://image.lexica.art/full_webp/11caada0-c3c3-4b70-b7a5-d3172c07fc14"
                 try:
-                    image_url = await image_generator.
-                        prompt, reference_image_url
+                    image_url = await image_generator.generate_image(
+                        prompt, reference_image_url, ip_adapter_weight
                     )
                     self.images.append(image_url)
                 except Exception as e:
@@ -309,7 +315,7 @@ class BackgroundMusic(orm.Model):
     # pass
 
 
-# #
+# # Create the tables
 # async def create_tables():
 # datas = {
 # "narration": "Welcome to a journey through some of history's strangest moments! Get ready to explore the bizarre, the unusual, and the downright weird.",
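Scene.generate_scene_data() now threads the reference image URL and IP-Adapter weight down to generate_images() while still running narration and image generation concurrently. A self-contained sketch of that pattern, with stand-in functions rather than the real Scene methods:

# Stand-ins for Scene.narrate() / Scene.generate_images(); only the
# gather-then-post-process shape mirrors the code above.
import asyncio
from typing import List, Optional


async def narrate() -> str:
    await asyncio.sleep(0.1)  # stands in for the TTS request
    return "narration.mp3"


async def generate_images(reference_image_url: Optional[str], ip_adapter_weight: float) -> List[str]:
    await asyncio.sleep(0.1)  # stands in for the ModalImageGenerator calls
    return [f"image_{i}.png" for i in range(3)]


async def generate_scene_data(reference_image_url: Optional[str] = None, ip_adapter_weight: float = 0.4):
    audio, images = await asyncio.gather(
        narrate(),
        generate_images(reference_image_url, ip_adapter_weight),
    )
    # durations (calculate_durations() in the real model) are computed only after both finish
    return audio, images


print(asyncio.run(generate_scene_data("https://example.com/ref.png", 0.4)))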
App/Generate/generatorRoutes.py
CHANGED
@@ -20,8 +20,8 @@ from App.Editor.editorRoutes import celery_task, EditorRequest
 import uuid
 
 
-async def update_scene(model_scene):
-    await model_scene.generate_scene_data()
+async def update_scene(model_scene, reference_image_url=None, ip_adapter_weight=0.4):
+    await model_scene.generate_scene_data(reference_image_url, ip_adapter_weight)
     await model_scene.update(**model_scene.__dict__)
 
 
@@ -53,7 +53,11 @@ async def generate_assets(generated_story: Story, batch_size=4, threeD=True):
         await model_scene.update(**model_scene.__dict__)
         all_scenes.append(model_scene)
         batch_updates.append(
-            update_scene(model_scene)
+            update_scene(
+                model_scene,
+                generated_story.reference_image_url,
+                generated_story.ip_adapter_weight,
+            )
         )  # Append update coroutine to batch_updates
         # pause per batch
         await asyncio.gather(
@@ -127,7 +131,7 @@ async def generate_video(
 
 @generator_router.post("/generate_video_from_json")
 async def generate_video_from_json(jsonReq: Story, background_task: BackgroundTasks):
-    background_task.add_task(
+    background_task.add_task(generate_assets, jsonReq)
     return {"task_id": "started"}
 
 
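Since /generate_video_from_json now schedules generate_assets as a background task, the route can be exercised with any HTTP client. A rough sketch (the base URL, any router prefix, and all field values are assumptions; only the path and the Story field names come from the code above):

# Illustrative request; adjust host, port, and router prefix for the actual deployment.
import requests

payload = {
    "scenes": [
        {
            "narration": "Welcome to a journey through some of history's strangest moments!",
            "image_prompts": ["an old world map on parchment"],
            "voice": "en_us_001",  # placeholder voice id
        }
    ],
    "reference_image_url": "https://example.com/reference.png",
    "ip_adapter_weight": 0.4,
}

resp = requests.post("http://localhost:8000/generate_video_from_json", json=payload)
print(resp.json())  # the route immediately returns {"task_id": "started"}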