akhaliq (HF Staff) committed
Commit 62d4fd1 · verified · 1 parent: bcd06e6

Update app.py

Files changed (1)
  1. app.py +14 -336
app.py CHANGED
@@ -1,351 +1,29 @@
 import gradio as gr
-import os
-from huggingface_hub import InferenceClient
-import tempfile
-import shutil
-from pathlib import Path
 
-# Initialize the client
-client = InferenceClient(
-    provider="fal-ai",
-    api_key=os.environ.get("HF_TOKEN"),
-    bill_to="huggingface",
-)
-
-def text_to_video(prompt, duration=5, aspect_ratio="16:9", resolution="720p", profile: gr.OAuthProfile | None = None):
-    """Generate video from text prompt"""
-    try:
-        if profile is None:
-            return None, "❌ Click Sign in with Hugging Face button to use this app for free"
-
-        if not prompt or prompt.strip() == "":
-            return None, "Please enter a text prompt"
-
-        # Generate video from text
-        video = client.text_to_video(
-            prompt,
-            model="akhaliq/veo3.1-fast",
-        )
-
-        # Save the video to a temporary file
-        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as tmp_file:
-            tmp_file.write(video)
-            video_path = tmp_file.name
-
-        return video_path, f"✅ Video generated successfully from prompt: '{prompt[:50]}...'"
-
-    except Exception as e:
-        return None, f"❌ Error generating video: {str(e)}"
-
-def image_to_video(image, prompt, duration=5, aspect_ratio="16:9", resolution="720p", profile: gr.OAuthProfile | None = None):
-    """Generate video from image and prompt"""
-    try:
-        if profile is None:
-            return None, "❌ Click Sign in with Hugging Face button to use this app for free"
-
-        if image is None:
-            return None, "Please upload an image"
-
-        if not prompt or prompt.strip() == "":
-            return None, "Please enter a prompt describing the motion"
-
-        # Read the image file
-        if isinstance(image, str):
-            # If image is a file path
-            with open(image, "rb") as image_file:
-                input_image = image_file.read()
-        else:
-            # If image is already bytes or similar
-            import io
-            from PIL import Image as PILImage
-
-            # Convert to bytes if necessary
-            if isinstance(image, PILImage.Image):
-                buffer = io.BytesIO()
-                image.save(buffer, format='PNG')
-                input_image = buffer.getvalue()
-            else:
-                # Assume it's a numpy array or similar
-                pil_image = PILImage.fromarray(image)
-                buffer = io.BytesIO()
-                pil_image.save(buffer, format='PNG')
-                input_image = buffer.getvalue()
-
-        # Generate video from image
-        video = client.image_to_video(
-            input_image,
-            prompt=prompt,
-            model="akhaliq/veo3.1-fast-image-to-video",
-        )
-
-        # Save the video to a temporary file
-        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as tmp_file:
-            tmp_file.write(video)
-            video_path = tmp_file.name
-
-        return video_path, f"✅ Video generated successfully with motion: '{prompt[:50]}...'"
-
-    except Exception as e:
-        return None, f"❌ Error generating video: {str(e)}"
-
-def clear_text_tab():
-    """Clear text-to-video tab"""
-    return "", None, ""
-
-def clear_image_tab():
-    """Clear image-to-video tab"""
-    return None, "", None, ""
-
-# Custom CSS for better styling
+# Custom CSS to hide everything except maintenance message
 custom_css = """
-.container {
-    max-width: 1200px;
-    margin: auto;
-}
-.header-link {
-    text-decoration: none;
-    color: #2196F3;
-    font-weight: bold;
-}
-.header-link:hover {
-    text-decoration: underline;
-}
-.status-box {
-    padding: 10px;
-    border-radius: 5px;
-    margin-top: 10px;
+.gradio-container > * {
+    display: none !important;
 }
-.auth-warning {
-    color: #ff6b00;
-    font-weight: bold;
+.maintenance-message {
+    display: block !important;
     text-align: center;
-    margin: 1em 0;
-    padding: 1em;
-    background-color: #fff3e0;
-    border-radius: 5px;
-}
-.mobile-link-container {
-    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
-    padding: 1.5em;
-    border-radius: 10px;
-    text-align: center;
-    margin: 1em 0;
-    box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
-}
-.mobile-link {
-    color: white !important;
-    font-size: 1.2em;
-    font-weight: bold;
-    text-decoration: none;
-    display: inline-block;
-    padding: 0.5em 1.5em;
-    background: rgba(255, 255, 255, 0.2);
-    border-radius: 25px;
-    transition: all 0.3s ease;
-}
-.mobile-link:hover {
-    background: rgba(255, 255, 255, 0.3);
-    transform: translateY(-2px);
-    box-shadow: 0 4px 8px rgba(0, 0, 0, 0.2);
-}
-.mobile-text {
-    color: white;
-    margin-bottom: 0.5em;
-    font-size: 1.1em;
+    padding: 50px 20px;
+    font-size: 1.5em;
 }
 """
 
 # Create the Gradio interface
-with gr.Blocks(css=custom_css, theme=gr.themes.Soft(), title="AI Video Generator") as demo:
+with gr.Blocks(css=custom_css, theme=gr.themes.Soft(), title="Veo 3.1") as demo:
     gr.Markdown(
         """
-        # 🎬 AI Video Generator
-        ### Generate stunning videos from text or animate your images with AI
-        #### Powered by VEO 3.1 Fast Model | [Built with anycoder](https://huggingface.co/spaces/akhaliq/anycoder)
-        """
-    )
-
-    # Add mobile link section
-    gr.HTML(
-        """
-        <div class="mobile-link-container">
-            <div class="mobile-text">📱 On mobile? Use the optimized version:</div>
-            <a href="https://akhaliq-veo3-1-fast.hf.space" target="_blank" class="mobile-link">
-                🚀 Open Mobile Version
-            </a>
-        </div>
-        """
-    )
-
-    gr.HTML(
-        """
-        <div class="auth-warning">
-            ⚠️ You must Sign in with Hugging Face using the button below to use this app.
-        </div>
-        """
-    )
-
-    # Add login button - required for OAuth
-    gr.LoginButton()
-
-    with gr.Tabs() as tabs:
-        # Text-to-Video Tab
-        with gr.Tab("📝 Text to Video", id=0):
-            gr.Markdown("### Transform your text descriptions into dynamic videos")
-
-            with gr.Row():
-                with gr.Column(scale=1):
-                    text_prompt = gr.Textbox(
-                        label="Text Prompt",
-                        placeholder="Describe the video you want to create... (e.g., 'A young man walking on the street during sunset')",
-                        lines=4,
-                        max_lines=6
-                    )
-
-                    with gr.Row():
-                        text_generate_btn = gr.Button("🎬 Generate Video", variant="primary", scale=2)
-                        text_clear_btn = gr.ClearButton(value="🗑️ Clear", scale=1)
-
-                    text_status = gr.Textbox(
-                        label="Status",
-                        interactive=False,
-                        visible=True,
-                        elem_classes=["status-box"]
-                    )
-
-                with gr.Column(scale=1):
-                    text_video_output = gr.Video(
-                        label="Generated Video",
-                        autoplay=True,
-                        show_download_button=True,
-                        height=400
-                    )
-
-            # Examples for text-to-video
-            gr.Examples(
-                examples=[
-                    ["A serene beach at sunset with gentle waves"],
-                    ["A bustling city street with neon lights at night"],
-                    ["A majestic eagle soaring through mountain peaks"],
-                    ["An astronaut floating in space near the International Space Station"],
-                    ["Cherry blossoms falling in slow motion in a Japanese garden"],
-                ],
-                inputs=text_prompt,
-                label="Example Prompts"
-            )
+        # 🚧 Under Maintenance
 
-        # Image-to-Video Tab
-        with gr.Tab("🖼️ Image to Video", id=1):
-            gr.Markdown("### Bring your static images to life with motion")
-
-            with gr.Row():
-                with gr.Column(scale=1):
-                    image_input = gr.Image(
-                        label="Upload Image",
-                        type="pil",
-                        height=300
-                    )
-
-                    image_prompt = gr.Textbox(
-                        label="Motion Prompt",
-                        placeholder="Describe how the image should move... (e.g., 'The cat starts to dance')",
-                        lines=3,
-                        max_lines=5
-                    )
-
-                    with gr.Row():
-                        image_generate_btn = gr.Button("🎬 Animate Image", variant="primary", scale=2)
-                        image_clear_btn = gr.ClearButton(value="🗑️ Clear", scale=1)
-
-                    image_status = gr.Textbox(
-                        label="Status",
-                        interactive=False,
-                        visible=True,
-                        elem_classes=["status-box"]
-                    )
-
-                with gr.Column(scale=1):
-                    image_video_output = gr.Video(
-                        label="Generated Video",
-                        autoplay=True,
-                        show_download_button=True,
-                        height=400
-                    )
-
-            # Examples for image-to-video
-            gr.Examples(
-                examples=[
-                    [None, "The person starts walking forward"],
-                    [None, "The animal begins to run"],
-                    [None, "Camera slowly zooms in while the subject smiles"],
-                    [None, "The flowers sway gently in the breeze"],
-                    [None, "The clouds move across the sky in time-lapse"],
-                ],
-                inputs=[image_input, image_prompt],
-                label="Example Motion Prompts"
-            )
-
-    # How to Use section
-    with gr.Accordion("📖 How to Use", open=False):
-        gr.Markdown(
-            """
-            ### Text to Video:
-            1. Enter a detailed description of the video you want to create
-            2. Optionally adjust advanced settings (duration, aspect ratio, resolution)
-            3. Click "Generate Video" and wait for the AI to create your video
-            4. Download or preview your generated video
-
-            ### Image to Video:
-            1. Upload an image you want to animate
-            2. Describe the motion or action you want to add to the image
-            3. Optionally adjust advanced settings
-            4. Click "Animate Image" to bring your image to life
-            5. Download or preview your animated video
-
-            ### Tips for Better Results:
-            - Be specific and descriptive in your prompts
-            - For image-to-video, describe natural motions that fit the image
-            - Use high-quality input images for better results
-            - Experiment with different prompts to get the desired effect
-
-            ### Mobile Users:
-            - For the best mobile experience, use the optimized version at: https://akhaliq-veo3-1-fast.hf.space
-            """
-        )
-
-    # Event handlers
-    text_generate_btn.click(
-        fn=text_to_video,
-        inputs=[text_prompt],
-        outputs=[text_video_output, text_status],
-        show_progress="full",
-        queue=False,
-        api_name=False,
-        show_api=False
-    )
-
-    text_clear_btn.click(
-        fn=clear_text_tab,
-        inputs=[],
-        outputs=[text_prompt, text_video_output, text_status],
-        queue=False
-    )
-
-    image_generate_btn.click(
-        fn=image_to_video,
-        inputs=[image_input, image_prompt],
-        outputs=[image_video_output, image_status],
-        show_progress="full",
-        queue=False,
-        api_name=False,
-        show_api=False
-    )
-
-    image_clear_btn.click(
-        fn=clear_image_tab,
-        inputs=[],
-        outputs=[image_input, image_prompt, image_video_output, image_status],
-        queue=False
+        This application is currently under maintenance. Please check back later.
+
+        We apologize for any inconvenience.
+        """,
+        elem_classes=["maintenance-message"]
     )
 
 # Launch the app
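
For reference, the file that results from this commit, assembled from the context and added lines in the hunk above, is roughly the following. Indentation and blank lines are reconstructed from the diff, and the trailing `demo.launch()` call is an assumption (the hunk ends at the `# Launch the app` comment), so exact details may differ from the committed file.

```python
import gradio as gr

# Custom CSS to hide everything except maintenance message
custom_css = """
.gradio-container > * {
    display: none !important;
}
.maintenance-message {
    display: block !important;
    text-align: center;
    padding: 50px 20px;
    font-size: 1.5em;
}
"""

# Create the Gradio interface
with gr.Blocks(css=custom_css, theme=gr.themes.Soft(), title="Veo 3.1") as demo:
    gr.Markdown(
        """
        # 🚧 Under Maintenance

        This application is currently under maintenance. Please check back later.

        We apologize for any inconvenience.
        """,
        elem_classes=["maintenance-message"]
    )

# Launch the app
demo.launch()  # assumed: the launch call itself falls outside the hunk shown above
```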