learnmlf committed
Commit 5e56570 · 1 Parent(s): 587e46b

feat: zerogpu support

Files changed (2):
  1. app.py +6 -3
  2. requirements.txt +1 -0
app.py CHANGED
@@ -16,6 +16,7 @@ from media_pipe.draw_util import FaceMeshVisualizer
 
 from download_models import download
 import torch
+import spaces # ZeroGPU support
 
 # Download models and check for exists
 download()
@@ -30,7 +31,7 @@ if torch.cuda.is_available():
     print(f" GPU device: {torch.cuda.get_device_name(0)}")
     print(f" GPU count: {torch.cuda.device_count()}")
 else:
-    print(" ⚠️ No CUDA GPU detected - will use CPU")
+    print(" ℹ️ GPU will be allocated on-demand (ZeroGPU mode)")
 print("="*50)
 
 PROCESSED_VIDEO_DIR = './processed_videos'
@@ -184,6 +185,7 @@ def preview_crop(image_path, npy_file, video_path, expand_x, expand_y, offset_x,
     else:
         return None,None, "Failed to generate crop preview"
 
+@spaces.GPU(duration=120) # Allocate GPU for 120 seconds (adjust based on your video length)
 def generate_video(input_img, should_crop_face, expand_x, expand_y, offset_x, offset_y, input_video_type, input_video, input_npy_select, input_npy, input_video_frames,
                    settings_steps, settings_cfg_scale, settings_seed, resolution_w, resolution_h,
                    model_step, custom_output_path, use_custom_fps, output_fps, callback_steps, context_frames, context_stride, context_overlap, context_batch_size, anomaly_action,intropolate_factor):
@@ -278,9 +280,10 @@ with gr.Blocks() as demo:
         gpu_name = torch.cuda.get_device_name(0)
         gpu_info = f"🚀 **GPU Enabled**: {gpu_name}"
     else:
-        gpu_info = "⚠️ **Running on CPU** (Generation will be slower)"
+        # ZeroGPU mode - GPU allocated on-demand
+        gpu_info = "⚡ **ZeroGPU Mode**: GPU will be allocated automatically when generating"
 
-    gr.Markdown(f"<div style='text-align: center; padding: 10px; background-color: #f0f0f0; border-radius: 5px;'>{gpu_info}</div>")
+    gr.Markdown(f"<div style='text-align: center; padding: 10px; background-color: #e8f5e9; border-radius: 5px; border: 1px solid #4caf50;'>{gpu_info}</div>")
 
     gr.Markdown("""
     <div style='text-align: center; padding: 20px; background-color: #f8f9fa; border-radius: 10px; margin: 10px 0; border: 2px solid #e0e0e0;'>
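For readers unfamiliar with ZeroGPU: on a Hugging Face Space with ZeroGPU hardware, the `spaces` package provides the `spaces.GPU` decorator used above, and a GPU is attached to the process only while a decorated function runs; at import time (where the startup banner is printed) `torch.cuda.is_available()` typically reports False, which is why this commit rewords the CPU warning rather than removing it. Below is a minimal, self-contained sketch of that pattern, not code from this repository; the names `run_on_gpu`, `prompt_in`, `result_out` and the demo strings are illustrative assumptions.

import gradio as gr
import spaces  # import early, before any CUDA initialization
import torch

@spaces.GPU(duration=120)  # a GPU is attached for up to ~120 s per call
def run_on_gpu(prompt):
    # On ZeroGPU, CUDA is only visible inside the decorated function.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    return f"ran on {device}: {prompt}"

with gr.Blocks() as demo:
    prompt_in = gr.Textbox(label="Prompt")
    result_out = gr.Textbox(label="Result")
    gr.Button("Generate").click(run_on_gpu, inputs=prompt_in, outputs=result_out)

demo.launch()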
requirements.txt CHANGED
@@ -26,3 +26,4 @@ xformers==0.0.23
 scikit-image
 deepspeed==0.13.1
 gradio==4.39.0
+spaces