import gradio as gr
import os
import tempfile
import time
import shutil
from typing import Optional, Union
from pathlib import Path

import requests
from huggingface_hub import InferenceClient


# -------------------------
# Utilities
# -------------------------
def cleanup_temp_files():
    """Delete temporary .mp4 files older than five minutes."""
    try:
        temp_dir = tempfile.gettempdir()
        for file_path in Path(temp_dir).glob("*.mp4"):
            try:
                if file_path.stat().st_mtime < (time.time() - 300):
                    file_path.unlink(missing_ok=True)
            except Exception:
                pass
    except Exception as e:
        print(f"Cleanup error: {e}")


def _client_from_token(token: Optional[str]) -> InferenceClient:
    if not token:
        raise gr.Error("Please sign in first. This app requires your Hugging Face login.")
    # IMPORTANT: do not set bill_to when using user OAuth tokens
    return InferenceClient(
        provider="fal-ai",
        api_key=token,
    )


def _save_bytes_as_temp_mp4(data: bytes) -> str:
    temp_file = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
    try:
        temp_file.write(data)
        temp_file.flush()
        return temp_file.name
    finally:
        temp_file.close()


# -------------------------
# Inference wrappers (no env fallback; always require LoginButton)
# -------------------------
def generate_video(
    prompt: str,
    token: gr.OAuthToken | None,
    duration: int = 8,       # kept for future use
    size: str = "1280x720",  # kept for future use
    *_  # tolerate extra event payloads
) -> Optional[str]:
    if token is None or not getattr(token, "token", None):
        raise gr.Error("Sign in with Hugging Face to continue. This app uses your inference provider credits.")
    if not prompt or not prompt.strip():
        return None

    cleanup_temp_files()
    try:
        client = _client_from_token(token.token)
        # Ensure the model id matches what users can access. Change it if you intend the provider repo.
        model_id = "akhaliq/sora-2"
        try:
            video_bytes = client.text_to_video(prompt, model=model_id)
        except Exception as e:
            # Provide a clearer message if this is an HTTP 403
            if isinstance(e, requests.HTTPError) and getattr(e.response, "status_code", None) == 403:
                raise gr.Error(
                    "Access denied by provider (403). Make sure your HF account has credits/permission "
                    f"for provider 'fal-ai' and model '{model_id}'."
                )
            raise
        return _save_bytes_as_temp_mp4(video_bytes)
    except gr.Error:
        raise
    except Exception:
        raise gr.Error("Generation failed. If this keeps happening, check your provider quota or try again later.")


def generate_video_from_image(
    image: Union[str, bytes, None],
    prompt: str,
    token: gr.OAuthToken | None,
    *_  # tolerate extra event payloads
) -> Optional[str]:
    if token is None or not getattr(token, "token", None):
        raise gr.Error("Sign in with Hugging Face to continue. This app uses your inference provider credits.")
    if not image or not prompt or not prompt.strip():
        return None

    cleanup_temp_files()
    try:
        # Load image bytes
        if isinstance(image, str):
            with open(image, "rb") as f:
                input_image = f.read()
        elif isinstance(image, (bytes, bytearray)):
            input_image = image
        else:
            return None

        client = _client_from_token(token.token)
        model_id = "akhaliq/sora-2-image-to-video"
        try:
            video_bytes = client.image_to_video(
                input_image,
                prompt=prompt,
                model=model_id,
            )
        except Exception as e:
            # Provide a clearer message if this is an HTTP 403
            if isinstance(e, requests.HTTPError) and getattr(e.response, "status_code", None) == 403:
                raise gr.Error(
                    "Access denied by provider (403). Make sure your HF account has credits/permission "
                    f"for provider 'fal-ai' and model '{model_id}'."
                )
            raise
        return _save_bytes_as_temp_mp4(video_bytes)
    except gr.Error:
        raise
    except Exception:
        raise gr.Error("Generation failed. If this keeps happening, check your provider quota or try again later.")
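
# A minimal sketch (not part of the app flow) of exercising the wrappers above with a
# plain user access token instead of the Gradio OAuth object. The token value and prompt
# are placeholders, and the model id is assumed to stay "akhaliq/sora-2":
#
#     client = _client_from_token("hf_xxx")  # hypothetical user token
#     video_bytes = client.text_to_video("a red fox running through snow", model="akhaliq/sora-2")
#     print(_save_bytes_as_temp_mp4(video_bytes))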


# -------------------------
# UI
# -------------------------
def create_ui():
    css = '''
    .logo-dark{display: none}
    .dark .logo-dark{display: block !important}
    .dark .logo-light{display: none}
    #sub_title{margin-top: -20px !important}
    .notice {
        background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
        color: white;
        padding: 14px 16px;
        border-radius: 12px;
        margin: 18px auto 6px;
        max-width: 860px;
        text-align: center;
        font-size: 0.98rem;
    }
    '''

    with gr.Blocks(title="Sora-2 (uses your provider credits)", theme=gr.themes.Soft(), css=css) as demo:
        gr.HTML("""
            <div style="text-align: center;">
                <p>Generate videos via the Hugging Face Inference API (provider: fal-ai)</p>
                <p>Built with anycoder</p>
            </div>
        """)

        login_btn = gr.LoginButton("Sign in with Hugging Face")

        # Text -> Video
        with gr.Row():
            with gr.Column(scale=1):
                prompt_input = gr.Textbox(
                    label="Enter your prompt",
                    placeholder="Describe the video you want to create…",
                    lines=4,
                    elem_id="prompt-text-input",
                )
                generate_btn = gr.Button("🎥 Generate Video", variant="primary")
            with gr.Column(scale=1):
                video_output = gr.Video(
                    label="Generated Video",
                    height=400,
                    interactive=False,
                    show_download_button=True,
                    elem_id="text-to-video",
                )

        # Order of inputs: prompt, token
        generate_btn.click(
            fn=generate_video,
            inputs=[prompt_input, login_btn],
            outputs=[video_output],
        )

        # Image -> Video
        gr.HTML("""Turn a single image into a short video with a guiding prompt.