import os
import uuid

import gradio as gr
import torch
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
from diffusers.utils import export_to_video
# Pick the compute device up front. NOTE(review): this handle is never read by
# generate_video (the pipeline manages placement via enable_model_cpu_offload);
# kept for compatibility with any external importers.
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
def generate_video(prompt, num_inference_steps=25, num_frames=200):
    """Generate a video from a text prompt and return the saved file path.

    Parameters
    ----------
    prompt : str
        Text description of the video to synthesize.
    num_inference_steps : int, optional
        Number of denoising steps (default 25, matching the original code).
    num_frames : int, optional
        Number of frames to synthesize (default 200, matching the original code).

    Returns
    -------
    str
        Path to the exported ``.mp4`` file under ``./generated_videos``.
    """
    # Build the pipeline once and cache it on the function object: the original
    # reconstructed (and potentially re-downloaded) the 1.7B-parameter pipeline
    # on every single request.
    pipe = getattr(generate_video, "_pipe", None)
    if pipe is None:
        pipe = DiffusionPipeline.from_pretrained(
            "damo-vilab/text-to-video-ms-1.7b",
            torch_dtype=torch.float16,
            variant="fp16",
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        # Reduce peak GPU memory: offload idle submodules to CPU and decode
        # the VAE in slices instead of in one pass.
        pipe.enable_model_cpu_offload()
        pipe.enable_vae_slicing()
        generate_video._pipe = pipe

    # NOTE(review): recent diffusers versions return frames batched
    # ([batch, frames, ...]); if so, frames[0] should be exported — confirm
    # against the installed diffusers version.
    video_frames = pipe(
        prompt,
        num_inference_steps=num_inference_steps,
        num_frames=num_frames,
    ).frames

    # Store outputs under ./generated_videos, creating it if needed.
    video_directory = os.path.join(os.getcwd(), "generated_videos")
    os.makedirs(video_directory, exist_ok=True)

    # Unique filename per call: the original always wrote generated_video.mp4,
    # so concurrent or successive requests overwrote each other's output.
    filename = f"generated_video_{uuid.uuid4().hex}.mp4"
    return export_to_video(video_frames, os.path.join(video_directory, filename))
# Minimal Gradio UI: one text prompt in, one video file out.
demo = gr.Interface(fn=generate_video, inputs="text", outputs="video")

if __name__ == "__main__":
    # Guard the launch so importing this module (e.g. from tests or another
    # app) does not start a server; share=True still opens a public share
    # link when the script is run directly, as before.
    demo.launch(share=True)