```python
import torch
from diffusers import FluxPipeline

try:
    model_id = "black-forest-labs/FLUX.1-schnell"
    pipe = FluxPipeline.from_pretrained(model_id, torch_dtype=torch.bfloat16)
    pipe = pipe.to("mps")

    # Reduce peak memory where supported (slicing is applied only to
    # components that implement it, e.g. the VAE).
    pipe.enable_attention_slicing()
    pipe.vae.enable_slicing()

    prompt = "A cat holding a sign that says hello world"
    height, width = 768, 1360
    num_inference_steps = 4
    guidance_scale = 0.0
    max_sequence_length = 256

    with torch.inference_mode():
        image = pipe(
            prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            max_sequence_length=max_sequence_length,
        ).images[0]
    image.save("output.png")
except ImportError as e:
    print(f"An import error occurred: {e}. Please make sure you have the required libraries installed.")
except RuntimeError as e:
    if "out of memory" in str(e).lower():
        print("Out of MPS memory. Try reducing the image size or batch size.")
    else:
        print(f"A runtime error occurred: {e}")
except Exception as e:
    print(f"An error occurred: {e}")
```