# Spaces: Runtime error
# (Status banner from the Hugging Face Spaces page, captured along with the
# paste; kept here as a comment so the file remains valid Python.)
# --- Application setup ---------------------------------------------------
# NOTE(review): the original paste wrapped every line in markdown table
# pipes ("| ... | |"), which is a syntax error; restored to runnable Python.
import io
import base64
import os
import random

from fastapi import FastAPI
from pydantic import BaseModel
from PIL import Image

print(">>> importing optimum.intel.openvino ...")
from optimum.intel.openvino import OVStableDiffusionPipeline
print(">>> import OK")

# Model is configurable via the MODEL_ID env var; defaults to an INT8
# OpenVINO export of Stable Diffusion v1.5 (CPU-friendly quantized weights).
MODEL_ID = os.environ.get("MODEL_ID", "OpenVINO/stable-diffusion-v1-5-int8-ov")

print("Loading model ...")
pipe = OVStableDiffusionPipeline.from_pretrained(MODEL_ID)
pipe.reshape(512, 512)  # fix static input shapes — better for CPU inference
pipe.compile()
print("Model loaded.")

app = FastAPI(title="Txt2Img CPU API")
class Req(BaseModel):
    """Request body for the text-to-image endpoint.

    NOTE(review): demangled from a markdown-table paste ("| ... | |");
    field names, types, and defaults are unchanged.
    """

    prompt: str
    negative_prompt: str | None = None
    steps: int = 20           # forwarded as num_inference_steps
    guidance: float = 7.5     # classifier-free guidance scale
    seed: int | None = None   # optional; seeds Python's global RNG only
    width: int = 512
    height: int = 512
@app.get("/health")
def health():
    """Liveness probe: always returns {"ok": True}.

    NOTE(review): the route decorator was absent in the pasted source —
    presumably lost in the copy (the function was never registered with
    FastAPI); restored here. Confirm the intended path.
    """
    return {"ok": True}
@app.post("/txt2img")
def txt2img(r: Req):
    """Generate one 512x512-default image from a text prompt.

    Returns a JSON-serializable dict with the PNG image base64-encoded
    under "image_base64".

    NOTE(review): the route decorator was absent in the pasted source —
    presumably lost in the copy; restored here. Confirm the intended path.
    """
    # The OpenVINO pipeline takes no torch.Generator; seeding Python's
    # global RNG is the (best-effort, optional) reproducibility hook here.
    if r.seed is not None:
        random.seed(r.seed)
    image = pipe(
        prompt=r.prompt,
        negative_prompt=r.negative_prompt,
        num_inference_steps=r.steps,
        guidance_scale=r.guidance,
        width=r.width,
        height=r.height,
    ).images[0]
    # Encode the PIL image as PNG in memory, then base64 for JSON transport.
    buf = io.BytesIO()
    image.save(buf, format="PNG")
    return {"image_base64": base64.b64encode(buf.getvalue()).decode()}