import gradio as gr
from transformers import pipeline
# --- Model Loading ---
# NOTE: "text2text-generation" expects a seq2seq language model; if MODEL_ID
# points to a different kind of checkpoint (the ID suggests a Stable Diffusion
# ControlNet), loading will fail and the except branch below keeps the app alive.
MODEL_ID = "MadhavRupala/SD15-ControlNet"

print(f"Loading model '{MODEL_ID}' on CPU...")
try:
    # device=-1 pins inference to the CPU.
    pipe = pipeline("text2text-generation", model=MODEL_ID, device=-1)
    print("✅ Model loaded successfully!")
except Exception as e:
    print(f"❌ Error loading model: {e}")
    pipe = None
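
# For reference, a "text2text-generation" pipeline returns a list of dicts with
# a 'generated_text' key, which is why run_inference below indexes result[0].
# A hypothetical example (assumes a seq2seq model loaded successfully):
#   pipe("translate English to German: Hello")
#   -> [{'generated_text': 'Hallo'}]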

# --- Inference Function ---
# The parameter name 'prompt' is important: API clients pass it by keyword.
def run_inference(prompt: str):
    if pipe is None:
        return "Error: The model could not be loaded."
    if not prompt:
        return "Please enter a prompt."
    try:
        result = pipe(prompt, max_length=100)
        return result[0]["generated_text"]
    except Exception as e:
        return f"An error occurred during inference: {e}"

# --- Gradio Interface ---
iface = gr.Interface(
    fn=run_inference,
    title=f"🤖 Self-Hosted Model API: `{MODEL_ID}`",
    description="This Space hosts a model for unlimited access.",
    inputs=gr.Textbox(label="Your Prompt", lines=4),
    outputs=gr.Textbox(label="Model Response", lines=4),
    allow_flagging="never",
)

iface.launch()
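
# --- Client Usage (illustrative) ---
# A minimal sketch of calling this Space from Python with gradio_client, kept as
# comments because iface.launch() blocks the main thread. Assumptions not in this
# file: gradio_client is installed, and "MadhavRupala/space-name" stands in for
# the real Space ID. gr.Interface exposes the function at the "/predict" endpoint
# by default, and the keyword must match the function's parameter name, 'prompt'.
#
#   from gradio_client import Client
#
#   client = Client("MadhavRupala/space-name")  # hypothetical Space ID
#   result = client.predict(prompt="Hello!", api_name="/predict")
#   print(result)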