MadhavRupala committed on
Commit
e1ca57e
·
verified ·
1 Parent(s): be63571

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -7
app.py CHANGED
@@ -1,14 +1,36 @@
1
  import gradio as gr
 
2
 
3
- def simple_greet(name):
4
- return f"Hello, {name}! The API is working."
 
 
 
 
 
 
 
5
 
6
- # Using gr.Interface is the most reliable way to create a functional API endpoint.
 
 
 
 
 
 
 
 
 
 
 
 
 
7
  iface = gr.Interface(
8
- fn=simple_greet,
9
- title="API Test",
10
- inputs=gr.Textbox(label="Your Name"),
11
- outputs=gr.Textbox(label="Greeting"),
 
12
  allow_flagging="never"
13
  )
14
 
 
import gradio as gr
from transformers import pipeline

# --- Model Loading ---
# NOTE(review): MODEL_ID names a Stable Diffusion ControlNet repo, yet it is
# loaded through a transformers "text2text-generation" pipeline — confirm the
# repo actually hosts a text2text model, otherwise loading fails and `pipe`
# stays None (the app still starts; run_inference reports the error instead).
MODEL_ID = "MadhavRupala/SD15-ControlNet"
print(f"Loading model '{MODEL_ID}' on CPU...")
try:
    # device=-1 pins inference to CPU.
    pipe = pipeline("text2text-generation", model=MODEL_ID, device=-1)
    print("✅ Model loaded successfully!")
except Exception as e:  # deliberately broad: keep the Space alive on load failure
    print(f"❌ Error loading model: {e}")
    pipe = None
 
14
# --- Inference Function ---
# The parameter name 'prompt' is important for the client.
def run_inference(prompt: str):
    """Generate text for *prompt* with the module-level pipeline.

    Never raises: every failure path is converted into a human-readable
    string so the Gradio UI (and API clients) always receive a response.
    """
    # Guard clauses: model failed to load at startup, or empty input.
    if pipe is None:
        return "Error: The model could not be loaded."
    if not prompt:
        return "Please enter a prompt."
    try:
        outputs = pipe(prompt, max_length=100)
        # The pipeline returns a list of dicts; take the first generation.
        return outputs[0]['generated_text']
    except Exception as e:
        return f"An error occurred during inference: {e}"
26
+
27
# --- Gradio Interface ---
# gr.Interface auto-exposes an API endpoint mirroring run_inference's
# signature, which is what external clients of this Space call.
iface = gr.Interface(
    fn=run_inference,
    title=f"🤖 Self-Hosted Model API: `{MODEL_ID}`",
    description="This Space hosts a model for unlimited access.",
    inputs=gr.Textbox(label="Your Prompt", lines=4),
    outputs=gr.Textbox(label="Model Response", lines=4),
    # NOTE(review): allow_flagging is deprecated in Gradio 4.x in favor of
    # flagging_mode — confirm the gradio version pinned for this Space.
    allow_flagging="never"
)
36