vedarshatarun69 committed on
Commit
6c8dd0a
·
verified ·
1 Parent(s): 98856f8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -8
app.py CHANGED
@@ -1,20 +1,25 @@
1
from transformers import pipeline
import gradio as gr

# Small, CPU-friendly model for this demo Space.
# BUG FIX: flan-t5 is an encoder-decoder (seq2seq) model, so it must run under
# the "text2text-generation" task; "text-generation" is for causal LMs and
# fails (or misbehaves) with T5-family checkpoints.
chatbot_pipeline = pipeline("text2text-generation", model="google/flan-t5-small")
 
 
 
6
 
7
# Chat function
def chat(message, history=None):
    """Generate a reply to `message` and append the exchange to `history`.

    Returns (history, history) so Gradio can update both the Chatbot
    widget and its state from the same list.
    """
    # BUG FIX: a mutable default argument ([]) is shared across all calls,
    # so every user/session would accumulate into one list. Use None as the
    # sentinel and create a fresh list per call instead.
    if history is None:
        history = []
    # Generate the reply; max_new_tokens keeps CPU latency bounded.
    response = chatbot_pipeline(message, max_new_tokens=50)
    reply = response[0]['generated_text'].strip()
    history.append((message, reply))
    return history, history
14
 
15
  # Gradio UI
16
  with gr.Blocks() as demo:
17
- gr.Markdown("## 🤖 Permanent AI Chatbot (CPU-Friendly)")
18
  chat_ui = gr.Chatbot()
19
  msg = gr.Textbox(label="Type your message here")
20
  clear = gr.Button("Clear Chat")
 
 
1
  import gradio as gr
2
 
3
# Use the Hugging Face hosted Inference API instead of loading a model inside
# the Space (keeps the Space lightweight / CPU-friendly).
import os

import requests

API_URL = "https://api-inference.huggingface.co/models/google/flan-t5-small"

# SECURITY / BUG FIX: never commit an API token (even a placeholder) to source
# control, and never send a placeholder token — the API rejects an invalid
# "Bearer hf_XXXX..." with 401 even for public models. Read the token from the
# environment and attach the Authorization header only when one is configured.
_hf_token = os.environ.get("HF_TOKEN", "").strip()
headers = {"Authorization": f"Bearer {_hf_token}"} if _hf_token else {}
8
 
 
9
def chat(message, history=None):
    """Send `message` to the hosted Inference API and return updated history.

    Returns (history, history) so Gradio can refresh both the Chatbot
    widget and its state from the same list.
    """
    # BUG FIX: a mutable default argument ([]) is shared across every call,
    # so all users/sessions would write into one list. Use None as the
    # sentinel and build a fresh list per call.
    if history is None:
        history = []
    payload = {"inputs": message}
    try:
        response = requests.post(API_URL, headers=headers, json=payload, timeout=30)
        # BUG FIX: surface HTTP failures (401, 429, 503 "model loading", ...)
        # explicitly instead of crashing below on an unexpected JSON shape.
        response.raise_for_status()
        result = response.json()
        # The Inference API can also return {"error": ...} in the body.
        if isinstance(result, dict) and "error" in result:
            reply = f"⚠️ Error: {result['error']}"
        else:
            reply = result[0]['generated_text']
    except Exception as e:
        # Broad catch is deliberate: this is the UI boundary, and any failure
        # should become a readable chat message rather than a traceback.
        reply = f"⚠️ Error: {str(e)}"
    history.append((message, reply))
    return history, history
19
 
20
  # Gradio UI
21
  with gr.Blocks() as demo:
22
+ gr.Markdown("## 🤖 Permanent CPU-Friendly AI Chatbot")
23
  chat_ui = gr.Chatbot()
24
  msg = gr.Textbox(label="Type your message here")
25
  clear = gr.Button("Clear Chat")