Bman21 committed
Commit 4360b77 · verified · 1 Parent(s): f124ba5

Create app.py

Files changed (1)
  1. app.py +39 -0
app.py ADDED
@@ -0,0 +1,39 @@
+ import gradio as gr
+ from transformers import pipeline
+
+ # Load the model locally
+ MODEL = "microsoft/DialoGPT-small"
+
+ print("🔄 Loading medical model locally...")
+ medical_tutor = pipeline(
+     "text-generation",
+     model=MODEL,
+     device=-1,  # Use CPU
+     torch_dtype="auto"
+ )
+ print("✅ Model loaded!")
+
+ def chat(message, history):
+     # Simple medical tutoring prompt
+     prompt = f"""You are a medical tutor. Provide educational information about: {message}
+
+ Remember: This is for learning purposes only, not medical advice.
+ Answer:"""
+
+     response = medical_tutor(
+         prompt,
+         max_new_tokens=150,
+         temperature=0.7,
+         do_sample=True,
+         pad_token_id=medical_tutor.tokenizer.eos_token_id
+     )[0]['generated_text']
+
+     # Extract just the answer part
+     answer = response.split("Answer:")[-1].strip()
+     return answer
+
+ gr.ChatInterface(
+     chat,
+     title="🩺 Medical Tutor",
+     examples=["Explain how vaccines work", "What is DNA?", "How does the heart work?"]
+ ).launch(server_port=7860)
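
Once this app.py is running (for example via python app.py, which serves on port 7860), the chat endpoint can also be exercised programmatically. Below is a minimal sketch using gradio_client, assuming a Gradio 4.x environment where gr.ChatInterface exposes its default "/chat" API route and the app is reachable on localhost; adjust the URL and api_name if your setup differs.

# Minimal sketch: query the running Medical Tutor app from Python.
# Assumes the app.py from this commit is already running on localhost:7860
# and that the ChatInterface exposes its default "/chat" endpoint (Gradio 4.x).
from gradio_client import Client

client = Client("http://127.0.0.1:7860")

# ChatInterface endpoints take the user message; chat history is tracked server-side.
answer = client.predict(
    "Explain how vaccines work",
    api_name="/chat",
)
print(answer)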