Shresthh03 committed on
Commit d788444 · verified · 1 Parent(s): 7c8028e

Update it.

Files changed (1)
  1. app.py +134 -469
app.py CHANGED
@@ -1,487 +1,152 @@
  import os
  import json
- import re
- import random
- import datetime
  from flask import Flask, request, jsonify, send_from_directory
-
- # Try optional packages
- try:
-     from transformers import pipeline
-     HF_AVAILABLE = True
- except Exception:
-     HF_AVAILABLE = False
-
- try:
-     import requests
-     REQ_AVAILABLE = True
- except Exception:
-     REQ_AVAILABLE = False
-
- # Optional OpenAI usage for richer replies
- try:
-     import openai
-     OPENAI_AVAILABLE = bool(os.environ.get("OPENAI_API_KEY"))
-     if OPENAI_AVAILABLE:
-         openai.api_key = os.environ.get("OPENAI_API_KEY")
- except Exception:
-     OPENAI_AVAILABLE = False
-
- app = Flask(__name__, static_folder=".", static_url_path="/")
-
- # ---------- Config ----------
- MEMORY_FILE = "session_memory.json"
- MEMORY_RETENTION_DAYS = 15
- CRISIS_TERMS = [
-     "suicide", "kill myself", "end my life", "i want to die", "hurt myself",
-     "can't go on", "cant go on", "i don't want to live", "i dont want to live"
- ]
- HELPLINES = {
-     "IN": "🇮🇳 India: AASRA Helpline 91-9820466726",
-     "US": "🇺🇸 USA: Call or text 988 (Suicide & Crisis Lifeline)",
-     "GB": "🇬🇧 UK: Samaritans 116 123",
-     "CA": "🇨🇦 Canada: Talk Suicide Canada 1-833-456-4566",
-     "AU": "🇦🇺 Australia: Lifeline 13 11 14",
-     "DEFAULT": "If you are in crisis, please contact your local emergency number or visit https://findahelpline.com"
- }
-
- # ---------- Optional HF emotion model (heavy) ----------
- emotion_model = None
- if HF_AVAILABLE:
-     try:
-         emotion_model = pipeline("text-classification",
-                                  model="j-hartmann/emotion-english-distilroberta-base",
-                                  top_k=5)
-     except Exception:
-         emotion_model = None
-
- # ---------- Memory helpers ----------
- def load_memory():
-     if os.path.exists(MEMORY_FILE):
-         try:
-             with open(MEMORY_FILE, "r") as f:
-                 data = json.load(f)
-         except Exception:
-             data = {}
-     else:
-         data = {}
-     # prune old
-     cutoff = datetime.datetime.utcnow() - datetime.timedelta(days=MEMORY_RETENTION_DAYS)
-     keep = {}
-     for k, v in data.items():
-         try:
-             t = datetime.datetime.fromisoformat(v.get("last_seen"))
-             if t >= cutoff:
-                 keep[k] = v
-         except Exception:
-             keep[k] = v
-     return keep
-
- def save_memory(mem):
-     with open(MEMORY_FILE, "w") as f:
-         json.dump(mem, f, indent=2)
-
- memory = load_memory()
-
- # ---------- small NLP helpers ----------
- name_patterns = [
-     r"^(?:i am|i'm|im|i’m)\s+([A-Za-z][A-Za-z '-]{1,40})",
-     r"my name is\s+([A-Za-z][A-Za-z '-]{1,40})",
-     r"^([A-Z][a-z]{1,30})$"
- ]
- def extract_name(text):
-     text = text.strip()
-     for p in name_patterns:
-         m = re.search(p, text, flags=re.IGNORECASE)
-         if m:
-             name = m.group(1).strip()
-             return " ".join([w.capitalize() for w in name.split()])
-     return None
-
- def extract_age(text):
-     nums = re.findall(r"\b([1-9][0-9]?)\b", text)
-     for n in nums:
-         v = int(n)
-         if 8 <= v <= 120:
-             return v
      return None

- def is_crisis(text):
-     low = text.lower()
-     return any(term in low for term in CRISIS_TERMS)
-
- def helpline_for_request(remote_addr):
-     # best-effort country lookup via ipapi
-     try:
-         if REQ_AVAILABLE:
-             ip = remote_addr if remote_addr and ":" not in remote_addr else ""
-             url = "https://ipapi.co/json/" if not ip else f"https://ipapi.co/{ip}/json/"
-             r = requests.get(url, timeout=2)
-             if r.status_code == 200:
-                 data = r.json()
-                 code = data.get("country_code", "").upper()
-                 return HELPLINES.get(code, HELPLINES["DEFAULT"])
-     except Exception:
-         pass
-     return HELPLINES["DEFAULT"]
-
- def classify_emotion(text):
-     # Try HF if available
-     if emotion_model:
-         try:
-             out = emotion_model(text)
-             # pipeline returns list or list of lists; get top label
-             first = out[0]
-             if isinstance(first, list):
-                 label = first[0]["label"]
-             else:
-                 label = first["label"]
-             return label.lower()
-         except Exception:
-             pass
-     # fallback heuristics
-     low = text.lower()
-     if any(w in low for w in ["happy","glad","joy","great","good","awesome","fine"]):
-         return "joy"
-     if any(w in low for w in ["sad","down","depressed","unhappy","lonely","cry","miserable"]):
-         return "sadness"
-     if any(w in low for w in ["angry","mad","furious","annoyed","irritat"]):
-         return "anger"
-     if any(w in low for w in ["scared","afraid","anxious","panic","worried"]):
-         return "fear"
-     if any(w in low for w in ["love","loving","cherish","fond"]):
-         return "love"
-     return "neutral"
-
- # ---------- Intent detection (simple rules) ----------
- def detect_intent(text):
-     t = text.lower().strip()
-     # Crisis
-     if is_crisis(t):
-         return "CRISIS"
-     # Asking about bot
-     if any(q in t for q in ["how are you", "how're you", "how r you", "how you doing", "are you okay", "are you mad", "are you upset", "are you mad?"]):
-         return "QUESTION_ABOUT_BOT"
-     # Requests for motivation/guidance
-     if any(w in t for w in ["motivate", "motivation", "guidance", "inspire", "give me guidance", "need motivation", "help me be motivated"]):
-         return "REQUEST_MOTIVATION"
-     # Casual chit-chat / teasing / slang
-     if any(w in t for w in ["lol","haha","hahaha","jk","bro","dude","whats up","what's up","have you gone mad","are you mad","r u mad","you mad"]):
-         return "CASUAL"
-     # If user mentions feelings -> support
-     if any(w in t for w in ["sad","down","depressed","anxious","anxiety","lonely","hurt","upset","tired","stressed","stressing","stress"]):
-         return "SUPPORT"
-     # Else neutral casual fallback for short utterances
-     if len(t.split()) <= 6:
-         return "CASUAL"
-     return "SUPPORT" # prefer support for longer introspective messages
-
- # ---------- Non-repetitive response manager ----------
- def pick_nonrepetitive(session_slot, bucket):
-     """Pick a reply from bucket avoiding recent repeats stored in session_slot['recent_replies']"""
-     recent = session_slot.get("recent_replies", [])
-     choices = [x for x in bucket if x not in recent]
-     if not choices:
-         # all used recently — clear memory a bit and reuse
-         session_slot["recent_replies"] = []
-         choices = bucket[:]
-     pick = random.choice(choices)
-     # append to recent (keep last 6)
-     recent.insert(0, pick)
-     session_slot["recent_replies"] = recent[:6]
-     return pick
-
- # ---------- Reply templates ----------
- CASUAL_REPLY_TEMPLATES = [
-     "Haha, you crack me up — tell me more!",
-     "Oh wow, that’s a curveball 😄 What made you say that?",
-     "I’m here and very curious — go on.",
-     "Haha, I might be a little wired but never mad — what's up?",
-     "I love that energy. Want to tell me more about it?",
-     "You’re funny — but seriously, how are you really?",
-     "Haha, okay I see you. What else?"
- ]
-
- SUPPORT_OPENERS = [
-     "That sounds heavy — thank you for trusting me with that.",
-     "I can feel how much that impacted you. I'm listening.",
-     "You handled a lot there; I'm glad you told me.",
-     "That must have been difficult. Tell me more, if you want."
- ]
-
- SUPPORT_FOLLOWUPS = [
-     "Would you like to talk about what might help a little today?",
-     "How has this been affecting your daily life?",
-     "What usually helps you when things feel this way?",
-     "Would you prefer a calming exercise or a few practical steps?"
- ]
-
- MOTIVATIONAL_SNIPPETS = [
-     "Even small steps count — you don't need to fix everything at once.",
-     "You’ve come so far already. One gentle step at a time.",
-     "Rest is allowed. Healing isn’t a straight line.",
-     "Breathe — you’re doing better than you think."
- ]

- BOT_SELF_REPLIES = [
-     "I'm doing well — talking to you brightens my loop! How about you?",
-     "Feeling calm and ready to listen how are you today?",
-     "I’m good! Just here with an open ear for you.",
-     "Doing okay — I was thinking about how to support you better. What’s up?"
- ]

- # ---------- OpenAI prompt builder (for mixed persona) ----------
- PERSONA_TEXT = {
-     "calm_male": "A calm masculine-tone voice: steady, grounding, gentle; use short reassuring phrases.",
-     "deep_male": "A deep male-tone: slow, resonant, and calming.",
-     "soothing_male": "A soothing male-tone: mellow and kind.",
-     "gentle_female": "A gentle female-tone: tender and nurturing.",
-     "feminine_female": "A bright feminine-tone: warm and encouraging.",
-     "deep_female": "A deeper female-tone: soulful and empathetic.",
-     "soothing_female": "A soothing female-tone: calm and steady.",
-     "neutral": "A neutral friendly-tone: balanced, soft, non-gendered."
- }

- def build_openai_prompt(personality_id, session_slot):
-     persona = PERSONA_TEXT.get(personality_id, PERSONA_TEXT["neutral"])
-     memory_note = ""
-     if session_slot.get("name"):
-         memory_note += f" The user is named {session_slot.get('name')}."
-     if session_slot.get("last_mood"):
-         memory_note += f" Recent mood: {session_slot.get('last_mood')}."
-     system = (
-         "You are Serenity, a warm compassionate emotional support companion. "
-         "Be empathetic, avoid repeating the same short phrases like 'I understand', and vary vocabulary. "
-         "Keep replies concise when the user seems distressed; be chatty when the user is casual. "
-         + persona + memory_note
-         + " If user asks casual questions about you, answer briefly and pivot back to supporting the user."
-     )
-     return system

- def openai_reply(user_message, personality_id, session_slot):
-     if not OPENAI_AVAILABLE:
-         return None
-     system_prompt = build_openai_prompt(personality_id, session_slot)
      try:
-         resp = openai.ChatCompletion.create(
-             model = os.environ.get("OPENAI_MODEL", "gpt-4o-mini"),
-             messages = [
-                 {"role":"system", "content": system_prompt},
-                 {"role":"user", "content": user_message}
              ],
-             temperature = 0.85,
-             max_tokens = 350
          )
-         text = resp.choices[0].message.content.strip()
-         return text
-     except Exception:
-         return None

- # ---------- Routes ----------
  @app.route("/")
  def index():
      return send_from_directory(".", "index.html")

- @app.route("/chat", methods=["POST"])
- def chat():
-     global memory
-     data = request.get_json() or {}
-     session = data.get("session") or request.remote_addr or "default_session"
-     message = (data.get("message") or "").strip()
-     personality = (data.get("personality") or data.get("voice_profile") or "neutral")
-     init_flag = data.get("init", False)
-
-     # ensure slot exists
-     slot = memory.get(session, {})
-     now = datetime.datetime.utcnow().isoformat()
-     if not slot:
-         slot = {"name": None, "age": None, "last_mood": None, "last_seen": now, "recent_replies": [], "history": []}
-
-     # If init requested, send greeting or follow-up
-     if init_flag:
-         slot["last_seen"] = now
-         memory[session] = slot
-         save_memory(memory)
-         if not slot.get("name"):
-             return jsonify({"reply":"Hey — I'm Serenity. What's your name?", "emotion":"calm", "intent":"INIT"})
-         else:
-             last_mood = slot.get("last_mood")
-             last_seen = slot.get("last_seen")
-             try:
-                 t = datetime.datetime.fromisoformat(last_seen)
-                 if last_mood in ("sadness","anger","fear") and (datetime.datetime.utcnow() - t).days <= MEMORY_RETENTION_DAYS:
-                     return jsonify({"reply":f"Hey {slot.get('name')}, I remember you were feeling down last time. How are you today?", "emotion":"warm", "intent":"FOLLOWUP"})
-             except Exception:
-                 pass
-             return jsonify({"reply":f"Welcome back {slot.get('name')} — what’s on your mind?", "emotion":"calm", "intent":"INIT"})
-
-     # If empty message
-     if not message:
-         return jsonify({"reply":"I'm here — whenever you're ready, tell me what's on your mind.", "emotion":"neutral", "intent":"NONE"})

-     # Handle awaiting name/age
-     awaiting = slot.get("awaiting")
-     if not slot.get("name") and not awaiting:
-         # try to extract name
-         name = extract_name(message)
-         if name:
-             slot["name"] = name
-             slot["awaiting"] = "age"
-             slot["last_seen"] = now
-             memory[session] = slot
-             save_memory(memory)
-             return jsonify({"reply":f"Nice to meet you, {name}! How old are you?", "emotion":"curious", "intent":"ASK_AGE"})
-         else:
-             slot["awaiting"] = "name"
-             slot["last_seen"] = now
-             memory[session] = slot
-             save_memory(memory)
-             return jsonify({"reply":"Hey — what should I call you? What's your name?", "emotion":"calm", "intent":"ASK_NAME"})
-
-     if awaiting == "name":
-         guessed = extract_name(message) or message.split()[0].capitalize()
-         slot["name"] = guessed
-         slot.pop("awaiting", None)
-         slot["awaiting"] = "age"
-         slot["last_seen"] = now
-         memory[session] = slot
-         save_memory(memory)
-         return jsonify({"reply":f"Lovely, {guessed}. How old are you?", "emotion":"curious", "intent":"ASK_AGE"})
-
-     if awaiting == "age":
-         age = extract_age(message)
-         if age:
-             slot["age"] = age
-             slot.pop("awaiting", None)
-             slot["last_seen"] = now
-             memory[session] = slot
-             save_memory(memory)
-             return jsonify({"reply":f"Thanks. {slot.get('name')}, how have you been feeling lately?", "emotion":"curious", "intent":"ASK_MOOD"})
-         else:
-             return jsonify({"reply":"Could you tell me your age as a number (for example, 24)?", "emotion":"neutral", "intent":"ASK_AGE"})
-
-     # Crisis detection
-     if is_crisis(message):
-         slot["last_mood"] = "crisis"
-         slot["last_seen"] = now
-         memory[session] = slot
-         save_memory(memory)
-         helpline = helpline_for_request(request.remote_addr)
-         reply = f"I’m really concerned about how you're feeling. You are not alone. Please consider contacting emergency services or this helpline: {helpline}"
-         return jsonify({"reply":reply, "emotion":"crisis", "intent":"CRISIS"})
-
-     # Detect intent
-     intent = detect_intent(message)
-
-     # If user asks about the bot (casual)
-     if intent == "QUESTION_ABOUT_BOT":
-         # friendly, human-like small talk (Option A)
-         bot_reply = random.choice(BOT_SELF_REPLIES)
-         # briefly ask how user is to pivot back
-         pivot = random.choice(["How are you doing right now?", "And how about you?"])
-         reply = f"{bot_reply} {pivot}"
-         # update memory and return
-         slot["last_mood"] = classify_emotion(message)
-         slot["last_seen"] = now
-         memory[session] = slot
-         save_memory(memory)
-         return jsonify({"reply": reply, "emotion": slot["last_mood"], "intent": "QUESTION_ABOUT_BOT"})
-
-     # If casual intent -> casual friendly replies (Option A)
-     if intent == "CASUAL":
-         # Use OpenAI if available to make it more natural
-         if OPENAI_AVAILABLE:
-             o = openai_reply := openai_reply = None
-             # Use a short, casual prompt
-             try:
-                 system = ("You are a friendly, informal companion. Answer casually, with light humor when appropriate, "
-                           "be brief and natural. Avoid repeating previous phrasing. If the user is distressed, shift to empathy.")
-                 resp = openai.ChatCompletion.create(
-                     model = os.environ.get("OPENAI_MODEL","gpt-4o-mini"),
-                     messages = [
-                         {"role":"system", "content": system},
-                         {"role":"user", "content": message}
-                     ],
-                     temperature = 0.8,
-                     max_tokens = 150
-                 )
-                 text = resp.choices[0].message.content.strip()
-                 # little safety: if the AI returns a generic empathetic one-liner only, diversify
-                 if text.lower() in ("i understand", "i see", "okay"):
-                     text = pick_nonrepetitive(slot, CASUAL_REPLY_TEMPLATES)
-                 slot["last_mood"] = classify_emotion(message)
-                 slot["last_seen"] = now
-                 # store reply to avoid repetition
-                 slot.setdefault("recent_replies", [])
-                 slot["recent_replies"].insert(0, text)
-                 slot["recent_replies"] = slot["recent_replies"][:6]
-                 slot.setdefault("history", []).append({"in": message, "out": text, "time": now, "intent": intent})
-                 slot["history"] = slot["history"][-40:]
-                 memory[session] = slot
-                 save_memory(memory)
-                 return jsonify({"reply": text, "emotion": slot["last_mood"], "intent": intent})
-             except Exception:
-                 # fallback to templates
-                 text = pick_nonrepetitive(slot, CASUAL_REPLY_TEMPLATES)
-                 slot["last_mood"] = classify_emotion(message)
-                 slot["last_seen"] = now
-                 memory[session] = slot
-                 save_memory(memory)
-                 return jsonify({"reply": text, "emotion": slot["last_mood"], "intent": intent})
-         else:
-             text = pick_nonrepetitive(slot, CASUAL_REPLY_TEMPLATES)
-             slot["last_mood"] = classify_emotion(message)
-             slot["last_seen"] = now
-             memory[session] = slot
-             save_memory(memory)
-             return jsonify({"reply": text, "emotion": slot["last_mood"], "intent": intent})
-
-     # Request motivation
-     if intent == "REQUEST_MOTIVATION":
-         reply = pick_nonrepetitive(slot, MOTIVATIONAL_SNIPPETS)
-         slot["last_mood"] = classify_emotion(message)
-         slot["last_seen"] = now
-         memory[session] = slot
-         save_memory(memory)
-         return jsonify({"reply": reply, "emotion": slot["last_mood"], "intent": intent})
-
-     # Support (default)
-     # Try OpenAI with persona if available
-     if OPENAI_AVAILABLE:
-         ai_text = openai_reply(message, personality, slot)
-         if ai_text:
-             # avoid robotic single-line responses
-             if ai_text.strip().lower() in ("i understand","i see","okay","i'm sorry to hear that"):
-                 ai_text = pick_nonrepetitive(slot, SUPPORT_OPENERS)
-             emotion = classify_emotion(message)
-             slot["last_mood"] = emotion
-             slot.setdefault("recent_replies", [])
-             slot["recent_replies"].insert(0, ai_text)
-             slot["recent_replies"] = slot["recent_replies"][:6]
-             slot.setdefault("history", []).append({"in": message, "out": ai_text, "time": now, "intent": intent})
-             slot["history"] = slot["history"][-40:]
-             slot["last_seen"] = now
-             memory[session] = slot
-             save_memory(memory)
-             return jsonify({"reply": ai_text, "emotion": emotion, "intent": intent})
-         # else fall through to template fallback
-
-     # Fallback supportive templated reply
-     opener = pick_nonrepetitive(slot, SUPPORT_OPENERS)
-     follow = pick_nonrepetitive(slot, SUPPORT_FOLLOWUPS)
-     # Mix small chance for motivational hint
-     if random.random() < 0.35:
-         reply = f"{opener} {random.choice(MOTIVATIONAL_SNIPPETS)} {follow}"
-     else:
-         reply = f"{opener} {follow}"
-     emotion = classify_emotion(message)
-     slot["last_mood"] = emotion
-     slot.setdefault("recent_replies", [])
-     slot["recent_replies"].insert(0, reply)
-     slot["recent_replies"] = slot["recent_replies"][:6]
-     slot.setdefault("history", []).append({"in": message, "out": reply, "time": now, "intent": intent})
-     slot["history"] = slot["history"][-40:]
-     slot["last_seen"] = now
-     memory[session] = slot
-     save_memory(memory)
-     return jsonify({"reply": reply, "emotion":
 
  import os
  import json
+ from datetime import datetime, timedelta
  from flask import Flask, request, jsonify, send_from_directory
+ from transformers import pipeline
+ from openai import OpenAI
+
+ app = Flask(__name__)
+ client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+
+ # Load Hugging Face emotion model
+ emotion_analyzer = pipeline("text-classification", model="j-hartmann/emotion-english-distilroberta-base", top_k=None)
+
+ USER_DATA_FILE = "user_data.json"
+
+ # ---------------------- UTILITIES -------------------------
+ def load_user_data():
+     if os.path.exists(USER_DATA_FILE):
+         with open(USER_DATA_FILE, "r") as f:
+             return json.load(f)
+     return {"name": None, "age": None, "mood": None, "last_interaction": None, "missed_days": 0}
+
+ def save_user_data(data):
+     with open(USER_DATA_FILE, "w") as f:
+         json.dump(data, f, indent=4)
+
+ def detect_emotion(text):
+     result = emotion_analyzer(text)
+     top_emotion = sorted(result[0], key=lambda x: x["score"], reverse=True)[0]["label"]
+     return top_emotion.lower()
+
+ def crisis_check(user_input, location="global"):
+     crisis_keywords = ["kill myself", "end my life", "suicide", "die", "worthless"]
+     if any(kw in user_input.lower() for kw in crisis_keywords):
+         if location == "india":
+             return ("I'm really sorry you’re feeling like this. You are not alone. "
+                     "You can reach out to AASRA Helpline at 91-9820466726 or Snehi at 91-9582208181.")
+         elif location == "us":
+             return ("It sounds like you’re going through a really difficult time. "
+                     "If you are in the U.S., please call or text 988 to connect with the Suicide and Crisis Lifeline.")
+         else:
+             return ("I’m so sorry you’re in pain right now. You are not alone. "
+                     "Please reach out to a local suicide helpline or emergency number right away.")
      return None

+ def days_since_last_interaction(user_data):
+     if not user_data.get("last_interaction"):
+         return None
+     last = datetime.fromisoformat(user_data["last_interaction"])
+     return (datetime.now() - last).days
+
+ # ---------------------- PERSONALITY SYSTEM -------------------------
+ PERSONALITIES = {
+     "calm": {
+         "tone": "gentle, understanding, patient",
+         "style": "Uses short, soft phrases and empathy-driven responses."
+     },
+     "friendly": {
+         "tone": "warm, chatty, and supportive",
+         "style": "Uses casual language and light humor to uplift users."
+     },
+     "deep": {
+         "tone": "reflective, philosophical, soulful",
+         "style": "Encourages self-reflection and growth."
+     },
+     "spiritual": {
+         "tone": "peaceful, grounding, and nurturing",
+         "style": "Focuses on mindfulness, acceptance, and compassion."
+     }
+ }

+ def generate_personality_prompt(personality):
+     p = PERSONALITIES.get(personality, PERSONALITIES["calm"])
+     return f"You are an emotional support AI with a {p['tone']} tone. {p['style']} Respond to users with empathy and variation."

+ # ---------------------- RESPONSE LOGIC -------------------------
+ @app.route("/chat", methods=["POST"])
+ def chat():
+     user_input = request.json.get("message", "")
+     personality = request.json.get("personality", "calm")
+     location = request.json.get("location", "global")

+     user_data = load_user_data()

+     # Crisis detection
+     crisis_response = crisis_check(user_input, location)
+     if crisis_response:
+         return jsonify({"response": crisis_response, "emotion": "worried"})
+
+     # Daily check-in system
+     days_passed = days_since_last_interaction(user_data)
+     if days_passed is not None:
+         if days_passed >= 3:
+             reminder = "We missed you these past few days. How have you been feeling lately?"
+         elif days_passed == 1 and user_data.get("mood") in ["sad", "angry", "worried"]:
+             reminder = f"Hey {user_data.get('name','friend')}, you seemed a bit down yesterday. How are you feeling today?"
+         else:
+             reminder = None
+     else:
+         reminder = None
+
+     # Emotion detection
+     emotion = detect_emotion(user_input)
+     user_data["mood"] = emotion
+     user_data["last_interaction"] = datetime.now().isoformat()
+     save_user_data(user_data)
+
+     # OpenAI response generation
+     system_prompt = generate_personality_prompt(personality)
+     prompt = f"""
+     The user said: "{user_input}"
+     Their name: {user_data.get('name')}
+     Their age: {user_data.get('age')}
+     Their recent mood: {user_data.get('mood')}
+     Your goal: offer empathetic emotional support, avoid repetition, vary expressions naturally.
+     """
+
+     openai_reply = None
      try:
+         response = client.chat.completions.create(
+             model="gpt-4o-mini",
+             messages=[
+                 {"role": "system", "content": system_prompt},
+                 {"role": "user", "content": prompt}
              ],
+             temperature=0.9
          )
+         openai_reply = response.choices[0].message.content.strip()
+     except Exception as e:
+         print("Error with OpenAI API:", e)
+         openai_reply = "I'm here for you, even though I’m having a bit of trouble expressing myself right now."
+
+     # Combine reminder if needed
+     if reminder:
+         final_reply = f"{reminder} {openai_reply}"
+     else:
+         final_reply = openai_reply

+     return jsonify({"response": final_reply, "emotion": emotion})
+
+ # ---------------------- FRONTEND -------------------------
  @app.route("/")
  def index():
      return send_from_directory(".", "index.html")

+ @app.route("/<path:path>")
+ def static_files(path):
+     return send_from_directory(".", path)

+ # ---------------------- RUN -------------------------
+ if __name__ == "__main__":
+     app.run(host="0.0.0.0", port=7860)
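
Quick usage sketch (not part of the diff above): once this commit's server is running on port 7860, the new /chat route can be exercised with a small client. The JSON keys (message, personality, location) and the response keys (response, emotion) come from the route in the new app.py; the sample text and the use of the requests package are assumptions for illustration.

# Hypothetical client-side check of the /chat endpoint added in this commit.
import requests

payload = {
    "message": "I've been feeling stressed about work lately.",
    "personality": "calm",   # one of: calm, friendly, deep, spiritual
    "location": "global",    # "india" or "us" switch to region-specific crisis helplines
}
resp = requests.post("http://localhost:7860/chat", json=payload, timeout=60)
data = resp.json()
print(data["emotion"])   # label from the j-hartmann emotion model
print(data["response"])  # reply from gpt-4o-mini, or the built-in fallback text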