Shresthh03 committed
Commit 26242a6 · verified · 1 Parent(s): cb00eb0

Update app.py

Files changed (1)
  1. app.py +323 -223
app.py CHANGED
@@ -1,51 +1,61 @@
  import os
  import json
  import random
  import datetime
- import re
  from flask import Flask, request, jsonify, send_from_directory

- # Optional: Hugging Face emotion model if available (fallback to keyword heuristics)
  try:
      from transformers import pipeline
      HF_AVAILABLE = True
  except Exception:
      HF_AVAILABLE = False

- # OpenAI for generating supportive replies
  try:
      import openai
-     OPENAI_AVAILABLE = True
-     openai.api_key = os.environ.get("OPENAI_API_KEY")
-     if not openai.api_key:
-         OPENAI_AVAILABLE = False
  except Exception:
      OPENAI_AVAILABLE = False

  app = Flask(__name__, static_folder=".", static_url_path="/")

- # ----- CONFIG -----
  MEMORY_FILE = "session_memory.json"
- MEMORY_DAYS = 15
  HELPLINES = {
      "IN": "🇮🇳 India: AASRA Helpline 91-9820466726",
      "US": "🇺🇸 USA: Call or text 988 (Suicide & Crisis Lifeline)",
      "GB": "🇬🇧 UK: Samaritans 116 123",
      "CA": "🇨🇦 Canada: Talk Suicide Canada 1-833-456-4566",
      "AU": "🇦🇺 Australia: Lifeline 13 11 14",
-     "DEFAULT": "If you are in crisis, please contact your local emergency number or use https://findahelpline.com"
  }
- CRISIS_TERMS = ["suicide","kill myself","end my life","i want to die","hurt myself","can't go on","cant go on","i don't want to live"]

- # ----- Try load HF model (optional) -----
  emotion_model = None
  if HF_AVAILABLE:
      try:
-         emotion_model = pipeline("text-classification", model="j-hartmann/emotion-english-distilroberta-base", top_k=5)
      except Exception:
          emotion_model = None

- # ----- Memory helpers -----
  def load_memory():
      if os.path.exists(MEMORY_FILE):
          try:
@@ -55,17 +65,17 @@ def load_memory():
              data = {}
      else:
          data = {}
-     # prune by date
-     cutoff = datetime.datetime.utcnow() - datetime.timedelta(days=MEMORY_DAYS)
-     out = {}
-     for k,v in data.items():
          try:
              t = datetime.datetime.fromisoformat(v.get("last_seen"))
              if t >= cutoff:
-                 out[k] = v
          except Exception:
-             out[k] = v
-     return out

  def save_memory(mem):
      with open(MEMORY_FILE, "w") as f:
@@ -73,16 +83,15 @@ def save_memory(mem):

  memory = load_memory()

- # ----- small NLP helpers -----
  def extract_name(text):
-     # patterns: "I'm X", "I am X", "My name is X" or single capitalized word
      text = text.strip()
-     patterns = [
-         r"^(?:i am|i'm|im|i’m)\s+([A-Za-z][A-Za-z '-]{1,40})",
-         r"my name is\s+([A-Za-z][A-Za-z '-]{1,40})",
-         r"^([A-Z][a-z]{1,30})$"
-     ]
-     for p in patterns:
          m = re.search(p, text, flags=re.IGNORECASE)
          if m:
              name = m.group(1).strip()
@@ -92,52 +101,51 @@ def extract_name(text):
  def extract_age(text):
      nums = re.findall(r"\b([1-9][0-9]?)\b", text)
      for n in nums:
-         val = int(n)
-         if 8 <= val <= 120:
-             return val
      return None

- def detect_crisis(text):
-     t = text.lower()
-     return any(term in t for term in CRISIS_TERMS)

- def get_helpline_by_ip(remote_addr):
-     # best effort: call ipapi.co
      try:
-         import requests
-         ip = remote_addr if remote_addr and ":" not in remote_addr else ""
-         url = "https://ipapi.co/json/" if not ip else f"https://ipapi.co/{ip}/json/"
-         r = requests.get(url, timeout=2)
-         if r.status_code == 200:
-             data = r.json()
-             code = data.get("country_code", "").upper()
-             return HELPLINES.get(code, HELPLINES["DEFAULT"])
      except Exception:
          pass
      return HELPLINES["DEFAULT"]

  def classify_emotion(text):
-     # try HF model, fallback to keywords
      if emotion_model:
          try:
              out = emotion_model(text)
-             if isinstance(out, list):
-                 # pipeline may return list of lists or list of dicts
-                 first = out[0]
-                 if isinstance(first, list):
-                     label = first[0]["label"]
-                 else:
-                     label = first["label"]
-                 return label.lower()
          except Exception:
              pass
-     # keyword fallback
      low = text.lower()
-     if any(w in low for w in ["happy","glad","joy","great","good","awesome"]):
          return "joy"
-     if any(w in low for w in ["sad","down","depressed","unhappy","lonely","cry"]):
          return "sadness"
-     if any(w in low for w in ["angry","mad","furious","hate","annoy"]):
          return "anger"
      if any(w in low for w in ["scared","afraid","anxious","panic","worried"]):
          return "fear"
@@ -145,126 +153,131 @@ def classify_emotion(text):
          return "love"
      return "neutral"

- # ----- Avoid repetitiveness helpers -----
- VARIED_ACKS = [
-     "That sounds really heavy — thank you for trusting me with that.",
-     "I can hear how much that matters to you.",
-     "You’ve shared something important. I’m here with you.",
-     "That must have taken courage to say. I’m listening."
  ]
- VARIED_FOLLOWUPS = [
-     "Would you like to tell me more about what’s been happening?",
-     "How has that been affecting your day-to-day life?",
-     "What usually helps you when you feel this way?",
-     "Would you like a calming exercise or practical next steps?"
  ]
  MOTIVATIONAL_SNIPPETS = [
-     "Even small steps count. You don't have to do it all at once.",
-     "It's okay to rest and take care of yourself.",
-     "You’ve handled so much already that shows strength.",
-     "Breath by breath, you are moving forward."
  ]

- # ----- OpenAI helper: build system prompt for mixed-mode personalities (Option C) -----
- def build_system_prompt(personality_choice, past_memory):
-     # personality_choice is an id like 'calm_male', etc.
-     # past_memory: dict with name, age, last_mood
-     base = (
-         "You are 'Serenity', a warm, compassionate emotional-support assistant. "
-         "Be supportive, curiosity-led, and avoid giving medical or legal advice. "
-         "Do not use the same short phrase repeatedly. Vary vocabulary and sentence structure. "
-         "Always be empathetic, concise when user is distressed, more chatty when user is okay. "
-         "When user shows crisis language, immediately offer helpline info and encourage contacting emergency services."
-     )

-     # personalities mapping
-     persona_map = {
-         "calm_male": "Use a calm male-tone voice: steady, grounding, gentle. Slightly formal but warm.",
-         "deep_male": "Use a deep baritone male-tone: slow, resonant, reassuring.",
-         "soothing_male": "Use a soothing male counselor-tone: mellow and kind.",
-         "gentle_female": "Use a gentle female-tone: tender, nurturing, caring.",
-         "feminine_female": "Use a feminine bright-tone: warm and encouraging.",
-         "deep_female": "Use a deeper female-tone: soulful and empathetic.",
-         "soothing_female": "Use a clear soothing female-tone: calm and steady.",
-         "neutral": "Use a neutral friendly-tone: balanced, soft, non-gendered."
-     }
-     persona = persona_map.get(personality_choice, persona_map["neutral"])

      memory_note = ""
-     if past_memory:
-         nm = past_memory.get("name")
-         last = past_memory.get("last_mood")
-         if nm:
-             memory_note += f" The user is called {nm}."
-         if last:
-             memory_note += f" The user's recent mood was: {last}."
-
-     # Add few-shot style guidance for followups and variety
-     examples = (
-         "\nExamples of empathetic flow (do not repeat exact wording):\n"
-         "- User: 'I've been so down lately.' -> Assistant: 'I can hear how heavy that feels. Would you like to share what triggered it?' \n"
-         "- User: 'I can't sleep, I'm anxious' -> Assistant: 'That must be exhausting. Try breathing with me for a minute — would you like that?' \n"
-         "- User: 'How are you?' -> Assistant: Provide a short warm, slightly human reply and then re-focus on the user: e.g. 'I'm here and ready to listen — how are you feeling today?'\n"
      )

-     return "\n".join([base, persona, memory_note, examples])
-
- # ----- Generate assistant reply via OpenAI (preferred) or friendly fallback -----
- def generate_assistant_reply(user_message, personality, session_memory):
-     name = session_memory.get("name", "friend")
-     age = session_memory.get("age")
-     last_mood = session_memory.get("last_mood")
-
-     # If OpenAI not configured, produce a careful local fallback using templates
      if not OPENAI_AVAILABLE:
-         # we still use classification to vary replies
-         emo = classify_emotion(user_message)
-         if emo in ("sadness","fear","anger"):
-             reply = random.choice(VARIED_ACKS) + " " + random.choice(VARIED_FOLLOWUPS)
-         elif emo in ("joy","love"):
-             reply = f"That's wonderful, {name}! So glad to hear that. What made it bright for you?"
-         else:
-             if any(w in user_message.lower() for w in ("help","guidance","motivate","motivation","advice")):
-                 reply = random.choice(MOTIVATIONAL_SNIPPETS)
-             else:
-                 reply = random.choice(VARIED_ACKS)
-         return reply, emo
-
-     # Build system + user prompt for OpenAI
-     system_prompt = build_system_prompt(personality or "neutral", session_memory)
-     messages = [
-         {"role":"system", "content": system_prompt},
-         {"role":"user", "content": f"User (named {name if name else 'unknown'}) says: {user_message}"}
-     ]
-
      try:
          resp = openai.ChatCompletion.create(
-             model = os.environ.get("OPENAI_MODEL","gpt-4o-mini"),
-             messages = messages,
-             temperature = 0.8,
-             max_tokens = 512,
-             n = 1
          )
          text = resp.choices[0].message.content.strip()
-         # compute emotion via classifier
-         emo = classify_emotion(user_message)
-         # small safety: prevent repetitive single-line "I understand"
-         normalized = text.strip().lower()
-         if normalized in ("i understand", "i see", "okay", "i'm sorry to hear that"):
-             text = random.choice(VARIED_ACKS)
-         return text, emo
      except Exception:
-         # fallback if OpenAI errors
-         emo = classify_emotion(user_message)
-         if emo in ("sadness","fear","anger"):
-             reply = random.choice(VARIED_ACKS) + " " + random.choice(VARIED_FOLLOWUPS)
-         elif emo in ("joy","love"):
-             reply = f"That's wonderful, {name}! Tell me more about that good thing."
-         else:
-             reply = random.choice(VARIED_ACKS)
-         return reply, emo

- # ----- Routes -----
  @app.route("/")
  def index():
      return send_from_directory(".", "index.html")
@@ -273,115 +286,202 @@ def index():
  def chat():
      global memory
      data = request.get_json() or {}
-     session = data.get("session") or request.remote_addr or "default"
      message = (data.get("message") or "").strip()
-     personality = data.get("personality") or data.get("voice_profile") or "neutral"
      init_flag = data.get("init", False)

-     # ensure session slot
      slot = memory.get(session, {})
      now = datetime.datetime.utcnow().isoformat()

-     # If init flag, return initial greeting or follow-up if we have memory
      if init_flag:
          if not slot.get("name"):
-             slot.setdefault("last_seen", now)
-             memory[session] = slot
-             save_memory(memory)
-             return jsonify({"reply":"Hey there — I’m Serenity. What’s your name?", "emotion":"calm"})
          else:
              last_mood = slot.get("last_mood")
              last_seen = slot.get("last_seen")
              try:
-                 if last_mood in ("sadness","fear","anger") and last_seen:
-                     t = datetime.datetime.fromisoformat(last_seen)
-                     if (datetime.datetime.utcnow() - t).days <= MEMORY_DAYS:
-                         return jsonify({"reply":f"Hey {slot.get('name')}, I remember you were having a tough time last time. How are you today?", "emotion":"warm"})
              except Exception:
                  pass
-             return jsonify({"reply":f"Welcome back {slot.get('name')} — I'm here for you. What's on your mind?", "emotion":"calm"})

-     # If message empty
      if not message:
-         return jsonify({"reply":"Im listening whenever youre ready tell me whats on your mind.", "emotion":"neutral"})

-     # If awaiting name or age from previous flow
      awaiting = slot.get("awaiting")
      if not slot.get("name") and not awaiting:
-         # Try to extract name
-         name_extracted = extract_name(message)
-         if name_extracted:
-             slot["name"] = name_extracted
              slot["awaiting"] = "age"
              slot["last_seen"] = now
              memory[session] = slot
              save_memory(memory)
-             return jsonify({"reply":f"Lovely to meet you, {name_extracted}! How old are you?", "emotion":"curious"})
          else:
              slot["awaiting"] = "name"
              slot["last_seen"] = now
              memory[session] = slot
              save_memory(memory)
-             return jsonify({"reply":"Hey — what should I call you? What's your name?", "emotion":"calm"})

      if awaiting == "name":
-         name_guess = extract_name(message) or message.split()[0].capitalize()
-         slot["name"] = name_guess
          slot.pop("awaiting", None)
-         slot["last_seen"] = now
          slot["awaiting"] = "age"
-         memory[session]=slot
          save_memory(memory)
-         return jsonify({"reply":f"Nice to meet you, {name_guess}! How old are you?", "emotion":"curious"})

      if awaiting == "age":
-         age_guess = extract_age(message)
-         if age_guess:
-             slot["age"] = age_guess
              slot.pop("awaiting", None)
              slot["last_seen"] = now
-             memory[session]=slot
              save_memory(memory)
-             return jsonify({"reply":f"Thanks — got it. {slot.get('name')}, how have you been feeling lately?", "emotion":"curious"})
          else:
-             return jsonify({"reply":"Could you tell me your age as a number (for example: 24)?", "emotion":"neutral"})

      # Crisis detection
-     if detect_crisis(message):
-         helpline = get_helpline_by_ip(request.remote_addr)
          slot["last_mood"] = "crisis"
          slot["last_seen"] = now
          memory[session] = slot
          save_memory(memory)
-         reply = (f"I’m really concerned about how you're feeling right now. You are not alone. "
-                  f"Please consider contacting emergency services or a helpline: {helpline}")
-         return jsonify({"reply":reply, "emotion":"crisis"})

-     # Generate reply via OpenAI or fallback templates
-     reply_text, emotion = generate_assistant_reply(message, personality, slot)
-     # store memory
      slot["last_mood"] = emotion
      slot["last_seen"] = now
-     # keep a short history
-     hist = slot.get("history", [])
-     hist.append({"time": now, "in": message, "out": reply_text, "emotion": emotion})
-     # keep only last 20 messages
-     slot["history"] = hist[-20:]
      memory[session] = slot
      save_memory(memory)
-
-     return jsonify({"reply": reply_text, "emotion": emotion})
-
- # optional endpoint to clear a session (new chat)
- @app.route("/reset_session", methods=["POST"])
- def reset_session():
-     data = request.get_json() or {}
-     session = data.get("session")
-     if session and session in memory:
-         memory.pop(session, None)
-         save_memory(memory)
-     return jsonify({"ok": True})
-
- if __name__ == "__main__":
-     port = int(os.environ.get("PORT", 7860))
-     app.run(host="0.0.0.0", port=port)
 
  import os
  import json
+ import re
  import random
  import datetime
  from flask import Flask, request, jsonify, send_from_directory

+ # Try optional packages
  try:
      from transformers import pipeline
      HF_AVAILABLE = True
  except Exception:
      HF_AVAILABLE = False

+ try:
+     import requests
+     REQ_AVAILABLE = True
+ except Exception:
+     REQ_AVAILABLE = False
+
+ # Optional OpenAI usage for richer replies
  try:
      import openai
+     OPENAI_AVAILABLE = bool(os.environ.get("OPENAI_API_KEY"))
+     if OPENAI_AVAILABLE:
+         openai.api_key = os.environ.get("OPENAI_API_KEY")
  except Exception:
      OPENAI_AVAILABLE = False

  app = Flask(__name__, static_folder=".", static_url_path="/")

+ # ---------- Config ----------
  MEMORY_FILE = "session_memory.json"
+ MEMORY_RETENTION_DAYS = 15
+ CRISIS_TERMS = [
+     "suicide", "kill myself", "end my life", "i want to die", "hurt myself",
+     "can't go on", "cant go on", "i don't want to live", "i dont want to live"
+ ]
  HELPLINES = {
      "IN": "🇮🇳 India: AASRA Helpline 91-9820466726",
      "US": "🇺🇸 USA: Call or text 988 (Suicide & Crisis Lifeline)",
      "GB": "🇬🇧 UK: Samaritans 116 123",
      "CA": "🇨🇦 Canada: Talk Suicide Canada 1-833-456-4566",
      "AU": "🇦🇺 Australia: Lifeline 13 11 14",
+     "DEFAULT": "If you are in crisis, please contact your local emergency number or visit https://findahelpline.com"
  }

+ # ---------- Optional HF emotion model (heavy) ----------
  emotion_model = None
  if HF_AVAILABLE:
      try:
+         emotion_model = pipeline("text-classification",
+                                  model="j-hartmann/emotion-english-distilroberta-base",
+                                  top_k=5)
      except Exception:
          emotion_model = None

+ # ---------- Memory helpers ----------
  def load_memory():
      if os.path.exists(MEMORY_FILE):
          try:

              data = {}
      else:
          data = {}
+     # prune old
+     cutoff = datetime.datetime.utcnow() - datetime.timedelta(days=MEMORY_RETENTION_DAYS)
+     keep = {}
+     for k, v in data.items():
          try:
              t = datetime.datetime.fromisoformat(v.get("last_seen"))
              if t >= cutoff:
+                 keep[k] = v
          except Exception:
+             keep[k] = v
+     return keep

  def save_memory(mem):
      with open(MEMORY_FILE, "w") as f:

  memory = load_memory()

+ # ---------- small NLP helpers ----------
+ name_patterns = [
+     r"^(?:i am|i'm|im|i’m)\s+([A-Za-z][A-Za-z '-]{1,40})",
+     r"my name is\s+([A-Za-z][A-Za-z '-]{1,40})",
+     r"^([A-Z][a-z]{1,30})$"
+ ]
  def extract_name(text):
      text = text.strip()
+     for p in name_patterns:
          m = re.search(p, text, flags=re.IGNORECASE)
          if m:
              name = m.group(1).strip()

  def extract_age(text):
      nums = re.findall(r"\b([1-9][0-9]?)\b", text)
      for n in nums:
+         v = int(n)
+         if 8 <= v <= 120:
+             return v
      return None

+ def is_crisis(text):
+     low = text.lower()
+     return any(term in low for term in CRISIS_TERMS)

+ def helpline_for_request(remote_addr):
+     # best-effort country lookup via ipapi
      try:
+         if REQ_AVAILABLE:
+             ip = remote_addr if remote_addr and ":" not in remote_addr else ""
+             url = "https://ipapi.co/json/" if not ip else f"https://ipapi.co/{ip}/json/"
+             r = requests.get(url, timeout=2)
+             if r.status_code == 200:
+                 data = r.json()
+                 code = data.get("country_code", "").upper()
+                 return HELPLINES.get(code, HELPLINES["DEFAULT"])
      except Exception:
          pass
      return HELPLINES["DEFAULT"]

  def classify_emotion(text):
+     # Try HF if available
      if emotion_model:
          try:
              out = emotion_model(text)
+             # pipeline returns list or list of lists; get top label
+             first = out[0]
+             if isinstance(first, list):
+                 label = first[0]["label"]
+             else:
+                 label = first["label"]
+             return label.lower()
          except Exception:
              pass
+     # fallback heuristics
      low = text.lower()
+     if any(w in low for w in ["happy","glad","joy","great","good","awesome","fine"]):
          return "joy"
+     if any(w in low for w in ["sad","down","depressed","unhappy","lonely","cry","miserable"]):
          return "sadness"
+     if any(w in low for w in ["angry","mad","furious","annoyed","irritat"]):
          return "anger"
      if any(w in low for w in ["scared","afraid","anxious","panic","worried"]):
          return "fear"

          return "love"
      return "neutral"

+ # ---------- Intent detection (simple rules) ----------
+ def detect_intent(text):
+     t = text.lower().strip()
+     # Crisis
+     if is_crisis(t):
+         return "CRISIS"
+     # Asking about bot
+     if any(q in t for q in ["how are you", "how're you", "how r you", "how you doing", "are you okay", "are you mad", "are you upset", "are you mad?"]):
+         return "QUESTION_ABOUT_BOT"
+     # Requests for motivation/guidance
+     if any(w in t for w in ["motivate", "motivation", "guidance", "inspire", "give me guidance", "need motivation", "help me be motivated"]):
+         return "REQUEST_MOTIVATION"
+     # Casual chit-chat / teasing / slang
+     if any(w in t for w in ["lol","haha","hahaha","jk","bro","dude","whats up","what's up","have you gone mad","are you mad","r u mad","you mad"]):
+         return "CASUAL"
+     # If user mentions feelings -> support
+     if any(w in t for w in ["sad","down","depressed","anxious","anxiety","lonely","hurt","upset","tired","stressed","stressing","stress"]):
+         return "SUPPORT"
+     # Else neutral casual fallback for short utterances
+     if len(t.split()) <= 6:
+         return "CASUAL"
+     return "SUPPORT"  # prefer support for longer introspective messages
+
+ # ---------- Non-repetitive response manager ----------
+ def pick_nonrepetitive(session_slot, bucket):
+     """Pick a reply from bucket avoiding recent repeats stored in session_slot['recent_replies']"""
+     recent = session_slot.get("recent_replies", [])
+     choices = [x for x in bucket if x not in recent]
+     if not choices:
+         # all used recently — clear memory a bit and reuse
+         session_slot["recent_replies"] = []
+         choices = bucket[:]
+     pick = random.choice(choices)
+     # append to recent (keep last 6)
+     recent.insert(0, pick)
+     session_slot["recent_replies"] = recent[:6]
+     return pick
+
+ # ---------- Reply templates ----------
+ CASUAL_REPLY_TEMPLATES = [
+     "Haha, you crack me up — tell me more!",
+     "Oh wow, that’s a curveball 😄 What made you say that?",
+     "I’m here and very curious — go on.",
+     "Haha, I might be a little wired but never mad — what's up?",
+     "I love that energy. Want to tell me more about it?",
+     "You’re funny — but seriously, how are you really?",
+     "Haha, okay I see you. What else?"
  ]
+
+ SUPPORT_OPENERS = [
+     "That sounds heavy. Thank you for trusting me with that.",
+     "I can feel how much that impacted you. I'm listening.",
+     "You handled a lot there; I'm glad you told me.",
+     "That must have been difficult. Tell me more, if you want."
  ]
+
+ SUPPORT_FOLLOWUPS = [
+     "Would you like to talk about what might help a little today?",
+     "How has this been affecting your daily life?",
+     "What usually helps you when things feel this way?",
+     "Would you prefer a calming exercise or a few practical steps?"
+ ]
+
  MOTIVATIONAL_SNIPPETS = [
+     "Even small steps count; you don't need to fix everything at once.",
+     "You’ve come so far already. One gentle step at a time.",
+     "Rest is allowed. Healing isn’t a straight line.",
+     "Breathe. You’re doing better than you think."
  ]

+ BOT_SELF_REPLIES = [
+     "I'm doing well — talking to you brightens my loop! How about you?",
+     "Feeling calm and ready to listen — how are you today?",
+     "I’m good! Just here with an open ear for you.",
+     "Doing okay — I was thinking about how to support you better. What’s up?"
+ ]

+ # ---------- OpenAI prompt builder (for mixed persona) ----------
+ PERSONA_TEXT = {
+     "calm_male": "A calm masculine-tone voice: steady, grounding, gentle; use short reassuring phrases.",
+     "deep_male": "A deep male-tone: slow, resonant, and calming.",
+     "soothing_male": "A soothing male-tone: mellow and kind.",
+     "gentle_female": "A gentle female-tone: tender and nurturing.",
+     "feminine_female": "A bright feminine-tone: warm and encouraging.",
+     "deep_female": "A deeper female-tone: soulful and empathetic.",
+     "soothing_female": "A soothing female-tone: calm and steady.",
+     "neutral": "A neutral friendly-tone: balanced, soft, non-gendered."
+ }

+ def build_openai_prompt(personality_id, session_slot):
+     persona = PERSONA_TEXT.get(personality_id, PERSONA_TEXT["neutral"])
      memory_note = ""
+     if session_slot.get("name"):
+         memory_note += f" The user is named {session_slot.get('name')}."
+     if session_slot.get("last_mood"):
+         memory_note += f" Recent mood: {session_slot.get('last_mood')}."
+     system = (
+         "You are Serenity, a warm compassionate emotional support companion. "
+         "Be empathetic, avoid repeating the same short phrases like 'I understand', and vary vocabulary. "
+         "Keep replies concise when the user seems distressed; be chatty when the user is casual. "
+         + persona + memory_note
+         + " If user asks casual questions about you, answer briefly and pivot back to supporting the user."
      )
+     return system

+ def openai_reply(user_message, personality_id, session_slot):
      if not OPENAI_AVAILABLE:
+         return None
+     system_prompt = build_openai_prompt(personality_id, session_slot)
      try:
          resp = openai.ChatCompletion.create(
+             model = os.environ.get("OPENAI_MODEL", "gpt-4o-mini"),
+             messages = [
+                 {"role":"system", "content": system_prompt},
+                 {"role":"user", "content": user_message}
+             ],
+             temperature = 0.85,
+             max_tokens = 350
          )
          text = resp.choices[0].message.content.strip()
+         return text
      except Exception:
+         return None

+ # ---------- Routes ----------
  @app.route("/")
  def index():
      return send_from_directory(".", "index.html")

  def chat():
      global memory
      data = request.get_json() or {}
+     session = data.get("session") or request.remote_addr or "default_session"
      message = (data.get("message") or "").strip()
+     personality = (data.get("personality") or data.get("voice_profile") or "neutral")
      init_flag = data.get("init", False)

+     # ensure slot exists
      slot = memory.get(session, {})
      now = datetime.datetime.utcnow().isoformat()
+     if not slot:
+         slot = {"name": None, "age": None, "last_mood": None, "last_seen": now, "recent_replies": [], "history": []}

+     # If init requested, send greeting or follow-up
      if init_flag:
+         slot["last_seen"] = now
+         memory[session] = slot
+         save_memory(memory)
          if not slot.get("name"):
+             return jsonify({"reply":"Hey — I'm Serenity. What's your name?", "emotion":"calm", "intent":"INIT"})
          else:
              last_mood = slot.get("last_mood")
              last_seen = slot.get("last_seen")
              try:
+                 t = datetime.datetime.fromisoformat(last_seen)
+                 if last_mood in ("sadness","anger","fear") and (datetime.datetime.utcnow() - t).days <= MEMORY_RETENTION_DAYS:
+                     return jsonify({"reply":f"Hey {slot.get('name')}, I remember you were feeling down last time. How are you today?", "emotion":"warm", "intent":"FOLLOWUP"})
              except Exception:
                  pass
+             return jsonify({"reply":f"Welcome back {slot.get('name')} — what’s on your mind?", "emotion":"calm", "intent":"INIT"})

+     # If empty message
      if not message:
+         return jsonify({"reply":"I'm here whenever you're ready, tell me what's on your mind.", "emotion":"neutral", "intent":"NONE"})

+     # Handle awaiting name/age
      awaiting = slot.get("awaiting")
      if not slot.get("name") and not awaiting:
+         # try to extract name
+         name = extract_name(message)
+         if name:
+             slot["name"] = name
              slot["awaiting"] = "age"
              slot["last_seen"] = now
              memory[session] = slot
              save_memory(memory)
+             return jsonify({"reply":f"Nice to meet you, {name}! How old are you?", "emotion":"curious", "intent":"ASK_AGE"})
          else:
              slot["awaiting"] = "name"
              slot["last_seen"] = now
              memory[session] = slot
              save_memory(memory)
+             return jsonify({"reply":"Hey — what should I call you? What's your name?", "emotion":"calm", "intent":"ASK_NAME"})

      if awaiting == "name":
+         guessed = extract_name(message) or message.split()[0].capitalize()
+         slot["name"] = guessed
          slot.pop("awaiting", None)
          slot["awaiting"] = "age"
+         slot["last_seen"] = now
+         memory[session] = slot
          save_memory(memory)
+         return jsonify({"reply":f"Lovely, {guessed}. How old are you?", "emotion":"curious", "intent":"ASK_AGE"})

      if awaiting == "age":
+         age = extract_age(message)
+         if age:
+             slot["age"] = age
              slot.pop("awaiting", None)
              slot["last_seen"] = now
+             memory[session] = slot
              save_memory(memory)
+             return jsonify({"reply":f"Thanks. {slot.get('name')}, how have you been feeling lately?", "emotion":"curious", "intent":"ASK_MOOD"})
          else:
+             return jsonify({"reply":"Could you tell me your age as a number (for example, 24)?", "emotion":"neutral", "intent":"ASK_AGE"})

      # Crisis detection
+     if is_crisis(message):
          slot["last_mood"] = "crisis"
          slot["last_seen"] = now
          memory[session] = slot
          save_memory(memory)
+         helpline = helpline_for_request(request.remote_addr)
+         reply = f"I’m really concerned about how you're feeling. You are not alone. Please consider contacting emergency services or this helpline: {helpline}"
+         return jsonify({"reply":reply, "emotion":"crisis", "intent":"CRISIS"})
+
+     # Detect intent
+     intent = detect_intent(message)
+
+     # If user asks about the bot (casual)
+     if intent == "QUESTION_ABOUT_BOT":
+         # friendly, human-like small talk (Option A)
+         bot_reply = random.choice(BOT_SELF_REPLIES)
+         # briefly ask how user is to pivot back
+         pivot = random.choice(["How are you doing right now?", "And how about you?"])
+         reply = f"{bot_reply} {pivot}"
+         # update memory and return
+         slot["last_mood"] = classify_emotion(message)
+         slot["last_seen"] = now
+         memory[session] = slot
+         save_memory(memory)
+         return jsonify({"reply": reply, "emotion": slot["last_mood"], "intent": "QUESTION_ABOUT_BOT"})
+
+     # If casual intent -> casual friendly replies (Option A)
+     if intent == "CASUAL":
+         # Use OpenAI if available to make it more natural
+         if OPENAI_AVAILABLE:
+             # Use a short, casual prompt
+             try:
+                 system = ("You are a friendly, informal companion. Answer casually, with light humor when appropriate, "
+                           "be brief and natural. Avoid repeating previous phrasing. If the user is distressed, shift to empathy.")
+                 resp = openai.ChatCompletion.create(
+                     model = os.environ.get("OPENAI_MODEL","gpt-4o-mini"),
+                     messages = [
+                         {"role":"system", "content": system},
+                         {"role":"user", "content": message}
+                     ],
+                     temperature = 0.8,
+                     max_tokens = 150
+                 )
+                 text = resp.choices[0].message.content.strip()
+                 # little safety: if the AI returns a generic empathetic one-liner only, diversify
+                 if text.lower() in ("i understand", "i see", "okay"):
+                     text = pick_nonrepetitive(slot, CASUAL_REPLY_TEMPLATES)
+                 slot["last_mood"] = classify_emotion(message)
+                 slot["last_seen"] = now
+                 # store reply to avoid repetition
+                 slot.setdefault("recent_replies", [])
+                 slot["recent_replies"].insert(0, text)
+                 slot["recent_replies"] = slot["recent_replies"][:6]
+                 slot.setdefault("history", []).append({"in": message, "out": text, "time": now, "intent": intent})
+                 slot["history"] = slot["history"][-40:]
+                 memory[session] = slot
+                 save_memory(memory)
+                 return jsonify({"reply": text, "emotion": slot["last_mood"], "intent": intent})
+             except Exception:
+                 # fallback to templates
+                 text = pick_nonrepetitive(slot, CASUAL_REPLY_TEMPLATES)
+                 slot["last_mood"] = classify_emotion(message)
+                 slot["last_seen"] = now
+                 memory[session] = slot
+                 save_memory(memory)
+                 return jsonify({"reply": text, "emotion": slot["last_mood"], "intent": intent})
+         else:
+             text = pick_nonrepetitive(slot, CASUAL_REPLY_TEMPLATES)
+             slot["last_mood"] = classify_emotion(message)
+             slot["last_seen"] = now
+             memory[session] = slot
+             save_memory(memory)
+             return jsonify({"reply": text, "emotion": slot["last_mood"], "intent": intent})

+     # Request motivation
+     if intent == "REQUEST_MOTIVATION":
+         reply = pick_nonrepetitive(slot, MOTIVATIONAL_SNIPPETS)
+         slot["last_mood"] = classify_emotion(message)
+         slot["last_seen"] = now
+         memory[session] = slot
+         save_memory(memory)
+         return jsonify({"reply": reply, "emotion": slot["last_mood"], "intent": intent})
+
+     # Support (default)
+     # Try OpenAI with persona if available
+     if OPENAI_AVAILABLE:
+         ai_text = openai_reply(message, personality, slot)
+         if ai_text:
+             # avoid robotic single-line responses
+             if ai_text.strip().lower() in ("i understand","i see","okay","i'm sorry to hear that"):
+                 ai_text = pick_nonrepetitive(slot, SUPPORT_OPENERS)
+             emotion = classify_emotion(message)
+             slot["last_mood"] = emotion
+             slot.setdefault("recent_replies", [])
+             slot["recent_replies"].insert(0, ai_text)
+             slot["recent_replies"] = slot["recent_replies"][:6]
+             slot.setdefault("history", []).append({"in": message, "out": ai_text, "time": now, "intent": intent})
+             slot["history"] = slot["history"][-40:]
+             slot["last_seen"] = now
+             memory[session] = slot
+             save_memory(memory)
+             return jsonify({"reply": ai_text, "emotion": emotion, "intent": intent})
+         # else fall through to template fallback
+
+     # Fallback supportive templated reply
+     opener = pick_nonrepetitive(slot, SUPPORT_OPENERS)
+     follow = pick_nonrepetitive(slot, SUPPORT_FOLLOWUPS)
+     # Mix small chance for motivational hint
+     if random.random() < 0.35:
+         reply = f"{opener} {random.choice(MOTIVATIONAL_SNIPPETS)} {follow}"
+     else:
+         reply = f"{opener} {follow}"
+     emotion = classify_emotion(message)
      slot["last_mood"] = emotion
+     slot.setdefault("recent_replies", [])
+     slot["recent_replies"].insert(0, reply)
+     slot["recent_replies"] = slot["recent_replies"][:6]
+     slot.setdefault("history", []).append({"in": message, "out": reply, "time": now, "intent": intent})
+     slot["history"] = slot["history"][-40:]
      slot["last_seen"] = now
      memory[session] = slot
      save_memory(memory)
+     return jsonify({"reply": reply, "emotion":