Spaces:
Runtime error
Runtime error
Upload 53 files
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .env +11 -0
- .gitattributes +35 -35
- .gitignore +206 -0
- LICENSE +201 -0
- README.md +132 -14
- gui/chatbot_demo.py +672 -0
- gui/chatbot_dev_app.py +508 -0
- gui/simple_gui.py +156 -0
- pyproject.toml +53 -0
- src/kallam.egg-info/PKG-INFO +174 -0
- src/kallam.egg-info/SOURCES.txt +32 -0
- src/kallam.egg-info/dependency_links.txt +1 -0
- src/kallam.egg-info/requires.txt +20 -0
- src/kallam.egg-info/top_level.txt +1 -0
- src/kallam/__init__.py +4 -0
- src/kallam/__pycache__/__init__.cpython-311.pyc +0 -0
- src/kallam/app/__init__.py +0 -0
- src/kallam/app/__pycache__/__init__.cpython-311.pyc +0 -0
- src/kallam/app/__pycache__/chatbot_manager.cpython-311.pyc +0 -0
- src/kallam/app/chatbot_manager.py +412 -0
- src/kallam/domain/__init__.py +0 -0
- src/kallam/domain/__pycache__/__init__.cpython-311.pyc +0 -0
- src/kallam/domain/agents/__init__.py +0 -0
- src/kallam/domain/agents/__pycache__/Single_agent.cpython-311.pyc +0 -0
- src/kallam/domain/agents/__pycache__/__init__.cpython-311.pyc +0 -0
- src/kallam/domain/agents/__pycache__/doctor.cpython-311.pyc +0 -0
- src/kallam/domain/agents/__pycache__/orchestrator.cpython-311.pyc +0 -0
- src/kallam/domain/agents/__pycache__/psychologist.cpython-311.pyc +0 -0
- src/kallam/domain/agents/__pycache__/summarizer.cpython-311.pyc +0 -0
- src/kallam/domain/agents/__pycache__/supervisor.cpython-311.pyc +0 -0
- src/kallam/domain/agents/__pycache__/translator.cpython-311.pyc +0 -0
- src/kallam/domain/agents/doctor.py +215 -0
- src/kallam/domain/agents/orchestrator.py +206 -0
- src/kallam/domain/agents/psychologist.py +620 -0
- src/kallam/domain/agents/summarizer.py +157 -0
- src/kallam/domain/agents/supervisor.py +520 -0
- src/kallam/domain/agents/translator.py +296 -0
- src/kallam/infra/__init__.py +0 -0
- src/kallam/infra/__pycache__/__init__.cpython-311.pyc +0 -0
- src/kallam/infra/__pycache__/db.cpython-311.pyc +0 -0
- src/kallam/infra/__pycache__/exporter.cpython-311.pyc +0 -0
- src/kallam/infra/__pycache__/message_store.cpython-311.pyc +0 -0
- src/kallam/infra/__pycache__/session_store.cpython-311.pyc +0 -0
- src/kallam/infra/__pycache__/summary_store.cpython-311.pyc +0 -0
- src/kallam/infra/__pycache__/token_counter.cpython-311.pyc +0 -0
- src/kallam/infra/db.py +18 -0
- src/kallam/infra/exporter.py +46 -0
- src/kallam/infra/message_store.py +120 -0
- src/kallam/infra/session_store.py +122 -0
- src/kallam/infra/summary_store.py +22 -0
.env
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# .env
|
| 2 |
+
SEA_LION_API_KEY=sk-Pgz55LWbB_WJM57hVKHl2Q
|
| 3 |
+
GEMINI_API_KEY=AIzaSyBBKtn1m2WUJnPXwxvwbJb4UE6K8eTs3j4
|
| 4 |
+
NGROK_AUTHTOKEN=31ooUO8hf5RHq33GbvQUU2cClDj_7pFWFaGLkzf6HSUmCBmVc
|
| 5 |
+
|
| 6 |
+
# Setting up AWS credentials for temporary access
|
| 7 |
+
AWS_ACCESS_KEY_ID=ASIAYXX3CBWNEDOJS3TI
|
| 8 |
+
AWS_SECRET_ACCESS_KEY=QXOgQW0M1+D4Out3ayMdEzEtdhfZ/JOvEXbfrGDF
|
| 9 |
+
AWS_SESSION_TOKEN=IQoJb3JpZ2luX2VjEN///////////wEaCXVzLWVhc3QtMSJGMEQCIA9ZY7I0eFkL9LHJcDhSUfJeaZdoWNv4wNlnicrZ3ZNHAiAfgDcWN6FS7RH2xH0lk8kg+ozWj8lhWccRCwYKoK2rFyqNAwhIEAAaDDYwMDc0ODE5OTMyMiIMIaoSNf4lU4GN+XYVKuoCw7xlObfhM7xfIWpwu3isKfK6thpPmOQvEPqyMiP+MbXsnzrZz2Ayu2vbCsnM16PIiUpE9RTDM8Gt7+wSY0vsZOTidtn9oJJr/l67PZ/GUVRrxydPcKtAbaxDOcmsAkCzIA69oCtJ6KZ20YNUaO14pMxF0mor1VpIJosF231IxKsgDfyxYuEPAfDfa49qn1gFCJuiAOTwJikQGyA0Fpol9ZP+ykV3T2CxHeBe7xrfKmN9MwnG7oMuL6nuXSOqZQQe37GbRXGDEeWjsOvD4YkfWj04xJNwGt0dt7ZtAPdVy1Cqrsnr7JZn7d1r0ns+w1MFIpARjkzhUvhl5hO6Ml0IFOfqGWJ4w0fnHC224txp+65xVPIxT167FucVdqeO5NCcz19I8rHQF7Jx8m58HZuTI9N8bm7DnntRlHHEpLU345nm9vqZhASzIxkSNOKZLud8dgiMOW8h4it9lsSWZgcEns+5z36uvdrRj9YwvZ7hxQY6pwHcpUEFpAip5W9+oRuufPpMp82vsqSzMEzklPePKicUPtQCVs9bpimAaW8bcCVG51/kf4PkDfH8L/ScJQAe29diPI3jrbI36F9LFxJk8aozc824Lrvsx+17UgGCRb3vycdiqSk5B+oJ88l4aFN6fn9NM5PDxTo+S3hcj4njrY4EHteN5zj/BIhN/gfUysrPVcsZnZ30Ni0TiR4RhUzXGGC1CcBcWFzzhQ==
|
| 10 |
+
AWS_ROLE_ARN=arn:aws:iam::600748199322:role/SageMakerExecutionRole
|
| 11 |
+
AWS_DEFAULT_REGION=ap-southeast-2
|
.gitattributes
CHANGED
|
@@ -1,35 +1,35 @@
|
|
| 1 |
-
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
-
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
-
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
-
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
-
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
-
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
-
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
-
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
-
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
-
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
-
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 12 |
-
*.model filter=lfs diff=lfs merge=lfs -text
|
| 13 |
-
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 14 |
-
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 15 |
-
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 16 |
-
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 17 |
-
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 18 |
-
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 19 |
-
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 20 |
-
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 21 |
-
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 22 |
-
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 23 |
-
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 24 |
-
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 25 |
-
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 26 |
-
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 27 |
-
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 28 |
-
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 29 |
-
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 30 |
-
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 31 |
-
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 32 |
-
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
-
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
-
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
-
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
| 1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 28 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
.gitignore
ADDED
|
@@ -0,0 +1,206 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.pem
|
| 2 |
+
*.db
|
| 3 |
+
logs
|
| 4 |
+
*.json
|
| 5 |
+
.env
|
| 6 |
+
.gradio/
|
| 7 |
+
data
|
| 8 |
+
exported_sessions
|
| 9 |
+
*.csv
|
| 10 |
+
*.jsonl
|
| 11 |
+
radar_outputs
|
| 12 |
+
|
| 13 |
+
# Byte-compiled / optimized / DLL files
|
| 14 |
+
__pycache__/
|
| 15 |
+
*.py[cod]
|
| 16 |
+
*$py.class
|
| 17 |
+
|
| 18 |
+
# C extensions
|
| 19 |
+
*.so
|
| 20 |
+
|
| 21 |
+
# Distribution / packaging
|
| 22 |
+
.Python
|
| 23 |
+
build/
|
| 24 |
+
develop-eggs/
|
| 25 |
+
dist/
|
| 26 |
+
downloads/
|
| 27 |
+
eggs/
|
| 28 |
+
.eggs/
|
| 29 |
+
lib/
|
| 30 |
+
lib64/
|
| 31 |
+
parts/
|
| 32 |
+
sdist/
|
| 33 |
+
var/
|
| 34 |
+
wheels/
|
| 35 |
+
share/python-wheels/
|
| 36 |
+
*.egg-info/
|
| 37 |
+
.installed.cfg
|
| 38 |
+
*.egg
|
| 39 |
+
MANIFEST
|
| 40 |
+
|
| 41 |
+
# PyInstaller
|
| 42 |
+
# Usually these files are written by a python script from a template
|
| 43 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
| 44 |
+
*.manifest
|
| 45 |
+
*.spec
|
| 46 |
+
|
| 47 |
+
# Installer logs
|
| 48 |
+
pip-log.txt
|
| 49 |
+
pip-delete-this-directory.txt
|
| 50 |
+
|
| 51 |
+
# Unit test / coverage reports
|
| 52 |
+
htmlcov/
|
| 53 |
+
.tox/
|
| 54 |
+
.nox/
|
| 55 |
+
.coverage
|
| 56 |
+
.coverage.*
|
| 57 |
+
.cache
|
| 58 |
+
nosetests.xml
|
| 59 |
+
coverage.xml
|
| 60 |
+
*.cover
|
| 61 |
+
*.py,cover
|
| 62 |
+
.hypothesis/
|
| 63 |
+
.pytest_cache/
|
| 64 |
+
cover/
|
| 65 |
+
|
| 66 |
+
# Translations
|
| 67 |
+
*.mo
|
| 68 |
+
*.pot
|
| 69 |
+
|
| 70 |
+
# Django stuff:
|
| 71 |
+
*.log
|
| 72 |
+
local_settings.py
|
| 73 |
+
db.sqlite3
|
| 74 |
+
db.sqlite3-journal
|
| 75 |
+
|
| 76 |
+
# Flask stuff:
|
| 77 |
+
instance/
|
| 78 |
+
.webassets-cache
|
| 79 |
+
|
| 80 |
+
# Scrapy stuff:
|
| 81 |
+
.scrapy
|
| 82 |
+
|
| 83 |
+
# Sphinx documentation
|
| 84 |
+
docs/_build/
|
| 85 |
+
|
| 86 |
+
# PyBuilder
|
| 87 |
+
.pybuilder/
|
| 88 |
+
target/
|
| 89 |
+
|
| 90 |
+
# Jupyter Notebook
|
| 91 |
+
.ipynb_checkpoints
|
| 92 |
+
|
| 93 |
+
# IPython
|
| 94 |
+
profile_default/
|
| 95 |
+
ipython_config.py
|
| 96 |
+
|
| 97 |
+
# pyenv
|
| 98 |
+
# For a library or package, you might want to ignore these files since the code is
|
| 99 |
+
# intended to run in multiple environments; otherwise, check them in:
|
| 100 |
+
# .python-version
|
| 101 |
+
|
| 102 |
+
# pipenv
|
| 103 |
+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
| 104 |
+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
| 105 |
+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
| 106 |
+
# install all needed dependencies.
|
| 107 |
+
#Pipfile.lock
|
| 108 |
+
|
| 109 |
+
# UV
|
| 110 |
+
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
|
| 111 |
+
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
| 112 |
+
# commonly ignored for libraries.
|
| 113 |
+
#uv.lock
|
| 114 |
+
|
| 115 |
+
# poetry
|
| 116 |
+
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
| 117 |
+
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
| 118 |
+
# commonly ignored for libraries.
|
| 119 |
+
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
| 120 |
+
#poetry.lock
|
| 121 |
+
|
| 122 |
+
# pdm
|
| 123 |
+
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
| 124 |
+
#pdm.lock
|
| 125 |
+
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
|
| 126 |
+
# in version control.
|
| 127 |
+
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
|
| 128 |
+
.pdm.toml
|
| 129 |
+
.pdm-python
|
| 130 |
+
.pdm-build/
|
| 131 |
+
|
| 132 |
+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
| 133 |
+
__pypackages__/
|
| 134 |
+
|
| 135 |
+
# Celery stuff
|
| 136 |
+
celerybeat-schedule
|
| 137 |
+
celerybeat.pid
|
| 138 |
+
|
| 139 |
+
# SageMath parsed files
|
| 140 |
+
*.sage.py
|
| 141 |
+
|
| 142 |
+
# Environments
|
| 143 |
+
.env
|
| 144 |
+
.venv
|
| 145 |
+
env/
|
| 146 |
+
venv/
|
| 147 |
+
ENV/
|
| 148 |
+
env.bak/
|
| 149 |
+
venv.bak/
|
| 150 |
+
|
| 151 |
+
# Spyder project settings
|
| 152 |
+
.spyderproject
|
| 153 |
+
.spyproject
|
| 154 |
+
|
| 155 |
+
# Rope project settings
|
| 156 |
+
.ropeproject
|
| 157 |
+
|
| 158 |
+
# mkdocs documentation
|
| 159 |
+
/site
|
| 160 |
+
|
| 161 |
+
# mypy
|
| 162 |
+
.mypy_cache/
|
| 163 |
+
.dmypy.json
|
| 164 |
+
dmypy.json
|
| 165 |
+
|
| 166 |
+
# Pyre type checker
|
| 167 |
+
.pyre/
|
| 168 |
+
|
| 169 |
+
# pytype static type analyzer
|
| 170 |
+
.pytype/
|
| 171 |
+
|
| 172 |
+
# Cython debug symbols
|
| 173 |
+
cython_debug/
|
| 174 |
+
|
| 175 |
+
# PyCharm
|
| 176 |
+
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
| 177 |
+
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
| 178 |
+
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
| 179 |
+
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
| 180 |
+
#.idea/
|
| 181 |
+
|
| 182 |
+
# Abstra
|
| 183 |
+
# Abstra is an AI-powered process automation framework.
|
| 184 |
+
# Ignore directories containing user credentials, local state, and settings.
|
| 185 |
+
# Learn more at https://abstra.io/docs
|
| 186 |
+
.abstra/
|
| 187 |
+
|
| 188 |
+
# Visual Studio Code
|
| 189 |
+
# Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
|
| 190 |
+
# that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
|
| 191 |
+
# and can be added to the global gitignore or merged into this file. However, if you prefer,
|
| 192 |
+
# you could uncomment the following to ignore the enitre vscode folder
|
| 193 |
+
# .vscode/
|
| 194 |
+
|
| 195 |
+
# Ruff stuff:
|
| 196 |
+
.ruff_cache/
|
| 197 |
+
|
| 198 |
+
# PyPI configuration file
|
| 199 |
+
.pypirc
|
| 200 |
+
|
| 201 |
+
# Cursor
|
| 202 |
+
# Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to
|
| 203 |
+
# exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
|
| 204 |
+
# refer to https://docs.cursor.com/context/ignore-files
|
| 205 |
+
.cursorignore
|
| 206 |
+
.cursorindexingignore
|
LICENSE
ADDED
|
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Apache License
|
| 2 |
+
Version 2.0, January 2004
|
| 3 |
+
http://www.apache.org/licenses/
|
| 4 |
+
|
| 5 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
| 6 |
+
|
| 7 |
+
1. Definitions.
|
| 8 |
+
|
| 9 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
| 10 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
| 11 |
+
|
| 12 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
| 13 |
+
the copyright owner that is granting the License.
|
| 14 |
+
|
| 15 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
| 16 |
+
other entities that control, are controlled by, or are under common
|
| 17 |
+
control with that entity. For the purposes of this definition,
|
| 18 |
+
"control" means (i) the power, direct or indirect, to cause the
|
| 19 |
+
direction or management of such entity, whether by contract or
|
| 20 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
| 21 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
| 22 |
+
|
| 23 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
| 24 |
+
exercising permissions granted by this License.
|
| 25 |
+
|
| 26 |
+
"Source" form shall mean the preferred form for making modifications,
|
| 27 |
+
including but not limited to software source code, documentation
|
| 28 |
+
source, and configuration files.
|
| 29 |
+
|
| 30 |
+
"Object" form shall mean any form resulting from mechanical
|
| 31 |
+
transformation or translation of a Source form, including but
|
| 32 |
+
not limited to compiled object code, generated documentation,
|
| 33 |
+
and conversions to other media types.
|
| 34 |
+
|
| 35 |
+
"Work" shall mean the work of authorship, whether in Source or
|
| 36 |
+
Object form, made available under the License, as indicated by a
|
| 37 |
+
copyright notice that is included in or attached to the work
|
| 38 |
+
(an example is provided in the Appendix below).
|
| 39 |
+
|
| 40 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
| 41 |
+
form, that is based on (or derived from) the Work and for which the
|
| 42 |
+
editorial revisions, annotations, elaborations, or other modifications
|
| 43 |
+
represent, as a whole, an original work of authorship. For the purposes
|
| 44 |
+
of this License, Derivative Works shall not include works that remain
|
| 45 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
| 46 |
+
the Work and Derivative Works thereof.
|
| 47 |
+
|
| 48 |
+
"Contribution" shall mean any work of authorship, including
|
| 49 |
+
the original version of the Work and any modifications or additions
|
| 50 |
+
to that Work or Derivative Works thereof, that is intentionally
|
| 51 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
| 52 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
| 53 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
| 54 |
+
means any form of electronic, verbal, or written communication sent
|
| 55 |
+
to the Licensor or its representatives, including but not limited to
|
| 56 |
+
communication on electronic mailing lists, source code control systems,
|
| 57 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
| 58 |
+
Licensor for the purpose of discussing and improving the Work, but
|
| 59 |
+
excluding communication that is conspicuously marked or otherwise
|
| 60 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
| 61 |
+
|
| 62 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
| 63 |
+
on behalf of whom a Contribution has been received by Licensor and
|
| 64 |
+
subsequently incorporated within the Work.
|
| 65 |
+
|
| 66 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
| 67 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 68 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 69 |
+
copyright license to reproduce, prepare Derivative Works of,
|
| 70 |
+
publicly display, publicly perform, sublicense, and distribute the
|
| 71 |
+
Work and such Derivative Works in Source or Object form.
|
| 72 |
+
|
| 73 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
| 74 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 75 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 76 |
+
(except as stated in this section) patent license to make, have made,
|
| 77 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
| 78 |
+
where such license applies only to those patent claims licensable
|
| 79 |
+
by such Contributor that are necessarily infringed by their
|
| 80 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
| 81 |
+
with the Work to which such Contribution(s) was submitted. If You
|
| 82 |
+
institute patent litigation against any entity (including a
|
| 83 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
| 84 |
+
or a Contribution incorporated within the Work constitutes direct
|
| 85 |
+
or contributory patent infringement, then any patent licenses
|
| 86 |
+
granted to You under this License for that Work shall terminate
|
| 87 |
+
as of the date such litigation is filed.
|
| 88 |
+
|
| 89 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
| 90 |
+
Work or Derivative Works thereof in any medium, with or without
|
| 91 |
+
modifications, and in Source or Object form, provided that You
|
| 92 |
+
meet the following conditions:
|
| 93 |
+
|
| 94 |
+
(a) You must give any other recipients of the Work or
|
| 95 |
+
Derivative Works a copy of this License; and
|
| 96 |
+
|
| 97 |
+
(b) You must cause any modified files to carry prominent notices
|
| 98 |
+
stating that You changed the files; and
|
| 99 |
+
|
| 100 |
+
(c) You must retain, in the Source form of any Derivative Works
|
| 101 |
+
that You distribute, all copyright, patent, trademark, and
|
| 102 |
+
attribution notices from the Source form of the Work,
|
| 103 |
+
excluding those notices that do not pertain to any part of
|
| 104 |
+
the Derivative Works; and
|
| 105 |
+
|
| 106 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
| 107 |
+
distribution, then any Derivative Works that You distribute must
|
| 108 |
+
include a readable copy of the attribution notices contained
|
| 109 |
+
within such NOTICE file, excluding those notices that do not
|
| 110 |
+
pertain to any part of the Derivative Works, in at least one
|
| 111 |
+
of the following places: within a NOTICE text file distributed
|
| 112 |
+
as part of the Derivative Works; within the Source form or
|
| 113 |
+
documentation, if provided along with the Derivative Works; or,
|
| 114 |
+
within a display generated by the Derivative Works, if and
|
| 115 |
+
wherever such third-party notices normally appear. The contents
|
| 116 |
+
of the NOTICE file are for informational purposes only and
|
| 117 |
+
do not modify the License. You may add Your own attribution
|
| 118 |
+
notices within Derivative Works that You distribute, alongside
|
| 119 |
+
or as an addendum to the NOTICE text from the Work, provided
|
| 120 |
+
that such additional attribution notices cannot be construed
|
| 121 |
+
as modifying the License.
|
| 122 |
+
|
| 123 |
+
You may add Your own copyright statement to Your modifications and
|
| 124 |
+
may provide additional or different license terms and conditions
|
| 125 |
+
for use, reproduction, or distribution of Your modifications, or
|
| 126 |
+
for any such Derivative Works as a whole, provided Your use,
|
| 127 |
+
reproduction, and distribution of the Work otherwise complies with
|
| 128 |
+
the conditions stated in this License.
|
| 129 |
+
|
| 130 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
| 131 |
+
any Contribution intentionally submitted for inclusion in the Work
|
| 132 |
+
by You to the Licensor shall be under the terms and conditions of
|
| 133 |
+
this License, without any additional terms or conditions.
|
| 134 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
| 135 |
+
the terms of any separate license agreement you may have executed
|
| 136 |
+
with Licensor regarding such Contributions.
|
| 137 |
+
|
| 138 |
+
6. Trademarks. This License does not grant permission to use the trade
|
| 139 |
+
names, trademarks, service marks, or product names of the Licensor,
|
| 140 |
+
except as required for reasonable and customary use in describing the
|
| 141 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
| 142 |
+
|
| 143 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
| 144 |
+
agreed to in writing, Licensor provides the Work (and each
|
| 145 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
| 146 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 147 |
+
implied, including, without limitation, any warranties or conditions
|
| 148 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
| 149 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
| 150 |
+
appropriateness of using or redistributing the Work and assume any
|
| 151 |
+
risks associated with Your exercise of permissions under this License.
|
| 152 |
+
|
| 153 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
| 154 |
+
whether in tort (including negligence), contract, or otherwise,
|
| 155 |
+
unless required by applicable law (such as deliberate and grossly
|
| 156 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
| 157 |
+
liable to You for damages, including any direct, indirect, special,
|
| 158 |
+
incidental, or consequential damages of any character arising as a
|
| 159 |
+
result of this License or out of the use or inability to use the
|
| 160 |
+
Work (including but not limited to damages for loss of goodwill,
|
| 161 |
+
work stoppage, computer failure or malfunction, or any and all
|
| 162 |
+
other commercial damages or losses), even if such Contributor
|
| 163 |
+
has been advised of the possibility of such damages.
|
| 164 |
+
|
| 165 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
| 166 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
| 167 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
| 168 |
+
or other liability obligations and/or rights consistent with this
|
| 169 |
+
License. However, in accepting such obligations, You may act only
|
| 170 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
| 171 |
+
of any other Contributor, and only if You agree to indemnify,
|
| 172 |
+
defend, and hold each Contributor harmless for any liability
|
| 173 |
+
incurred by, or claims asserted against, such Contributor by reason
|
| 174 |
+
of your accepting any such warranty or additional liability.
|
| 175 |
+
|
| 176 |
+
END OF TERMS AND CONDITIONS
|
| 177 |
+
|
| 178 |
+
APPENDIX: How to apply the Apache License to your work.
|
| 179 |
+
|
| 180 |
+
To apply the Apache License to your work, attach the following
|
| 181 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
| 182 |
+
replaced with your own identifying information. (Don't include
|
| 183 |
+
the brackets!) The text should be enclosed in the appropriate
|
| 184 |
+
comment syntax for the file format. We also recommend that a
|
| 185 |
+
file or class name and description of purpose be included on the
|
| 186 |
+
same "printed page" as the copyright notice for easier
|
| 187 |
+
identification within third-party archives.
|
| 188 |
+
|
| 189 |
+
Copyright [yyyy] [name of copyright owner]
|
| 190 |
+
|
| 191 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
| 192 |
+
you may not use this file except in compliance with the License.
|
| 193 |
+
You may obtain a copy of the License at
|
| 194 |
+
|
| 195 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
| 196 |
+
|
| 197 |
+
Unless required by applicable law or agreed to in writing, software
|
| 198 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
| 199 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 200 |
+
See the License for the specific language governing permissions and
|
| 201 |
+
limitations under the License.
|
README.md
CHANGED
|
@@ -1,14 +1,132 @@
|
|
| 1 |
-
|
| 2 |
-
|
| 3 |
-
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# KaLLaM - Motivational-Therapeutic Advisor
|
| 2 |
+
|
| 3 |
+
KaLLaM is a bilingual (Thai/English) multi-agent assistant designed for physical and mental-health conversations. It orchestrates specialized agents (Supervisor, Doctor, Psychologist, Translator, Summarizer), persists state in SQLite, and exposes Gradio front-ends alongside data and can use evaluation tooling for model psychological skill benchmark.
|
| 4 |
+
Finalist in PAN-SEA AI DEVELOPER CHALLENGE 2025 Round 2: Develop Deployable Solutions & Pitch
|
| 5 |
+
|
| 6 |
+
## Highlights
|
| 7 |
+
- Multi-agent orchestration that routes requests to domain specialists.
|
| 8 |
+
- Thai/English support backed by SEA-Lion translation services.
|
| 9 |
+
- Conversation persistence with export utilities for downstream analysis.
|
| 10 |
+
- Ready-to-run Gradio demo and developer interfaces.
|
| 11 |
+
- Evaluation scripts for MISC/BiMISC-style coding pipelines.
|
| 12 |
+
|
| 13 |
+
## Requirements
|
| 14 |
+
- Python 3.10 or newer (3.11+ recommended; Docker/App Runner images use 3.11).
|
| 15 |
+
- pip, virtualenv (or equivalent), and Git for local development.
|
| 16 |
+
- Access tokens for the external models you plan to call (SEA-Lion, Google Gemini, optional OpenAI or AWS Bedrock).
|
| 17 |
+
|
| 18 |
+
## Quick Start (Local)
|
| 19 |
+
1. Clone the repository and switch into it.
|
| 20 |
+
2. Create and activate a virtual environment:
|
| 21 |
+
```powershell
|
| 22 |
+
python -m venv .venv
|
| 23 |
+
.venv\Scripts\Activate.ps1
|
| 24 |
+
```
|
| 25 |
+
```bash
|
| 26 |
+
python -m venv .venv
|
| 27 |
+
source .venv/bin/activate
|
| 28 |
+
```
|
| 29 |
+
3. Install dependencies (editable mode keeps imports pointing at `src/`):
|
| 30 |
+
```bash
|
| 31 |
+
python -m pip install --upgrade pip setuptools wheel
|
| 32 |
+
pip install -e .[dev]
|
| 33 |
+
```
|
| 34 |
+
4. Create a `.env` file at the project root (see the next section) and populate the keys you have access to.
|
| 35 |
+
5. Launch one of the Gradio apps:
|
| 36 |
+
```bash
|
| 37 |
+
python gui/chatbot_demo.py # bilingual demo UI
|
| 38 |
+
python gui/chatbot_dev_app.py # Thai-first developer UI
|
| 39 |
+
```
|
| 40 |
+
|
| 41 |
+
The Gradio server binds to http://127.0.0.1:7860 by default; override via `GRADIO_SERVER_NAME` and `GRADIO_SERVER_PORT`.
|
| 42 |
+
|
| 43 |
+
## Environment Configuration
|
| 44 |
+
Configuration is loaded with `python-dotenv`, so any variables in `.env` are available at runtime. Define only the secrets relevant to the agents you intend to use.
|
| 45 |
+
|
| 46 |
+
**Core**
|
| 47 |
+
- `SEA_LION_API_KEY` *or* (`SEA_LION_GATEWAY_URL` + `SEA_LION_GATEWAY_TOKEN`) for SEA-Lion access.
|
| 48 |
+
- `SEA_LION_BASE_URL` (optional; defaults to `https://api.sea-lion.ai/v1`).
|
| 49 |
+
- `SEA_LION_MODEL_ID` to override the default SEA-Lion model.
|
| 50 |
+
- `GEMINI_API_KEY` for Doctor/Psychologist English responses.
|
| 51 |
+
|
| 52 |
+
**Optional integrations**
|
| 53 |
+
- `OPENAI_API_KEY` if you enable any OpenAI-backed tooling via `strands-agents`.
|
| 54 |
+
- `AWS_REGION` (and optionally `AWS_DEFAULT_REGION`) plus temporary credentials (`AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, `AWS_SESSION_TOKEN`) when running Bedrock-backed flows.
|
| 55 |
+
- `AWS_ROLE_ARN` if you assume roles for Bedrock access.
|
| 56 |
+
- `NGROK_AUTHTOKEN` when tunnelling Gradio externally.
|
| 57 |
+
- `TAVILY_API_KEY` if you wire in search or retrieval plugins.
|
| 58 |
+
|
| 59 |
+
Example scaffold:
|
| 60 |
+
```env
|
| 61 |
+
SEA_LION_API_KEY=your-sea-lion-token
|
| 62 |
+
SEA_LION_MODEL_ID=aisingapore/Gemma-SEA-LION-v4-27B-IT
|
| 63 |
+
GEMINI_API_KEY=your-gemini-key
|
| 64 |
+
OPENAI_API_KEY=sk-your-openai-key
|
| 65 |
+
AWS_REGION=ap-southeast-2
|
| 66 |
+
# AWS_ACCESS_KEY_ID=...
|
| 67 |
+
# AWS_SECRET_ACCESS_KEY=...
|
| 68 |
+
# AWS_SESSION_TOKEN=...
|
| 69 |
+
```
|
| 70 |
+
Keep `.env` out of version control and rotate credentials regularly. You can validate temporary AWS credentials with `python test_credentials.py`.
|
| 71 |
+
|
| 72 |
+
## Running and Persistence
|
| 73 |
+
- Conversations, summaries, and metadata persist to `chatbot_data.db` (SQLite). The schema is created automatically on first run.
|
| 74 |
+
- Export session transcripts with `ChatbotManager.export_session_json()`; JSON files land in `exported_sessions/`.
|
| 75 |
+
- Logs are emitted per agent into `logs/` (daily files) and to stdout.
|
| 76 |
+
|
| 77 |
+
## Docker
|
| 78 |
+
Build and run the containerised Gradio app:
|
| 79 |
+
```bash
|
| 80 |
+
docker build -t kallam .
|
| 81 |
+
docker run --rm -p 8080:8080 --env-file .env kallam
|
| 82 |
+
```
|
| 83 |
+
Environment variables are read at runtime; use `--env-file` or `-e` flags to provide the required keys. Override the entry script with `APP_FILE`, for example `-e APP_FILE=gui/chatbot_dev_app.py`.
|
| 84 |
+
|
| 85 |
+
## AWS App Runner
|
| 86 |
+
The repo ships with `apprunner.yaml` for AWS App Runner's managed Python 3.11 runtime.
|
| 87 |
+
1. Push the code to a connected repository (GitHub or CodeCommit) or supply an archive.
|
| 88 |
+
2. In the App Runner console choose **Source code** -> **Managed runtime** and upload/select `apprunner.yaml`.
|
| 89 |
+
3. Configure AWS Secrets Manager references for the environment variables listed under `run.env` (SEA-Lion, Gemini, OpenAI, Ngrok, etc.).
|
| 90 |
+
4. Deploy. App Runner exposes the Gradio UI on the service URL and honours the `$PORT` variable (defaults to 8080).
|
| 91 |
+
|
| 92 |
+
For fully containerised deployments on App Runner, ECS, or EKS, build the Docker image and supply the same environment variables.
|
| 93 |
+
|
| 94 |
+
## Project Layout
|
| 95 |
+
```
|
| 96 |
+
project-root/
|
| 97 |
+
|-- src/kallam/
|
| 98 |
+
| |-- app/ # ChatbotManager facade
|
| 99 |
+
| |-- domain/agents/ # Supervisor, Doctor, Psychologist, Translator, Summarizer, Orchestrator
|
| 100 |
+
| |-- infra/ # SQLite stores, exporter, token counter
|
| 101 |
+
| `-- infrastructure/ # Shared SEA-Lion configuration helpers
|
| 102 |
+
|-- gui/ # Gradio demo and developer apps
|
| 103 |
+
|-- scripts/ # Data prep and evaluation utilities
|
| 104 |
+
|-- data/ # Sample datasets (gemini, human, orchestrated, SEA-Lion)
|
| 105 |
+
|-- exported_sessions/ # JSON exports created at runtime
|
| 106 |
+
|-- logs/ # Runtime logs (generated)
|
| 107 |
+
|-- Dockerfile
|
| 108 |
+
|-- apprunner.yaml
|
| 109 |
+
|-- test_credentials.py
|
| 110 |
+
`-- README.md
|
| 111 |
+
```
|
| 112 |
+
|
| 113 |
+
## Development Tooling
|
| 114 |
+
- Run tests: `pytest -q`
|
| 115 |
+
- Lint: `ruff check src`
|
| 116 |
+
- Type-check: `mypy src`
|
| 117 |
+
- Token usage: see `src/kallam/infra/token_counter.py`
|
| 118 |
+
- Supervisor/translator fallbacks log warnings if credentials are missing.
|
| 119 |
+
|
| 120 |
+
## Scripts and Evaluation
|
| 121 |
+
The `scripts/` directory includes:
|
| 122 |
+
- `eng_silver_misc_coder.py` and `thai_silver_misc_coder.py` for SEA-Lion powered coding pipelines.
|
| 123 |
+
- `model_evaluator.py` plus preprocessing and visualisation helpers (`ex_data_preprocessor.py`, `in_data_preprocessor.py`, `visualizer.ipynb`).
|
| 124 |
+
|
| 125 |
+
## Note:
|
| 126 |
+
### Proposal
|
| 127 |
+
Refer to KaLLaM Proporsal.pdf for more information about the project
|
| 128 |
+
### Citation
|
| 129 |
+
See `Citation.md` for references and datasets.
|
| 130 |
+
### License
|
| 131 |
+
Apache License 2.0. Refer to `LICENSE` for full terms.
|
| 132 |
+
|
gui/chatbot_demo.py
ADDED
|
@@ -0,0 +1,672 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# chatbot_demo.py
|
| 2 |
+
import gradio as gr
|
| 3 |
+
import logging
|
| 4 |
+
from datetime import datetime
|
| 5 |
+
from typing import List, Tuple, Optional
|
| 6 |
+
import os
|
| 7 |
+
import socket
|
| 8 |
+
|
| 9 |
+
from kallam.app.chatbot_manager import ChatbotManager
|
| 10 |
+
|
| 11 |
+
mgr = ChatbotManager(log_level="INFO")
|
| 12 |
+
logging.basicConfig(level=logging.INFO)
|
| 13 |
+
logger = logging.getLogger(__name__)
|
| 14 |
+
|
| 15 |
+
# INLINE SVG for icons
|
| 16 |
+
CABBAGE_SVG = """
|
| 17 |
+
<svg width="128" height="128" viewBox="0 0 64 64" xmlns="http://www.w3.org/2000/svg"
|
| 18 |
+
role="img" aria-label="cabbage">
|
| 19 |
+
<defs>
|
| 20 |
+
<linearGradient id="leaf" x1="0" x2="0" y1="0" y2="1">
|
| 21 |
+
<stop offset="0%" stop-color="#9be58b"/>
|
| 22 |
+
<stop offset="100%" stop-color="#5cc46a"/>
|
| 23 |
+
</linearGradient>
|
| 24 |
+
</defs>
|
| 25 |
+
<g fill="none">
|
| 26 |
+
<circle cx="32" cy="32" r="26" fill="url(#leaf)"/>
|
| 27 |
+
<path d="M12,34 C18,28 22,26 28,27 C34,28 38,32 44,30 C48,29 52,26 56,22"
|
| 28 |
+
stroke="#2e7d32" stroke-width="3" stroke-linecap="round"/>
|
| 29 |
+
<path d="M10,40 C18,36 22,42 28,42 C34,42 38,38 44,40 C50,42 54,40 58,36"
|
| 30 |
+
stroke="#2e7d32" stroke-width="3" stroke-linecap="round"/>
|
| 31 |
+
<path d="M24 24 C28 20 36 20 40 24 C44 28 44 36 32 38 C20 36 20 28 24 24 Z"
|
| 32 |
+
fill="#bff5b6"/>
|
| 33 |
+
</g>
|
| 34 |
+
</svg>
|
| 35 |
+
"""
|
| 36 |
+
|
| 37 |
+
# -----------------------
|
| 38 |
+
# Core handlers
|
| 39 |
+
# -----------------------
|
| 40 |
+
def _session_status(session_id: str) -> str:
|
| 41 |
+
"""Get current session status using mgr.get_session()"""
|
| 42 |
+
if not session_id:
|
| 43 |
+
return "🔴 **No Active Session** - Click **New Session** to start"
|
| 44 |
+
|
| 45 |
+
try:
|
| 46 |
+
# Use same method as simple app
|
| 47 |
+
now = datetime.now()
|
| 48 |
+
s = mgr.get_session(session_id) or {}
|
| 49 |
+
ts = s.get("timestamp", now.strftime("%d %b %Y | %I:%M %p"))
|
| 50 |
+
model = s.get("model_used", "Orchestrated SEA-Lion")
|
| 51 |
+
total = s.get("total_messages", 0)
|
| 52 |
+
saved_memories = s.get("saved_memories") or "General consultation"
|
| 53 |
+
|
| 54 |
+
return f"""
|
| 55 |
+
🟢 **Session:** `{session_id[:8]}...`
|
| 56 |
+
🏥 **Profile:** {saved_memories[:50]}{"..." if len(saved_memories) > 50 else ""}
|
| 57 |
+
📅 **Created:** {ts}
|
| 58 |
+
💬 **Messages:** {total}
|
| 59 |
+
🤖 **Model:** {model}
|
| 60 |
+
""".strip()
|
| 61 |
+
except Exception as e:
|
| 62 |
+
logger.error(f"Error getting session status: {e}")
|
| 63 |
+
return f"❌ **Error loading session:** {session_id[:8]}..."
|
| 64 |
+
|
| 65 |
+
def start_new_session(health_profile: str = ""):
|
| 66 |
+
"""Create new session using mgr - same as simple app"""
|
| 67 |
+
try:
|
| 68 |
+
sid = mgr.start_session(saved_memories=health_profile.strip() or None)
|
| 69 |
+
status = _session_status(sid)
|
| 70 |
+
|
| 71 |
+
# Initial welcome message
|
| 72 |
+
welcome_msg = {
|
| 73 |
+
"role": "assistant",
|
| 74 |
+
"content": """Hello! I'm KaLLaM 🌿, your caring AI health advisor 💖
|
| 75 |
+
|
| 76 |
+
I can communicate in both **Thai** and **English**. I'm here to support your health and well-being with personalized advice. How are you feeling today? 😊
|
| 77 |
+
|
| 78 |
+
สวัสดีค่ะ! ฉันชื่อกะหล่ำ 🌿 เป็นที่ปรึกษาด้านสุขภาพ AI ที่จะคอยดูแลคุณ 💖 ฉันสามารถสื่อสารได้ทั้งภาษาไทยและภาษาอังกฤษ วันนี้รู้สึกยังไงบ้างคะ? 😊"""
|
| 79 |
+
}
|
| 80 |
+
|
| 81 |
+
history = [welcome_msg]
|
| 82 |
+
result_msg = f"✅ **New Session Created Successfully!**\n\n🆔 Session ID: `{sid}`"
|
| 83 |
+
if health_profile.strip():
|
| 84 |
+
result_msg += f"\n🏥 **Health Profile:** Applied successfully"
|
| 85 |
+
|
| 86 |
+
return sid, history, "", status, result_msg
|
| 87 |
+
except Exception as e:
|
| 88 |
+
logger.error(f"Error creating new session: {e}")
|
| 89 |
+
return "", [], "", "❌ **Failed to create session**", f"❌ **Error:** {e}"
|
| 90 |
+
|
| 91 |
+
def send_message(user_msg: str, history: list, session_id: str):
|
| 92 |
+
"""Send message using mgr - same as simple app"""
|
| 93 |
+
# Defensive: auto-create session if missing (same as simple app)
|
| 94 |
+
if not session_id:
|
| 95 |
+
logger.warning("No session found, auto-creating...")
|
| 96 |
+
sid, history, _, status, _ = start_new_session("")
|
| 97 |
+
history.append({"role": "assistant", "content": "🔄 **New session created automatically.** You can now continue chatting!"})
|
| 98 |
+
return history, "", sid, status
|
| 99 |
+
|
| 100 |
+
if not user_msg.strip():
|
| 101 |
+
return history, "", session_id, _session_status(session_id)
|
| 102 |
+
|
| 103 |
+
try:
|
| 104 |
+
# Add user message
|
| 105 |
+
history = history + [{"role": "user", "content": user_msg}]
|
| 106 |
+
|
| 107 |
+
# Get bot response using mgr (same as simple app)
|
| 108 |
+
bot_response = mgr.handle_message(
|
| 109 |
+
session_id=session_id,
|
| 110 |
+
user_message=user_msg
|
| 111 |
+
)
|
| 112 |
+
|
| 113 |
+
# Add bot response
|
| 114 |
+
history = history + [{"role": "assistant", "content": bot_response}]
|
| 115 |
+
|
| 116 |
+
return history, "", session_id, _session_status(session_id)
|
| 117 |
+
|
| 118 |
+
except Exception as e:
|
| 119 |
+
logger.error(f"Error processing message: {e}")
|
| 120 |
+
error_msg = {"role": "assistant", "content": f"❌ **Error:** Unable to process your message. Please try again.\n\nDetails: {e}"}
|
| 121 |
+
history = history + [error_msg]
|
| 122 |
+
return history, "", session_id, _session_status(session_id)
|
| 123 |
+
|
| 124 |
+
def update_health_profile(session_id: str, health_profile: str):
|
| 125 |
+
"""Update health profile for current session using mgr's database access"""
|
| 126 |
+
if not session_id:
|
| 127 |
+
return "❌ **No active session**", _session_status(session_id)
|
| 128 |
+
|
| 129 |
+
if not health_profile.strip():
|
| 130 |
+
return "❌ **Please provide health information**", _session_status(session_id)
|
| 131 |
+
|
| 132 |
+
try:
|
| 133 |
+
# Use mgr's database path (same pattern as simple app would use)
|
| 134 |
+
from kallam.infra.db import sqlite_conn
|
| 135 |
+
with sqlite_conn(str(mgr.db_path)) as conn:
|
| 136 |
+
conn.execute(
|
| 137 |
+
"UPDATE sessions SET saved_memories = ?, last_activity = ? WHERE session_id = ?",
|
| 138 |
+
(health_profile.strip(), datetime.now().isoformat(), session_id),
|
| 139 |
+
)
|
| 140 |
+
|
| 141 |
+
result = f"✅ **Health Profile Updated Successfully!**\n\n📝 **Updated Information:** {health_profile.strip()[:100]}{'...' if len(health_profile.strip()) > 100 else ''}"
|
| 142 |
+
return result, _session_status(session_id)
|
| 143 |
+
|
| 144 |
+
except Exception as e:
|
| 145 |
+
logger.error(f"Error updating health profile: {e}")
|
| 146 |
+
return f"❌ **Error updating profile:** {e}", _session_status(session_id)
|
| 147 |
+
|
| 148 |
+
def clear_session(session_id: str):
|
| 149 |
+
"""Clear current session using mgr"""
|
| 150 |
+
if not session_id:
|
| 151 |
+
return "", [], "", "🔴 **No active session to clear**", "❌ **No active session**"
|
| 152 |
+
|
| 153 |
+
try:
|
| 154 |
+
# Check if mgr has delete_session method, otherwise handle gracefully
|
| 155 |
+
if hasattr(mgr, 'delete_session'):
|
| 156 |
+
mgr.delete_session(session_id)
|
| 157 |
+
else:
|
| 158 |
+
# Fallback: just clear the session data if method doesn't exist
|
| 159 |
+
logger.warning("delete_session method not available, clearing session state only")
|
| 160 |
+
|
| 161 |
+
return "", [], "", "🔴 **Session cleared - Create new session to continue**", f"✅ **Session `{session_id[:8]}...` cleared successfully**"
|
| 162 |
+
except Exception as e:
|
| 163 |
+
logger.error(f"Error clearing session: {e}")
|
| 164 |
+
return session_id, [], "", _session_status(session_id), f"❌ **Error clearing session:** {e}"
|
| 165 |
+
|
| 166 |
+
def force_summary(session_id: str):
|
| 167 |
+
"""Force summary using mgr (same as simple app)"""
|
| 168 |
+
if not session_id:
|
| 169 |
+
return "❌ No active session."
|
| 170 |
+
try:
|
| 171 |
+
if hasattr(mgr, 'summarize_session'):
|
| 172 |
+
s = mgr.summarize_session(session_id)
|
| 173 |
+
return f"📋 Summary updated:\n\n{s}"
|
| 174 |
+
else:
|
| 175 |
+
return "❌ Summarize function not available."
|
| 176 |
+
except Exception as e:
|
| 177 |
+
return f"❌ Failed to summarize: {e}"
|
| 178 |
+
|
| 179 |
+
def lock_inputs():
|
| 180 |
+
"""Lock inputs during processing (same as simple app)"""
|
| 181 |
+
return gr.update(interactive=False), gr.update(interactive=False)
|
| 182 |
+
|
| 183 |
+
def unlock_inputs():
|
| 184 |
+
"""Unlock inputs after processing (same as simple app)"""
|
| 185 |
+
return gr.update(interactive=True), gr.update(interactive=True)
|
| 186 |
+
|
| 187 |
+
# -----------------------
|
| 188 |
+
# UI with improved architecture and greenish cream styling - LIGHT MODE DEFAULT
|
| 189 |
+
# -----------------------
|
| 190 |
+
def create_app() -> gr.Blocks:
|
| 191 |
+
# Enhanced CSS with greenish cream color scheme, fixed positioning, and light mode defaults
|
| 192 |
+
custom_css = """
|
| 193 |
+
:root {
|
| 194 |
+
--kallam-primary: #659435;
|
| 195 |
+
--kallam-secondary: #5ea0bd;
|
| 196 |
+
--kallam-accent: #b8aa54;
|
| 197 |
+
--kallam-light: #f8fdf5;
|
| 198 |
+
--kallam-dark: #2d3748;
|
| 199 |
+
--kallam-cream: #f5f7f0;
|
| 200 |
+
--kallam-green-cream: #e8f4e0;
|
| 201 |
+
--kallam-border-cream: #d4e8c7;
|
| 202 |
+
--shadow-soft: 0 4px 6px -1px rgba(0, 0, 0, 0.1), 0 2px 4px -1px rgba(0, 0, 0, 0.06);
|
| 203 |
+
--shadow-medium: 0 10px 15px -3px rgba(0, 0, 0, 0.1), 0 4px 6px -2px rgba(0, 0, 0, 0.05);
|
| 204 |
+
--border-radius: 12px;
|
| 205 |
+
--transition: all 0.3s ease;
|
| 206 |
+
}
|
| 207 |
+
|
| 208 |
+
/* Force light mode styles - Override any dark mode defaults */
|
| 209 |
+
body, .gradio-container, .app {
|
| 210 |
+
background-color: #ffffff !important;
|
| 211 |
+
color: #2d3748 !important;
|
| 212 |
+
}
|
| 213 |
+
|
| 214 |
+
/* Ensure light backgrounds for all major containers */
|
| 215 |
+
.block, .form, .gap {
|
| 216 |
+
background-color: #ffffff !important;
|
| 217 |
+
color: #2d3748 !important;
|
| 218 |
+
}
|
| 219 |
+
|
| 220 |
+
/* Light mode for input elements */
|
| 221 |
+
input, textarea, select {
|
| 222 |
+
background-color: #ffffff !important;
|
| 223 |
+
border: 1px solid #d1d5db !important;
|
| 224 |
+
color: #2d3748 !important;
|
| 225 |
+
}
|
| 226 |
+
|
| 227 |
+
input:focus, textarea:focus, select:focus {
|
| 228 |
+
border-color: var(--kallam-primary) !important;
|
| 229 |
+
box-shadow: 0 0 0 3px rgba(101, 148, 53, 0.1) !important;
|
| 230 |
+
}
|
| 231 |
+
|
| 232 |
+
/* Ensure dark mode styles don't override in light mode */
|
| 233 |
+
html:not(.dark) .dark {
|
| 234 |
+
display: none !important;
|
| 235 |
+
}
|
| 236 |
+
|
| 237 |
+
.gradio-container {
|
| 238 |
+
max-width: 100% !important;
|
| 239 |
+
width: 100% !important;
|
| 240 |
+
margin: 0 auto !important;
|
| 241 |
+
min-height: 100vh;
|
| 242 |
+
background-color: #ffffff !important;
|
| 243 |
+
}
|
| 244 |
+
|
| 245 |
+
.main-layout {
|
| 246 |
+
display: flex !important;
|
| 247 |
+
min-height: calc(100vh - 2rem) !important;
|
| 248 |
+
gap: 1.5rem !important;
|
| 249 |
+
}
|
| 250 |
+
|
| 251 |
+
.fixed-sidebar {
|
| 252 |
+
width: 320px !important;
|
| 253 |
+
min-width: 320px !important;
|
| 254 |
+
max-width: 320px !important;
|
| 255 |
+
background: #ffffff !important;
|
| 256 |
+
backdrop-filter: blur(10px) !important;
|
| 257 |
+
border-radius: var(--border-radius) !important;
|
| 258 |
+
border: 3px solid var(--kallam-primary) !important;
|
| 259 |
+
box-shadow: var(--shadow-soft) !important;
|
| 260 |
+
padding: 1.5rem !important;
|
| 261 |
+
height: fit-content !important;
|
| 262 |
+
position: sticky !important;
|
| 263 |
+
top: 1rem !important;
|
| 264 |
+
overflow: visible !important;
|
| 265 |
+
}
|
| 266 |
+
|
| 267 |
+
.main-content {
|
| 268 |
+
flex: 1 !important;
|
| 269 |
+
min-width: 0 !important;
|
| 270 |
+
}
|
| 271 |
+
|
| 272 |
+
.kallam-header {
|
| 273 |
+
background: linear-gradient(135deg, var(--kallam-secondary) 0%, var(--kallam-primary) 50%, var(--kallam-accent) 100%);
|
| 274 |
+
border-radius: var(--border-radius);
|
| 275 |
+
padding: 2rem;
|
| 276 |
+
margin-bottom: 1.5rem;
|
| 277 |
+
text-align: center;
|
| 278 |
+
box-shadow: var(--shadow-medium);
|
| 279 |
+
position: relative;
|
| 280 |
+
overflow: hidden;
|
| 281 |
+
}
|
| 282 |
+
|
| 283 |
+
.kallam-header h1 {
|
| 284 |
+
color: white !important;
|
| 285 |
+
font-size: 2.5rem !important;
|
| 286 |
+
font-weight: 700 !important;
|
| 287 |
+
margin: 0 !important;
|
| 288 |
+
text-shadow: 0 2px 4px rgba(0,0,0,0.2);
|
| 289 |
+
position: relative;
|
| 290 |
+
z-index: 1;
|
| 291 |
+
}
|
| 292 |
+
|
| 293 |
+
.kallam-subtitle {
|
| 294 |
+
color: rgba(255,255,255,0.9) !important;
|
| 295 |
+
font-size: 1.1rem !important;
|
| 296 |
+
margin-top: 0.5rem !important;
|
| 297 |
+
position: relative;
|
| 298 |
+
z-index: 1;
|
| 299 |
+
}
|
| 300 |
+
|
| 301 |
+
.btn {
|
| 302 |
+
border-radius: 8px !important;
|
| 303 |
+
font-weight: 600 !important;
|
| 304 |
+
padding: 0.75rem 1.5rem !important;
|
| 305 |
+
transition: var(--transition) !important;
|
| 306 |
+
border: none !important;
|
| 307 |
+
box-shadow: var(--shadow-soft) !important;
|
| 308 |
+
cursor: pointer !important;
|
| 309 |
+
}
|
| 310 |
+
|
| 311 |
+
.btn:hover {
|
| 312 |
+
transform: translateY(-2px) !important;
|
| 313 |
+
box-shadow: var(--shadow-medium) !important;
|
| 314 |
+
}
|
| 315 |
+
|
| 316 |
+
.btn.btn-primary {
|
| 317 |
+
background: linear-gradient(135deg, var(--kallam-primary) 0%, var(--kallam-secondary) 100%) !important;
|
| 318 |
+
color: white !important;
|
| 319 |
+
}
|
| 320 |
+
|
| 321 |
+
.btn.btn-secondary {
|
| 322 |
+
background: #f8f9fa !important;
|
| 323 |
+
color: #2d3748 !important;
|
| 324 |
+
border: 1px solid #d1d5db !important;
|
| 325 |
+
}
|
| 326 |
+
|
| 327 |
+
.chat-container {
|
| 328 |
+
background: var(--kallam-green-cream) !important;
|
| 329 |
+
border-radius: var(--border-radius) !important;
|
| 330 |
+
border: 2px solid var(--kallam-border-cream) !important;
|
| 331 |
+
box-shadow: var(--shadow-medium) !important;
|
| 332 |
+
overflow: hidden !important;
|
| 333 |
+
}
|
| 334 |
+
|
| 335 |
+
.session-status-container .markdown {
|
| 336 |
+
margin: 0 !important;
|
| 337 |
+
padding: 0 !important;
|
| 338 |
+
font-size: 0.85rem !important;
|
| 339 |
+
line-height: 1.4 !important;
|
| 340 |
+
overflow-wrap: break-word !important;
|
| 341 |
+
word-break: break-word !important;
|
| 342 |
+
}
|
| 343 |
+
|
| 344 |
+
@media (max-width: 1200px) {
|
| 345 |
+
.main-layout {
|
| 346 |
+
flex-direction: column !important;
|
| 347 |
+
}
|
| 348 |
+
|
| 349 |
+
.fixed-sidebar {
|
| 350 |
+
width: 100% !important;
|
| 351 |
+
min-width: 100% !important;
|
| 352 |
+
max-width: 100% !important;
|
| 353 |
+
position: static !important;
|
| 354 |
+
}
|
| 355 |
+
}
|
| 356 |
+
"""
|
| 357 |
+
|
| 358 |
+
# Create a light theme with explicit light mode settings
|
| 359 |
+
light_theme = gr.themes.Soft( # type: ignore
|
| 360 |
+
primary_hue="green",
|
| 361 |
+
secondary_hue="blue",
|
| 362 |
+
neutral_hue="slate"
|
| 363 |
+
).set(
|
| 364 |
+
# Force light mode colors
|
| 365 |
+
body_background_fill="white",
|
| 366 |
+
body_text_color="#2d3748",
|
| 367 |
+
background_fill_primary="white",
|
| 368 |
+
background_fill_secondary="#f8f9fa",
|
| 369 |
+
border_color_primary="#d1d5db",
|
| 370 |
+
border_color_accent="#659435",
|
| 371 |
+
button_primary_background_fill="#659435",
|
| 372 |
+
button_primary_text_color="white",
|
| 373 |
+
button_secondary_background_fill="#f8f9fa",
|
| 374 |
+
button_secondary_text_color="#2d3748"
|
| 375 |
+
)
|
| 376 |
+
|
| 377 |
+
with gr.Blocks(
|
| 378 |
+
title="🥬 KaLLaM - Thai Motivational Therapeutic Advisor",
|
| 379 |
+
theme=light_theme,
|
| 380 |
+
css=custom_css,
|
| 381 |
+
js="""
|
| 382 |
+
function() {
|
| 383 |
+
// Force light mode on load by removing any dark classes and setting light preferences
|
| 384 |
+
document.documentElement.classList.remove('dark');
|
| 385 |
+
document.body.classList.remove('dark');
|
| 386 |
+
|
| 387 |
+
// Set data attributes for light mode
|
| 388 |
+
document.documentElement.setAttribute('data-theme', 'light');
|
| 389 |
+
|
| 390 |
+
// Override any system preferences for dark mode
|
| 391 |
+
const style = document.createElement('style');
|
| 392 |
+
style.textContent = `
|
| 393 |
+
@media (prefers-color-scheme: dark) {
|
| 394 |
+
:root {
|
| 395 |
+
color-scheme: light !important;
|
| 396 |
+
}
|
| 397 |
+
body, .gradio-container {
|
| 398 |
+
background-color: white !important;
|
| 399 |
+
color: #2d3748 !important;
|
| 400 |
+
}
|
| 401 |
+
}
|
| 402 |
+
`;
|
| 403 |
+
document.head.appendChild(style);
|
| 404 |
+
}
|
| 405 |
+
"""
|
| 406 |
+
) as app:
|
| 407 |
+
|
| 408 |
+
# State management - same as simple app
|
| 409 |
+
session_id = gr.State(value="")
|
| 410 |
+
|
| 411 |
+
# Header
|
| 412 |
+
gr.HTML(f"""
|
| 413 |
+
<div class="kallam-header">
|
| 414 |
+
<div style="display: flex; align-items: center; justify-content: flex-start; gap: 2rem; padding: 0 2rem;">
|
| 415 |
+
{CABBAGE_SVG}
|
| 416 |
+
<div style="text-align: left;">
|
| 417 |
+
<h1 style="text-align: left; margin: 0;">KaLLaM</h1>
|
| 418 |
+
<p class="kallam-subtitle" style="text-align: left; margin: 0.5rem 0 0 0;">Thai Motivational Therapeutic Advisor</p>
|
| 419 |
+
</div>
|
| 420 |
+
</div>
|
| 421 |
+
</div>
|
| 422 |
+
""")
|
| 423 |
+
|
| 424 |
+
# Main layout
|
| 425 |
+
with gr.Row(elem_classes=["main-layout"]):
|
| 426 |
+
# Sidebar with enhanced styling
|
| 427 |
+
with gr.Column(scale=1, elem_classes=["fixed-sidebar"]):
|
| 428 |
+
gr.HTML("""
|
| 429 |
+
<div style="text-align: center; padding: 0.5rem 0 1rem 0;">
|
| 430 |
+
<h3 style="color: #659435; margin: 0; font-size: 1.2rem;">Controls</h3>
|
| 431 |
+
<p style="color: #666; margin: 0.25rem 0 0 0; font-size: 0.9rem;">Manage session and health profile</p>
|
| 432 |
+
</div>
|
| 433 |
+
""")
|
| 434 |
+
|
| 435 |
+
with gr.Group():
|
| 436 |
+
new_session_btn = gr.Button("➕ New Session", variant="primary", size="lg", elem_classes=["btn", "btn-primary"])
|
| 437 |
+
health_profile_btn = gr.Button("👤 Custom Health Profile", variant="secondary", elem_classes=["btn", "btn-secondary"])
|
| 438 |
+
clear_session_btn = gr.Button("🗑️ Clear Session", variant="secondary", elem_classes=["btn", "btn-secondary"])
|
| 439 |
+
|
| 440 |
+
# Hidden health profile section
|
| 441 |
+
with gr.Column(visible=False) as health_profile_section:
|
| 442 |
+
gr.HTML('<div style="margin: 1rem 0;"><hr style="border: none; border-top: 1px solid #d1d5db;"></div>')
|
| 443 |
+
|
| 444 |
+
health_context = gr.Textbox(
|
| 445 |
+
label="🏥 Patient's Health Information",
|
| 446 |
+
placeholder="e.g., Patient's name, age, medical conditions (high blood pressure, diabetes), current symptoms, medications, lifestyle factors, mental health status...",
|
| 447 |
+
lines=5,
|
| 448 |
+
max_lines=8,
|
| 449 |
+
info="This information helps KaLLaM provide more personalized and relevant health advice. All data is kept confidential within your session."
|
| 450 |
+
)
|
| 451 |
+
|
| 452 |
+
with gr.Row():
|
| 453 |
+
update_profile_btn = gr.Button("💾 Update Health Profile", variant="primary", elem_classes=["btn", "btn-primary"])
|
| 454 |
+
back_btn = gr.Button("⏪ Back", variant="secondary", elem_classes=["btn", "btn-secondary"])
|
| 455 |
+
|
| 456 |
+
gr.HTML('<div style="margin: 1rem 0;"><hr style="border: none; border-top: 1px solid #d1d5db;"></div>')
|
| 457 |
+
|
| 458 |
+
# Session status
|
| 459 |
+
session_status = gr.Markdown(value="🔄 **Initializing...**")
|
| 460 |
+
|
| 461 |
+
# Main chat area
|
| 462 |
+
with gr.Column(scale=3, elem_classes=["main-content"]):
|
| 463 |
+
gr.HTML("""
|
| 464 |
+
<div style="text-align: center; padding: 1rem 0;">
|
| 465 |
+
<h2 style="color: #659435; margin: 0; font-size: 1.5rem;">💬 Health Consultation Chat</h2>
|
| 466 |
+
<p style="color: #666; margin: 0.5rem 0 0 0;">Chat with your AI health advisor in Thai or English</p>
|
| 467 |
+
</div>
|
| 468 |
+
""")
|
| 469 |
+
|
| 470 |
+
chatbot = gr.Chatbot(
|
| 471 |
+
label="Chat with KaLLaM",
|
| 472 |
+
height=500,
|
| 473 |
+
show_label=False,
|
| 474 |
+
type="messages",
|
| 475 |
+
elem_classes=["chat-container"]
|
| 476 |
+
)
|
| 477 |
+
|
| 478 |
+
with gr.Row():
|
| 479 |
+
with gr.Column(scale=5):
|
| 480 |
+
msg = gr.Textbox(
|
| 481 |
+
label="Message",
|
| 482 |
+
placeholder="Ask about your health in Thai or English...",
|
| 483 |
+
lines=1,
|
| 484 |
+
max_lines=4,
|
| 485 |
+
show_label=False,
|
| 486 |
+
elem_classes=["chat-container"]
|
| 487 |
+
)
|
| 488 |
+
with gr.Column(scale=1, min_width=120):
|
| 489 |
+
send_btn = gr.Button("➤", variant="primary", size="lg", elem_classes=["btn", "btn-primary"])
|
| 490 |
+
|
| 491 |
+
# Result display
|
| 492 |
+
result_display = gr.Markdown(visible=False)
|
| 493 |
+
|
| 494 |
+
# Footer
|
| 495 |
+
gr.HTML("""
|
| 496 |
+
<div style="
|
| 497 |
+
position: fixed; bottom: 0; left: 0; right: 0;
|
| 498 |
+
background: linear-gradient(135deg, var(--kallam-secondary) 0%, var(--kallam-primary) 100%);
|
| 499 |
+
color: white; padding: 0.75rem 1rem; text-align: center; font-size: 0.8rem;
|
| 500 |
+
box-shadow: 0 -4px 6px -1px rgba(0, 0, 0, 0.1); z-index: 1000;
|
| 501 |
+
border-top: 1px solid rgba(255,255,255,0.2);
|
| 502 |
+
">
|
| 503 |
+
<div style="max-width: 1400px; margin: 0 auto; display: flex; justify-content: center; align-items: center; flex-wrap: wrap; gap: 1.5rem;">
|
| 504 |
+
<span style="font-weight: 600;">Built with ❤️ by:</span>
|
| 505 |
+
<div style="display: flex; flex-direction: column; align-items: center; gap: 0.2rem;">
|
| 506 |
+
<span style="font-weight: 500;">👨💻 Nopnatee Trivoravong</span>
|
| 507 |
+
<div style="display: flex; gap: 0.5rem; font-size: 0.75rem;">
|
| 508 |
+
<span>📧 nopnatee.triv@gmail.com</span>
|
| 509 |
+
<span>•</span>
|
| 510 |
+
<a href="https://github.com/Nopnatee" target="_blank" style="color: rgba(255,255,255,0.9); text-decoration: none;">GitHub</a>
|
| 511 |
+
</div>
|
| 512 |
+
</div>
|
| 513 |
+
<span style="color: rgba(255,255,255,0.7);">|</span>
|
| 514 |
+
<div style="display: flex; flex-direction: column; align-items: center; gap: 0.2rem;">
|
| 515 |
+
<span style="font-weight: 500;">👨💻 Khamic Srisutrapon</span>
|
| 516 |
+
<div style="display: flex; gap: 0.5rem; font-size: 0.75rem;">
|
| 517 |
+
<span>📧 khamic.sk@gmail.com</span>
|
| 518 |
+
<span>•</span>
|
| 519 |
+
<a href="https://github.com/Khamic672" target="_blank" style="color: rgba(255,255,255,0.9); text-decoration: none;">GitHub</a>
|
| 520 |
+
</div>
|
| 521 |
+
</div>
|
| 522 |
+
<span style="color: rgba(255,255,255,0.7);">|</span>
|
| 523 |
+
<div style="display: flex; flex-direction: column; align-items: center; gap: 0.2rem;">
|
| 524 |
+
<span style="font-weight: 500;">👩💻 Napas Siripala</span>
|
| 525 |
+
<div style="display: flex; gap: 0.5rem; font-size: 0.75rem;">
|
| 526 |
+
<span>📧 millynapas@gmail.com</span>
|
| 527 |
+
<span>•</span>
|
| 528 |
+
<a href="https://github.com/kaoqueri" target="_blank" style="color: rgba(255,255,255,0.9); text-decoration: none;">GitHub</a>
|
| 529 |
+
</div>
|
| 530 |
+
</div>
|
| 531 |
+
</div>
|
| 532 |
+
</div>
|
| 533 |
+
""")
|
| 534 |
+
|
| 535 |
+
# ====== EVENT HANDLERS - Same pattern as simple app ======
|
| 536 |
+
|
| 537 |
+
# Auto-initialize on page load (same as simple app)
|
| 538 |
+
def _init():
    """Bootstrap a fresh session when the page first loads.

    start_new_session returns (session_id, history, msg_value, status, note);
    the message-box value is dropped because the load event only wires the
    other four outputs.
    """
    new_sid, new_history, _unused_msg, new_status, new_note = start_new_session("")
    return new_sid, new_history, new_status, new_note
|
| 541 |
+
|
| 542 |
+
app.load(
|
| 543 |
+
fn=_init,
|
| 544 |
+
inputs=None,
|
| 545 |
+
outputs=[session_id, chatbot, session_status, result_display]
|
| 546 |
+
)
|
| 547 |
+
|
| 548 |
+
# New session
|
| 549 |
+
new_session_btn.click(
|
| 550 |
+
fn=lambda: start_new_session(""),
|
| 551 |
+
inputs=None,
|
| 552 |
+
outputs=[session_id, chatbot, msg, session_status, result_display]
|
| 553 |
+
)
|
| 554 |
+
|
| 555 |
+
# Show/hide health profile section
|
| 556 |
+
def show_health_profile():
    """Make the health-profile panel visible."""
    reveal = gr.update(visible=True)
    return reveal
|
| 558 |
+
|
| 559 |
+
def hide_health_profile():
    """Collapse the health-profile panel."""
    conceal = gr.update(visible=False)
    return conceal
|
| 561 |
+
|
| 562 |
+
health_profile_btn.click(
|
| 563 |
+
fn=show_health_profile,
|
| 564 |
+
outputs=[health_profile_section]
|
| 565 |
+
)
|
| 566 |
+
|
| 567 |
+
back_btn.click(
|
| 568 |
+
fn=hide_health_profile,
|
| 569 |
+
outputs=[health_profile_section]
|
| 570 |
+
)
|
| 571 |
+
|
| 572 |
+
# Update health profile
|
| 573 |
+
update_profile_btn.click(
|
| 574 |
+
fn=update_health_profile,
|
| 575 |
+
inputs=[session_id, health_context],
|
| 576 |
+
outputs=[result_display, session_status]
|
| 577 |
+
).then(
|
| 578 |
+
fn=hide_health_profile,
|
| 579 |
+
outputs=[health_profile_section]
|
| 580 |
+
)
|
| 581 |
+
|
| 582 |
+
# Send message with lock/unlock pattern (inspired by simple app)
|
| 583 |
+
send_btn.click(
|
| 584 |
+
fn=lock_inputs,
|
| 585 |
+
inputs=None,
|
| 586 |
+
outputs=[send_btn, msg],
|
| 587 |
+
queue=False, # lock applies instantly
|
| 588 |
+
).then(
|
| 589 |
+
fn=send_message,
|
| 590 |
+
inputs=[msg, chatbot, session_id],
|
| 591 |
+
outputs=[chatbot, msg, session_id, session_status],
|
| 592 |
+
).then(
|
| 593 |
+
fn=unlock_inputs,
|
| 594 |
+
inputs=None,
|
| 595 |
+
outputs=[send_btn, msg],
|
| 596 |
+
queue=False,
|
| 597 |
+
)
|
| 598 |
+
|
| 599 |
+
# Enter/submit flow: same treatment
|
| 600 |
+
msg.submit(
|
| 601 |
+
fn=lock_inputs,
|
| 602 |
+
inputs=None,
|
| 603 |
+
outputs=[send_btn, msg],
|
| 604 |
+
queue=False,
|
| 605 |
+
).then(
|
| 606 |
+
fn=send_message,
|
| 607 |
+
inputs=[msg, chatbot, session_id],
|
| 608 |
+
outputs=[chatbot, msg, session_id, session_status],
|
| 609 |
+
).then(
|
| 610 |
+
fn=unlock_inputs,
|
| 611 |
+
inputs=None,
|
| 612 |
+
outputs=[send_btn, msg],
|
| 613 |
+
queue=False,
|
| 614 |
+
)
|
| 615 |
+
|
| 616 |
+
# Clear session
|
| 617 |
+
clear_session_btn.click(
|
| 618 |
+
fn=clear_session,
|
| 619 |
+
inputs=[session_id],
|
| 620 |
+
outputs=[session_id, chatbot, msg, session_status, result_display]
|
| 621 |
+
)
|
| 622 |
+
|
| 623 |
+
return app
|
| 624 |
+
|
| 625 |
+
def main():
    """Build the demo UI and launch the Gradio HTTP server.

    Bind address and port come from the environment:
    ``GRADIO_SERVER_NAME`` (default ``0.0.0.0``) and ``PORT`` (cloud)
    or ``GRADIO_SERVER_PORT`` (local), defaulting to 8080. Before
    launching, logs the resolved listening address, the relevant env
    vars, and a masked presence check of the API-key secrets.
    """
    app = create_app()

    # Resolve bind address and port. PORT (cloud) wins over
    # GRADIO_SERVER_PORT (local).
    server_name = os.getenv("GRADIO_SERVER_NAME", "0.0.0.0")
    raw_port = os.getenv("PORT") or os.getenv("GRADIO_SERVER_PORT") or "8080"
    try:
        server_port = int(raw_port)
    except ValueError:
        # Previously a malformed or empty PORT crashed startup with an
        # unhandled ValueError; fall back to the default instead.
        logger.warning("Invalid port value %r; falling back to 8080", raw_port)
        server_port = 8080

    # Basic health log to confirm listening address.
    try:
        hostname = socket.gethostname()
        ip_addr = socket.gethostbyname(hostname)
    except Exception:
        hostname = "unknown"
        ip_addr = "unknown"

    logger.info(
        "Starting Gradio app | bind=%s:%s | host=%s ip=%s",
        server_name,
        server_port,
        hostname,
        ip_addr,
    )
    logger.info(
        "Env: PORT=%s GRADIO_SERVER_NAME=%s GRADIO_SERVER_PORT=%s",
        os.getenv("PORT"),
        os.getenv("GRADIO_SERVER_NAME"),
        os.getenv("GRADIO_SERVER_PORT"),
    )

    # Secrets presence check — only lengths are logged, never the values.
    def _mask(v: str | None) -> str:
        if not v:
            return "<missing>"
        return f"set(len={len(v)})"

    logger.info(
        "Secrets: SEA_LION_API_KEY=%s GEMINI_API_KEY=%s",
        _mask(os.getenv("SEA_LION_API_KEY")),
        _mask(os.getenv("GEMINI_API_KEY")),
    )
    app.launch(
        share=False,
        server_name=server_name,  # cloud: 0.0.0.0, local: 127.0.0.1
        server_port=server_port,  # cloud: $PORT, local: 7860/8080
        debug=False,
        show_error=True,
        inbrowser=True,
    )
|
| 670 |
+
|
| 671 |
+
if __name__ == "__main__":
|
| 672 |
+
main()
|
gui/chatbot_dev_app.py
ADDED
|
@@ -0,0 +1,508 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# chatbot_dev_app.py
|
| 2 |
+
import gradio as gr
|
| 3 |
+
import logging
|
| 4 |
+
import socket
|
| 5 |
+
from datetime import datetime
|
| 6 |
+
from typing import List, Tuple, Optional
|
| 7 |
+
import os
|
| 8 |
+
|
| 9 |
+
from kallam.app.chatbot_manager import ChatbotManager
|
| 10 |
+
from kallam.infra.db import sqlite_conn # use the shared helper
|
| 11 |
+
|
| 12 |
+
# -----------------------
|
| 13 |
+
# Init
|
| 14 |
+
# -----------------------
|
| 15 |
+
chatbot_manager = ChatbotManager(log_level="DEBUG")
|
| 16 |
+
logging.basicConfig(level=logging.INFO)
|
| 17 |
+
logger = logging.getLogger(__name__)
|
| 18 |
+
|
| 19 |
+
class AppState:
    """Mutable UI-side state shared by every Gradio callback."""

    def __init__(self):
        # Identifier of the session currently bound to the UI ("" = none).
        self.current_session_id: str = ""
        # Running count of chat turns produced in this browser session.
        self.message_count: int = 0
        # Canned prompt injected when the follow-up button is pressed.
        self.followup_note: str = "Request follow-up analysis..."

    def reset(self):
        """Forget the active session; the follow-up note is kept intact."""
        self.current_session_id = ""
        self.message_count = 0
|
| 28 |
+
|
| 29 |
+
app_state = AppState()
|
| 30 |
+
|
| 31 |
+
# -----------------------
|
| 32 |
+
# Helpers
|
| 33 |
+
# -----------------------
|
| 34 |
+
def _safe_latency_str(v) -> str:
|
| 35 |
+
try:
|
| 36 |
+
return f"{float(v):.1f}"
|
| 37 |
+
except Exception:
|
| 38 |
+
return "0.0"
|
| 39 |
+
|
| 40 |
+
def _extract_stats_pack() -> tuple[dict, dict]:
    """Fetch stats for the active session as a (session_info, stats) pair.

    Defensive against a non-dict payload from the manager: anything
    unexpected collapses to two empty dicts.
    """
    payload = chatbot_manager.get_session_stats(app_state.current_session_id)
    if not isinstance(payload, dict):
        return {}, {}
    return payload.get("session_info", {}), payload.get("stats", {})
|
| 46 |
+
|
| 47 |
+
def get_current_session_status() -> str:
    """Return a Thai-language markdown banner describing the active session.

    Reads the module-level ``app_state``; when no session is selected a
    red "no active session" notice is returned instead of stats.
    """
    if not app_state.current_session_id:
        return "🔴 **ไม่มี Session ที่ใช้งาน** - กรุณาสร้าง Session ใหม่"

    try:
        # (session_info, stats) both default to {} if the manager misbehaves.
        session_info, stats = _extract_stats_pack()

        avg_latency = _safe_latency_str(stats.get("avg_latency"))
        # `or` also replaces an empty-string memo, not just None.
        saved_memories = session_info.get("saved_memories") or "ไม่ได้ระบุ"
        # The template is dedented on purpose: it is rendered as markdown,
        # so leading whitespace would change the output.
        return f"""
🟢 **Session ปัจจุบัน:** `{app_state.current_session_id}`
📅 **สร้างเมื่อ:** {session_info.get('timestamp', 'N/A')}
💬 **จำนวนข้อความ:** {stats.get('message_count', 0) or 0} ข้อความ
📋 **จำนวนสรุป:** {session_info.get('total_summaries', 0) or 0} ครั้ง
🏥 **สถานะกำหนดเอง:** {saved_memories}
⚡ **Latency เฉลี่ย:** {avg_latency} ms
🔧 **Model:** {session_info.get('model_used', 'N/A')}
""".strip()
    except Exception as e:
        # Broad catch keeps the status banner rendering even if stats fail.
        logger.error(f"Error getting session status: {e}")
        return f"❌ **Error:** ไม่สามารถโหลดข้อมูล Session {app_state.current_session_id}"
|
| 68 |
+
|
| 69 |
+
def get_session_list() -> List[str]:
    """Build dropdown labels of active sessions: "<id> | N💬 M📋 | <memo>"."""
    try:
        active = chatbot_manager.list_sessions(active_only=True, limit=50)
        labels = [
            f"{s['session_id']} | {s.get('total_messages', 0)}💬 "
            f"{s.get('total_summaries', 0)}📋 | "
            f"{(s.get('saved_memories') or 'ไม่ระบุ')[:20]}"
            for s in active
        ]
        # Placeholder entry keeps the dropdown non-empty when no sessions exist.
        return labels or ["ไม่มี Session"]
    except Exception as e:
        logger.error(f"Error getting session list: {e}")
        return ["Error loading sessions"]
|
| 82 |
+
|
| 83 |
+
def extract_session_id(dropdown_value: str) -> Optional[str]:
    """Pull the raw session id out of a dropdown label.

    Returns None for the empty value and for the placeholder entries
    produced by get_session_list().
    """
    placeholders = ("ไม่มี Session", "Error loading sessions")
    if not dropdown_value or dropdown_value in placeholders:
        return None
    # Labels look like "<id> | N💬 M📋 | memo"; a bare id passes through.
    session_id, _, _ = dropdown_value.partition(" | ")
    return session_id
|
| 87 |
+
|
| 88 |
+
def refresh_session_list():
    """Reload dropdown choices and preselect the first entry."""
    choices = get_session_list()
    selected = choices[0] if choices else None
    return gr.update(choices=choices, value=selected)
|
| 91 |
+
|
| 92 |
+
def create_new_session(saved_memories: str = "") -> Tuple[List, str, str, str, str]:
    """Start a fresh manager session and reset the chat UI.

    Returns (history, msg_value, result_markdown, status_markdown, memo_value).
    """
    try:
        # Empty memo is normalized to None so the manager stores NULL.
        new_id = chatbot_manager.start_session(saved_memories=saved_memories or None)
        app_state.current_session_id = new_id
        app_state.message_count = 0
        outcome = f"✅ **สร้าง Session ใหม่สำเร็จ!**\n\n🆔 Session ID: `{new_id}`"
        return [], "", outcome, get_current_session_status(), saved_memories
    except Exception as e:
        logger.error(f"Error creating new session: {e}")
        failure = f"❌ **ไม่สามารถสร้าง Session ใหม่:** {e}"
        return [], "", failure, get_current_session_status(), ""
|
| 103 |
+
|
| 104 |
+
def switch_session(dropdown_value: str) -> Tuple[List, str, str, str, str]:
    """Bind the UI to the session selected in the dropdown.

    Returns (history, msg_value, result_markdown, status_markdown, memo_value).
    On any failure the chat is cleared and an error markdown is shown.
    """
    sid = extract_session_id(dropdown_value)
    if not sid:
        return [], "", "❌ **Session ID ไม่ถูกต้อง**", get_current_session_status(), ""

    try:
        session = chatbot_manager.get_session(sid)
        if not session:
            return [], "", f"❌ **ไม่พบ Session:** {sid}", get_current_session_status(), ""
        app_state.current_session_id = sid
        app_state.message_count = session.get("total_messages", 0)

        # use the new helper on manager (provided)
        chat_history = chatbot_manager.get_original_chat_history(sid)
        # The original if/elif branches built identical dicts for both
        # roles; a single membership filter expresses the same mapping
        # (other roles, e.g. system/tool turns, are dropped as before).
        gr_history = [
            {"role": m.get("role"), "content": m.get("content", "")}
            for m in chat_history
            if m.get("role") in ("user", "assistant")
        ]

        status = get_current_session_status()
        result = f"✅ **เปลี่ยน Session สำเร็จ!**\n\n🆔 Session ID: `{sid}`"
        saved_memories = session.get("saved_memories", "")
        return gr_history, "", result, status, saved_memories
    except Exception as e:
        logger.error(f"Error switching session: {e}")
        return [], "", f"❌ **ไม่สามารถเปลี่ยน Session:** {e}", get_current_session_status(), ""
|
| 134 |
+
|
| 135 |
+
def get_session_info() -> str:
    """Return a detailed Thai-language markdown report for the active session.

    Covers identity, custom memo, usage statistics (messages, tokens,
    latency) and the stored summary history. Returns an error markdown
    when no session is active or the stats lookup fails.
    """
    if not app_state.current_session_id:
        return "❌ **ไม่มี Session ที่ใช้งาน**"
    try:
        session_info, stats = _extract_stats_pack()

        # `or 0` guards a NULL avg_latency coming back from the DB.
        latency_str = f"{float(stats.get('avg_latency') or 0):.2f}"
        total_tokens_in = stats.get("total_tokens_in") or 0
        total_tokens_out = stats.get("total_tokens_out") or 0
        saved_memories = session_info.get("saved_memories") or "ไม่ได้ระบุ"
        summarized_history = session_info.get("summarized_history") or "ยังไม่มีการสรุป"

        # Markdown template is intentionally unindented — leading spaces
        # would change how Gradio renders it.
        return f"""
## 📊 ข้อมูลรายละเอียด Session: `{app_state.current_session_id}`

### 🔧 ข้อมูลพื้นฐาน
- **Session ID:** `{session_info.get('session_id', 'N/A')}`
- **สร้างเมื่อ:** {session_info.get('timestamp', 'N/A')}
- **ใช้งานล่าสุด:** {session_info.get('last_activity', 'N/A')}
- **Model:** {session_info.get('model_used', 'N/A')}
- **สถานะ:** {'🟢 Active' if session_info.get('is_active') else '🔴 Inactive'}

### 🏥 ข้อมูลสถานะกำหนดเอง
- **สถานะกำหนดเอง:** {saved_memories}

### 📈 สถิติการใช้งาน
- **จำนวนข้อความทั้งหมด:** {stats.get('message_count', 0) or 0} ข้อความ
- **จำนวนสรุป:** {session_info.get('total_summaries', 0) or 0} ครั้ง
- **Token Input รวม:** {total_tokens_in:,} tokens
- **Token Output รวม:** {total_tokens_out:,} tokens
- **Latency เฉลี่ย:** {latency_str} ms
- **ข้อความแรก:** {stats.get('first_message', 'N/A') or 'N/A'}
- **ข้อความล่าสุด:** {stats.get('last_message', 'N/A') or 'N/A'}

### 📋 ประวัติการสรุป
{summarized_history}
""".strip()
    except Exception as e:
        logger.error(f"Error getting session info: {e}")
        return f"❌ **Error:** {e}"
|
| 175 |
+
|
| 176 |
+
def get_all_sessions_info() -> str:
    """Return a markdown digest of up to 20 sessions, active or not.

    Each entry shows an activity icon, creation/last-activity times,
    message/summary counts, a 30-char memo preview and the model name.
    """
    try:
        sessions = chatbot_manager.list_sessions(active_only=False, limit=20)
        if not sessions:
            return "📭 **ไม่มี Session ในระบบ**"

        parts = ["# 📁 ข้อมูล Session ทั้งหมด\n"]
        for i, s in enumerate(sessions, 1):
            status_icon = "🟢" if s.get("is_active") else "🔴"
            # Truncate the memo so one long note cannot flood the list.
            saved = (s.get("saved_memories") or "ไม่ระบุ")[:30]
            parts.append(f"""
## {i}. {status_icon} `{s['session_id']}`
- **สร้าง:** {s.get('timestamp', 'N/A')}
- **ใช้งานล่าสุด:** {s.get('last_activity', 'N/A')}
- **ข้อความ:** {s.get('total_messages', 0)} | **สรุป:** {s.get('total_summaries', 0)}
- **สภาวะ:** {saved}
- **Model:** {s.get('model_used', 'N/A')}
""".strip())
        return "\n\n".join(parts)
    except Exception as e:
        logger.error(f"Error getting all sessions info: {e}")
        return f"❌ **Error:** {e}"
|
| 198 |
+
|
| 199 |
+
def update_medical_saved_memories(saved_memories: str) -> Tuple[str, str]:
    """Persist the custom memo for the active session directly in SQLite.

    Returns (status_markdown, result_markdown).
    """
    if not app_state.current_session_id:
        return get_current_session_status(), "❌ **ไม่มี Session ที่ใช้งาน**"

    cleaned = saved_memories.strip()
    if not cleaned:
        return get_current_session_status(), "❌ **กรุณาสถานะกำหนดเอง**"

    try:
        # Touch last_activity alongside the memo so the session sorts as recent.
        with sqlite_conn(str(chatbot_manager.db_path)) as conn:
            conn.execute(
                "UPDATE sessions SET saved_memories = ?, last_activity = ? WHERE session_id = ?",
                (cleaned, datetime.now().isoformat(), app_state.current_session_id),
            )
        outcome = f"✅ **อัปเดตสถานะกำหนดเองสำเร็จ!**\n\n📝 **ข้อมูลใหม่:** {cleaned}"
        return get_current_session_status(), outcome
    except Exception as e:
        logger.error(f"Error updating saved_memories: {e}")
        return get_current_session_status(), f"❌ **ไม่สามารถอัปเดตสถานะกำหนดเอง:** {e}"
|
| 217 |
+
|
| 218 |
+
def process_chat_message(user_message: str, history: List) -> Tuple[List, str]:
    """Append the user turn, ask the manager for a reply, append it too.

    Always returns (history, "") — the empty string clears the textbox.
    Errors surface as assistant-side markdown instead of raising.
    """
    if not app_state.current_session_id:
        history.append({"role": "assistant", "content": "❌ **กรุณาสร้าง Session ใหม่ก่อนใช้งาน**"})
        return history, ""
    if not user_message.strip():
        # Blank submissions are ignored silently.
        return history, ""

    try:
        history.append({"role": "user", "content": user_message})
        reply = chatbot_manager.handle_message(
            session_id=app_state.current_session_id,
            user_message=user_message,
        )
        history.append({"role": "assistant", "content": reply})
        app_state.message_count += 2  # one user turn + one assistant turn
    except Exception as e:
        logger.error(f"Error processing chat message: {e}")
        history.append({"role": "assistant", "content": f"❌ **ข้อผิดพลาด:** {e}"})
    return history, ""
|
| 238 |
+
|
| 239 |
+
def generate_followup(history: List) -> List:
    """Inject the canned follow-up note as a user turn and fetch a reply.

    The new manager has no dedicated follow-up endpoint, so the note is
    sent through handle_message like any other user message.
    """
    if not app_state.current_session_id:
        history.append({"role": "assistant", "content": "❌ **กรุณาสร้าง Session ใหม่ก่อนใช้งาน**"})
        return history
    try:
        followup_text = app_state.followup_note
        history.append({"role": "user", "content": followup_text})
        reply = chatbot_manager.handle_message(
            session_id=app_state.current_session_id,
            user_message=followup_text,
        )
        history.append({"role": "assistant", "content": reply})
        app_state.message_count += 2  # one injected turn + one reply
    except Exception as e:
        logger.error(f"Error generating follow-up: {e}")
        history.append({"role": "assistant", "content": f"❌ **ไม่สามารถสร้างการวิเคราะห์:** {e}"})
    return history
|
| 259 |
+
|
| 260 |
+
def force_update_summary() -> str:
    """Force an immediate summary of the active session (dev tool)."""
    if not app_state.current_session_id:
        return "❌ **ไม่มี Session ที่ใช้งาน**"
    try:
        summary_text = chatbot_manager.summarize_session(app_state.current_session_id)
    except Exception as e:
        logger.error(f"Error forcing summary update: {e}")
        return f"❌ **ไม่สามารถสรุปข้อมูล:** {e}"
    return f"✅ **สรุปข้อมูลสำเร็จ!**\n\n📋 **สรุป:** {summary_text}"
|
| 269 |
+
|
| 270 |
+
def clear_session() -> Tuple[List, str, str, str, str]:
    """Delete the active session via the manager and reset local UI state.

    Returns (history, msg_value, result_markdown, status_markdown, memo_value).
    """
    if not app_state.current_session_id:
        return [], "", "❌ **ไม่มี Session ที่ใช้งาน**", get_current_session_status(), ""
    try:
        # Remember the id for the confirmation message before resetting.
        doomed = app_state.current_session_id
        chatbot_manager.delete_session(doomed)
        app_state.reset()
        outcome = f"✅ **ลบ Session สำเร็จ!**\n\n🗑️ **Session ที่ลบ:** `{doomed}`"
        return [], "", outcome, get_current_session_status(), ""
    except Exception as e:
        logger.error(f"Error clearing session: {e}")
        return [], "", f"❌ **ไม่สามารถลบ Session:** {e}", get_current_session_status(), ""
|
| 281 |
+
|
| 282 |
+
def clear_all_summaries() -> str:
    """Drop every stored summary row for the active session (dev tool)."""
    if not app_state.current_session_id:
        return "❌ **ไม่มี Session ที่ใช้งาน**"
    try:
        # Direct SQL: the manager exposes no summary-deletion API.
        with sqlite_conn(str(chatbot_manager.db_path)) as conn:
            conn.execute(
                "DELETE FROM summaries WHERE session_id = ?",
                (app_state.current_session_id,),
            )
    except Exception as e:
        logger.error(f"Error clearing summaries: {e}")
        return f"❌ **ไม่สามารถล้างสรุป:** {e}"
    return f"✅ **ล้างสรุปสำเร็จ!**\n\n🗑️ **Session:** `{app_state.current_session_id}`"
|
| 292 |
+
|
| 293 |
+
def export_session() -> str:
    """Export the active session to JSON via the manager."""
    if not app_state.current_session_id:
        return "❌ **ไม่มี Session ที่ใช้งาน**"
    try:
        chatbot_manager.export_session_json(app_state.current_session_id)
    except Exception as e:
        logger.error(f"Error exporting session: {e}")
        return f"❌ **ไม่สามารถส่งออกข้อมูล:** {e}"
    return "✅ **ส่งออกข้อมูลสำเร็จ!**"
|
| 302 |
+
|
| 303 |
+
def export_all_sessions() -> str:
    """Export every session to JSON via the manager (dev tool)."""
    try:
        chatbot_manager.export_all_sessions_json()
        return "✅ **ส่งออกข้อมูลสำเร็จ!**"
    except Exception as e:
        # Fixed log text: it was copy-pasted from export_session() and
        # wrongly claimed a single-session export had failed.
        logger.error(f"Error exporting all sessions: {e}")
        return f"❌ **ไม่สามารถส่งออกข้อมูล:** {e}"
|
| 310 |
+
# -----------------------
|
| 311 |
+
# UI
|
| 312 |
+
# -----------------------
|
| 313 |
+
def create_app() -> gr.Blocks:
    """Assemble the developer Gradio UI and wire all event handlers.

    Layout: session management row, per-session info accordion, custom
    memo editor, the chat panel, and dev tools (forced summary, summary
    wipe, JSON export). Returns the constructed Blocks app.
    """
    custom_css = """
    .gradio-container { max-width: 1400px !important; margin: 0 auto !important; }
    .tab-nav { background: linear-gradient(90deg, #667eea 0%, #764ba2 100%); }
    .chat-container { border-radius: 10px; border: 1px solid #e0e0e0; box-shadow: 0 2px 8px rgba(0,0,0,0.1); }
    .summary-box { border-radius: 8px; padding: 15px; margin: 10px 0; }
    .session-info { border-radius: 8px; padding: 15px; margin: 10px 0;
                    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
                    color: white; font-weight: 500; }
    .saved_memories-box { border-radius: 8px; padding: 10px; margin: 5px 0; border: 1px solid #ddd; }
    .stat-card { background: white; border-radius: 8px; padding: 15px; box-shadow: 0 2px 4px rgba(0,0,0,0.1); border-left: 4px solid #28a745; }
    .red-button { background-color: red !important; color: white !important; }
    """

    with gr.Blocks(title="🌟 DEMO ระบบจำลองการคุยกับลูกและให้คำแนะนำสำหรับผู้ปกครอง",
                   css=custom_css) as app:

        gr.Markdown("""
        # 🌟 ระบบจำลองการคุยกับลูกและให้คำแนะนำสำหรับผู้ปกครอง

        🔄 **สรุปอัตโนมัติ:** ทุก 10 ข้อความ
        🔔 **วิเคราะห์บทสนทนา:** เรียกใช้ด้วยปุ่ม Follow-up
        💾 **Session Management**
        🏥 **Custom saved_memories**
        📊 **Analytics**
        """)

        session_status = gr.Markdown(value=get_current_session_status(), elem_classes=["session-info"])

        gr.Markdown("## 🗂️ การจัดการ Session")
        with gr.Row():
            with gr.Column(scale=3):
                session_dropdown = gr.Dropdown(
                    choices=get_session_list(),
                    value=None,
                    label="🗒️ เลือก Session",
                    info="เลือก session ที่ต้องการเปลี่ยนไป",
                )
            with gr.Column(scale=1):
                refresh_btn = gr.Button("🔄 รีเฟรช", variant="primary")
                switch_btn = gr.Button("🔀 โหลด Session", variant="secondary")
                new_session_btn = gr.Button("➕ Session ใหม่", variant="secondary")

        with gr.Row():
            session_info_btn = gr.Button("👀 ข้อมูล Session", variant="secondary")
            all_sessions_btn = gr.Button("📁 ดู Session ทั้งหมด", variant="secondary")
            export_btn = gr.Button("📤 ส่งออกข้อมูลทั้งหมดเป็น.json (dev)", variant="secondary")

        with gr.Accordion("📊 ข้อมูลรายละเอียด Session", open=False):
            session_result = gr.Markdown(value="**กำลังรอการอัปเดต...**", elem_classes=["summary-box"])
            session_info_display = gr.Markdown(value="", elem_classes=["summary-box"])

        gr.Markdown("---")
        gr.Markdown("## 🏥 การจัดการสถานะการสนทนา")

        with gr.Row():
            health_context = gr.Textbox(
                label="🏥 ข้อมูลสถานะของการสนทนา",
                placeholder="เช่น: ชื่อเด็ก, อายุ, พฤติกรรมที่อยากโฟกัส",
                value="",
                max_lines=5, lines=3,
                info="ข้อมูลนี้จะถูกเก็บใน session และใช้ปรับแต่งบทสนทนา",
                elem_classes=["saved_memories-box"],
            )
            update_saved_memories_btn = gr.Button("💾 อัปเดตข้อมูล", variant="primary")

        gr.Markdown("---")
        gr.Markdown("## 💬 แชทบอทจำลองการสนทนา")

        chatbot = gr.Chatbot(
            label="💭 การสนทนากับ AI",
            height=500, show_label=True, type="messages",
            elem_classes=["chat-container"], avatar_images=("👤", "🤖")
        )

        with gr.Row():
            with gr.Column(scale=4):
                msg = gr.Textbox(
                    label="💬 พิมพ์ข้อความของคุณ",
                    placeholder="พิมพ์คำถามหรือข้อมูล...",
                    lines=2, max_lines=8,
                )
            with gr.Column(scale=1):
                send_btn = gr.Button("📤 ส่งข้อความ", variant="primary")
                followup_btn = gr.Button("🔔 สร้างการวิเคราะห์บทสนทนา", variant="secondary")
                update_summary_btn = gr.Button("📋 บังคับสรุปแชท (dev)", variant="secondary")

        with gr.Row():
            clear_chat_btn = gr.Button("🗑️ ล้าง Session", variant="secondary")
            clear_summary_btn = gr.Button("📝 ล้างสรุป", variant="secondary")

        # Small helpers for button UX
        def set_button_loading(text): return gr.update(value=text, elem_classes=["red-button"], variant="stop")
        def reset_button(text, variant): return gr.update(value=text, elem_classes=[], variant=variant)

        # Wiring
        refresh_btn.click(fn=refresh_session_list, outputs=[session_dropdown])

        switch_btn.click(
            fn=switch_session,
            inputs=[session_dropdown],
            outputs=[chatbot, msg, session_result, session_status, health_context],
        )

        new_session_btn.click(
            fn=create_new_session,
            inputs=[health_context],
            outputs=[chatbot, msg, session_result, session_status, health_context],
        )

        session_info_btn.click(fn=get_session_info, outputs=[session_info_display])
        all_sessions_btn.click(fn=get_all_sessions_info, outputs=[session_info_display])

        # FIX: export_btn previously had TWO click handlers — one plain call
        # and one loading chain — so a single click ran export_all_sessions
        # twice (the chained call also discarded its result). One chain now
        # handles loading state, the export, and the result display.
        export_btn.click(fn=lambda: set_button_loading("⏳ ประมวลผล..."), outputs=[export_btn]) \
            .then(fn=export_all_sessions, outputs=[session_info_display]) \
            .then(fn=lambda: reset_button("📤 ส่งออกข้อมูลทั้งหมดเป็น.json (dev)", variant="secondary"), outputs=[export_btn])

        update_saved_memories_btn.click(
            fn=update_medical_saved_memories,
            inputs=[health_context],
            outputs=[session_status, session_result],
        )

        send_btn.click(fn=lambda: set_button_loading("⏳ ประมวลผล..."), outputs=[send_btn]) \
            .then(fn=process_chat_message, inputs=[msg, chatbot], outputs=[chatbot, msg]) \
            .then(fn=lambda: reset_button("📤 ส่งข้อความ", "primary"), outputs=[send_btn])

        msg.submit(fn=process_chat_message, inputs=[msg, chatbot], outputs=[chatbot, msg])

        followup_btn.click(fn=lambda: set_button_loading("⏳ ประมวลผล..."), outputs=[followup_btn]) \
            .then(fn=generate_followup, inputs=[chatbot], outputs=[chatbot]) \
            .then(fn=lambda: reset_button("🔔 สร้างการวิเคราะห์บทสนทนา", "secondary"), outputs=[followup_btn])

        update_summary_btn.click(fn=lambda: set_button_loading("⏳ กำลังสรุป..."), outputs=[update_summary_btn]) \
            .then(fn=force_update_summary, outputs=[session_result]) \
            .then(fn=lambda: reset_button("📋 บังคับสรุปแชท (dev)", "secondary"), outputs=[update_summary_btn])

        clear_chat_btn.click(fn=lambda: set_button_loading("⏳ กำลังลบ..."), outputs=[clear_chat_btn]) \
            .then(fn=clear_session, outputs=[chatbot, msg, session_result, session_status, health_context]) \
            .then(fn=lambda: reset_button("🗑️ ล้าง Session", "secondary"), outputs=[clear_chat_btn])

        clear_summary_btn.click(fn=lambda: set_button_loading("⏳ กำลังล้าง..."), outputs=[clear_summary_btn]) \
            .then(fn=clear_all_summaries, outputs=[session_result]) \
            .then(fn=lambda: reset_button("📝 ล้างสรุป", "secondary"), outputs=[clear_summary_btn])

    return app
|
| 459 |
+
|
| 460 |
+
def main():
    """Build the dev UI and launch the Gradio HTTP server (share + debug on).

    Bind address and port come from the environment:
    ``GRADIO_SERVER_NAME`` (default ``0.0.0.0``) and ``PORT`` (cloud)
    or ``GRADIO_SERVER_PORT`` (local), defaulting to 8080. Before
    launching, logs the resolved listening address, the relevant env
    vars, and a masked presence check of the API-key secrets.
    """
    app = create_app()

    # Resolve bind address and port. PORT (cloud) wins over
    # GRADIO_SERVER_PORT (local).
    server_name = os.getenv("GRADIO_SERVER_NAME", "0.0.0.0")
    raw_port = os.getenv("PORT") or os.getenv("GRADIO_SERVER_PORT") or "8080"
    try:
        server_port = int(raw_port)
    except ValueError:
        # Previously a malformed or empty PORT crashed startup with an
        # unhandled ValueError; fall back to the default instead.
        logger.warning("Invalid port value %r; falling back to 8080", raw_port)
        server_port = 8080

    # Basic health log to confirm listening address.
    try:
        hostname = socket.gethostname()
        ip_addr = socket.gethostbyname(hostname)
    except Exception:
        hostname = "unknown"
        ip_addr = "unknown"

    logger.info(
        "Starting Gradio app | bind=%s:%s | host=%s ip=%s",
        server_name,
        server_port,
        hostname,
        ip_addr,
    )
    logger.info(
        "Env: PORT=%s GRADIO_SERVER_NAME=%s GRADIO_SERVER_PORT=%s",
        os.getenv("PORT"),
        os.getenv("GRADIO_SERVER_NAME"),
        os.getenv("GRADIO_SERVER_PORT"),
    )

    # Secrets presence check — only lengths are logged, never the values.
    def _mask(v: str | None) -> str:
        if not v:
            return "<missing>"
        return f"set(len={len(v)})"

    logger.info(
        "Secrets: SEA_LION_API_KEY=%s GEMINI_API_KEY=%s",
        _mask(os.getenv("SEA_LION_API_KEY")),
        _mask(os.getenv("GEMINI_API_KEY")),
    )

    app.launch(
        share=True,
        server_name=server_name,  # cloud: 0.0.0.0, local: 127.0.0.1
        server_port=server_port,  # cloud: $PORT, local: 7860/8080
        debug=True,
        show_error=True,
        inbrowser=True,
    )
|
| 506 |
+
|
| 507 |
+
if __name__ == "__main__":
|
| 508 |
+
main()
|
gui/simple_gui.py
ADDED
|
@@ -0,0 +1,156 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# simple_chat_app.py
|
| 2 |
+
import gradio as gr
|
| 3 |
+
from datetime import datetime
|
| 4 |
+
|
| 5 |
+
# Your existing manager
|
| 6 |
+
from kallam.app.chatbot_manager import ChatbotManager
|
| 7 |
+
|
| 8 |
+
# Single shared manager instance used by every UI handler in this module.
mgr = ChatbotManager(log_level="INFO")
|
| 9 |
+
|
| 10 |
+
# -----------------------
|
| 11 |
+
# Core handlers
|
| 12 |
+
# -----------------------
|
| 13 |
+
def _session_status(session_id: str) -> str:
|
| 14 |
+
if not session_id:
|
| 15 |
+
return "🔴 No active session. Click **New Session**."
|
| 16 |
+
s = mgr.get_session(session_id) or {}
|
| 17 |
+
ts = s.get("timestamp", "N/A")
|
| 18 |
+
model = s.get("model_used", "N/A")
|
| 19 |
+
total = s.get("total_messages", 0)
|
| 20 |
+
return (
|
| 21 |
+
f"🟢 **Session:** `{session_id}` \n"
|
| 22 |
+
f"📅 Created: {ts} \n"
|
| 23 |
+
f"🤖 Model: {model} \n"
|
| 24 |
+
f"💬 Messages: {total}"
|
| 25 |
+
)
|
| 26 |
+
|
| 27 |
+
def start_new_session(saved_memories: str = ""):
    """Create a fresh chat session and return the reset UI state.

    Returns a 5-tuple: (session_id, history, cleared_textbox, status_md, note).
    """
    memories = saved_memories or None
    new_sid = mgr.start_session(saved_memories=memories)
    return new_sid, [], "", _session_status(new_sid), "✅ New session created."
|
| 33 |
+
|
| 34 |
+
def send_message(user_msg: str, history: list, session_id: str):
    """Process one chat turn.

    Returns (updated_history, cleared_textbox, session_id, status_markdown).
    Blank messages are ignored; a missing session is recreated on the fly.
    """
    if not session_id:
        # Defensive fallback: spin up a session rather than erroring out.
        sid, fresh_history, _, status, _ = start_new_session("")
        fresh_history.append(
            {"role": "assistant", "content": "New session spun up automatically. Proceed."}
        )
        return fresh_history, "", sid, status

    if not user_msg.strip():
        # Nothing to send; just refresh the status panel.
        return history, "", session_id, _session_status(session_id)

    # User turn, then assistant turn (the manager handles persistence).
    updated = list(history)
    updated.append({"role": "user", "content": user_msg})
    reply = mgr.handle_message(session_id=session_id, user_message=user_msg)
    updated.append({"role": "assistant", "content": reply})

    return updated, "", session_id, _session_status(session_id)
|
| 52 |
+
|
| 53 |
+
def force_summary(session_id: str):
    """Trigger an immediate summary for the session; return a Markdown result."""
    if not session_id:
        return "❌ No active session."
    try:
        summary_text = mgr.summarize_session(session_id)
    except Exception as exc:  # surface the failure in the UI instead of crashing
        return f"❌ Failed to summarize: {exc}"
    return f"📋 Summary updated:\n\n{summary_text}"
|
| 61 |
+
|
| 62 |
+
def lock_inputs():
    """Disable the send button and the message textbox (in that output order)."""
    locked_send = gr.update(interactive=False)
    locked_box = gr.update(interactive=False)
    return locked_send, locked_box
|
| 65 |
+
|
| 66 |
+
def unlock_inputs():
    """Re-enable the send button and the message textbox (in that output order)."""
    enabled_send = gr.update(interactive=True)
    enabled_box = gr.update(interactive=True)
    return enabled_send, enabled_box
|
| 69 |
+
|
| 70 |
+
# -----------------------
|
| 71 |
+
# Gradio app
|
| 72 |
+
# -----------------------
|
| 73 |
+
def create_app():
    """Assemble the minimal Gradio Blocks UI and wire its event handlers.

    Layout: a status row with an optional "memories" textbox and a
    new-session button, a Chatbot panel, and a message row. Both the send
    button and Enter-submit run the same lock -> send_message -> unlock
    chain so the inputs cannot be double-fired mid-turn.
    """
    with gr.Blocks(title="Minimal Therapeutic Chat Sessions") as demo:
        gr.Markdown("# Minimal Chat Sessions • clean and boring but try me.")

        # Per-browser-tab state holding the active session id ("" = none yet).
        session_id = gr.State(value="")
        with gr.Row():
            status_md = gr.Markdown(value="🔄 Initializing session...")
            saved_memories = gr.Textbox(
                label="เนื้อหาเกี่ยวกับคุณ (optional)",
                placeholder="กด ➕ session ใหม่เพื่อใช้งาน e.g., อายุ, เพศ, นิสัย",
                lines=2,
            )
            new_btn = gr.Button("➕ session ใหม่", variant="primary")
            # summarize_btn = gr.Button("📋 Summarize", variant="secondary")

        chat = gr.Chatbot(label="Chat", type="messages", height=420)
        with gr.Row():
            msg = gr.Textbox(label="Message box", placeholder="พิมพ์ข้อความ", lines=1, scale=9)
            send = gr.Button("↩", variant="primary", scale=1, min_width=40)

        # Scratch area for notes/results shown below the chat.
        result_md = gr.Markdown(visible=True)

        # -------- wiring --------
        # On page load: create a fresh session automatically.
        def _init():
            # Drop the cleared-textbox slot; load has no textbox output.
            sid, history, _, status, note = start_new_session("")
            return sid, history, status, note
        demo.load(_init, inputs=None, outputs=[session_id, chat, status_md, result_md])

        # New session button: also clears the message textbox.
        def _new(saved):
            sid, history, _, status, note = start_new_session(saved or "")
            return sid, history, "", status, note
        new_btn.click(_new, inputs=[saved_memories], outputs=[session_id, chat, msg, status_md, result_md])

        # Click flow: lock -> send -> unlock
        send.click(
            fn=lock_inputs,
            inputs=None,
            outputs=[send, msg],
            queue=False,  # lock applies instantly
        ).then(
            fn=send_message,
            inputs=[msg, chat, session_id],
            outputs=[chat, msg, session_id, status_md],
        ).then(
            fn=unlock_inputs,
            inputs=None,
            outputs=[send, msg],
            queue=False,
        )

        # Enter/submit flow: same treatment as the button click.
        msg.submit(
            fn=lock_inputs,
            inputs=None,
            outputs=[send, msg],
            queue=False,
        ).then(
            fn=send_message,
            inputs=[msg, chat, session_id],
            outputs=[chat, msg, session_id, status_md],
        ).then(
            fn=unlock_inputs,
            inputs=None,
            outputs=[send, msg],
            queue=False,
        )

    return demo
|
| 143 |
+
|
| 144 |
+
def main():
    """Launch the minimal GUI.

    Bind host/port can be overridden via GRADIO_SERVER_NAME and
    PORT / GRADIO_SERVER_PORT (matching gui/chatbot_demo.py); the defaults
    preserve the original 0.0.0.0:7860 behavior.
    """
    import os  # local import: keeps the module's top-level deps unchanged

    app = create_app()
    host = os.getenv("GRADIO_SERVER_NAME", "0.0.0.0")
    raw_port = os.getenv("PORT") or os.getenv("GRADIO_SERVER_PORT") or "7860"
    try:
        port = int(raw_port)
    except ValueError:
        port = 7860  # ignore malformed env values rather than crashing

    app.launch(
        share=True,
        server_name=host,
        server_port=port,
        debug=False,
        show_error=True,
        inbrowser=True,
    )


if __name__ == "__main__":
    main()
|
pyproject.toml
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[build-system]
|
| 2 |
+
requires = ["setuptools>=69", "wheel"]
|
| 3 |
+
build-backend = "setuptools.build_meta"
|
| 4 |
+
|
| 5 |
+
[project]
|
| 6 |
+
name = "kallam"
|
| 7 |
+
version = "0.1.1"
|
| 8 |
+
description = "KaLLaM: Multi-agent chatbot with orchestration, LLM evaluation, and SQLite persistence"
|
| 9 |
+
authors = [
|
| 10 |
+
{ name = "Koalar" }
|
| 11 |
+
]
|
| 12 |
+
readme = "README.md"
|
| 13 |
+
requires-python = ">=3.10"
|
| 14 |
+
|
| 15 |
+
dependencies = [
|
| 16 |
+
"python-dotenv>=1.0.1", # for load_dotenv
|
| 17 |
+
"strands-agents>=0.1.0", # strands Agent + BedrockModel
|
| 18 |
+
"strands-agents-tools",
|
| 19 |
+
"strands-agents-builder",
|
| 20 |
+
"strands-agents[openai]>=1.0.0",
|
| 21 |
+
"google-genai", # google cloud api
|
| 22 |
+
"openai>=1.40.0",
|
| 23 |
+
"boto3>=1.34.0", # AWS SDK (brings in botocore)
|
| 24 |
+
"numpy>=1.26.0", # numerical utils
|
| 25 |
+
"sentence-transformers>=2.6.0", # embeddings
|
| 26 |
+
"transformers>=4.40.0", # Hugging Face transformers
|
| 27 |
+
"gradio>=4.0.0", # UI
|
| 28 |
+
"pyngrok==7.3.0", # Hosting gradio interface
|
| 29 |
+
"matplotlib"
|
| 30 |
+
]
|
| 31 |
+
|
| 32 |
+
[project.optional-dependencies]
|
| 33 |
+
dev = [
|
| 34 |
+
"pytest>=7.0", # testing
|
| 35 |
+
"pytest-cov>=4.0", # coverage reports
|
| 36 |
+
"ruff>=0.4.0", # linting
|
| 37 |
+
"mypy>=1.10.0", # static typing
|
| 38 |
+
]
|
| 39 |
+
|
| 40 |
+
[tool.setuptools]
|
| 41 |
+
package-dir = {"" = "src"}
|
| 42 |
+
|
| 43 |
+
[tool.pytest.ini_options]
|
| 44 |
+
testpaths = ["tests"]
|
| 45 |
+
addopts = "-q"
|
| 46 |
+
|
| 47 |
+
[tool.ruff]
|
| 48 |
+
line-length = 100
|
| 49 |
+
target-version = "py310"
|
| 50 |
+
|
| 51 |
+
[tool.mypy]
|
| 52 |
+
python_version = "3.10"
|
| 53 |
+
strict = true
|
src/kallam.egg-info/PKG-INFO
ADDED
|
@@ -0,0 +1,174 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Metadata-Version: 2.4
|
| 2 |
+
Name: kallam
|
| 3 |
+
Version: 0.1.0
|
| 4 |
+
Summary: KaLLaM: Multi-agent chatbot with orchestration, LLM evaluation, and SQLite persistence
|
| 5 |
+
Author: Koalar
|
| 6 |
+
Requires-Python: >=3.10
|
| 7 |
+
Description-Content-Type: text/markdown
|
| 8 |
+
License-File: LICENSE
|
| 9 |
+
Requires-Dist: python-dotenv>=1.0.1
|
| 10 |
+
Requires-Dist: strands-agents>=0.1.0
|
| 11 |
+
Requires-Dist: strands-agents-tools
|
| 12 |
+
Requires-Dist: strands-agents-builder
|
| 13 |
+
Requires-Dist: strands-agents[openai]>=1.0.0
|
| 14 |
+
Requires-Dist: google-genai
|
| 15 |
+
Requires-Dist: openai>=1.40.0
|
| 16 |
+
Requires-Dist: boto3>=1.34.0
|
| 17 |
+
Requires-Dist: numpy>=1.26.0
|
| 18 |
+
Requires-Dist: sentence-transformers>=2.6.0
|
| 19 |
+
Requires-Dist: transformers>=4.40.0
|
| 20 |
+
Requires-Dist: gradio>=4.0.0
|
| 21 |
+
Requires-Dist: pyngrok==7.3.0
|
| 22 |
+
Requires-Dist: matplotlib
|
| 23 |
+
Provides-Extra: dev
|
| 24 |
+
Requires-Dist: pytest>=7.0; extra == "dev"
|
| 25 |
+
Requires-Dist: pytest-cov>=4.0; extra == "dev"
|
| 26 |
+
Requires-Dist: ruff>=0.4.0; extra == "dev"
|
| 27 |
+
Requires-Dist: mypy>=1.10.0; extra == "dev"
|
| 28 |
+
Dynamic: license-file
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
---
|
| 32 |
+
|
| 33 |
+
# KaLLaM – Motivational-Therapeutic Advisor
|
| 34 |
+
|
| 35 |
+
> **Note to future stupid self**: You will forget everything. This file exists so you don’t scream at your computer in six months. Read it first.
|
| 36 |
+
|
| 37 |
+
---
|
| 38 |
+
|
| 39 |
+
## 🚀 Quickstart
|
| 40 |
+
|
| 41 |
+
1. **Create venv**
|
| 42 |
+
Windows (PowerShell):
|
| 43 |
+
|
| 44 |
+
```powershell
|
| 45 |
+
python -m venv .venv
|
| 46 |
+
.venv\Scripts\Activate.ps1
|
| 47 |
+
```
|
| 48 |
+
|
| 49 |
+
Linux/macOS:
|
| 50 |
+
|
| 51 |
+
```bash
|
| 52 |
+
python -m venv .venv
|
| 53 |
+
source .venv/bin/activate
|
| 54 |
+
```
|
| 55 |
+
|
| 56 |
+
2. **Upgrade pip**
|
| 57 |
+
|
| 58 |
+
```bash
|
| 59 |
+
python -m pip install -U pip
|
| 60 |
+
```
|
| 61 |
+
|
| 62 |
+
3. **Install project (runtime only)**
|
| 63 |
+
|
| 64 |
+
```bash
|
| 65 |
+
pip install -e .
|
| 66 |
+
```
|
| 67 |
+
|
| 68 |
+
4. **Install project + dev tools (pytest, mypy, ruff)**
|
| 69 |
+
|
| 70 |
+
```bash
|
| 71 |
+
pip install -e .[dev]
|
| 72 |
+
```
|
| 73 |
+
|
| 74 |
+
5. **Run tests**
|
| 75 |
+
|
| 76 |
+
```bash
|
| 77 |
+
pytest -q
|
| 78 |
+
```
|
| 79 |
+
|
| 80 |
+
---
|
| 81 |
+
|
| 82 |
+
## 📂 Project layout (don’t mess this up)
|
| 83 |
+
|
| 84 |
+
```
|
| 85 |
+
project-root/
|
| 86 |
+
├─ pyproject.toml # dependencies & config (editable mode uses src/)
|
| 87 |
+
├─ README.md # you are here
|
| 88 |
+
├─ src/
|
| 89 |
+
│ └─ kallam/
|
| 90 |
+
│ ├─ __init__.py
|
| 91 |
+
│ ├─ app/ # orchestrator wiring, chatbot manager
|
| 92 |
+
│ ├─ domain/ # agents, judges, orchestrator logic
|
| 93 |
+
│ └─ infra/ # db, llm clients, search, token counter
|
| 94 |
+
└─ tests/ # pytest lives here
|
| 95 |
+
```
|
| 96 |
+
|
| 97 |
+
* `app/` = entrypoint, wires everything.
|
| 98 |
+
* `domain/` = core logic (agents, judges, orchestrator rules).
|
| 99 |
+
* `infra/` = all the boring adapters (DB, APIs, token counting).
|
| 100 |
+
* `tests/` = if you don’t write them, you’ll break everything and blame Python.
|
| 101 |
+
|
| 102 |
+
---
|
| 103 |
+
|
| 104 |
+
## 🔑 Environment variables
|
| 105 |
+
|
| 106 |
+
Put these in `.env` at project root:
|
| 107 |
+
|
| 108 |
+
```
|
| 109 |
+
OPENAI_API_KEY=sk-...
|
| 110 |
+
SEA_LION_API_KEY=...
|
| 111 |
+
AWS_ROLE_ARN=...
|
| 112 |
+
AWS_DEFAULT_REGION=ap-southeast-2
|
| 113 |
+
TAVILY_API_KEY=...
|
| 114 |
+
```
|
| 115 |
+
|
| 116 |
+
Load automatically via `python-dotenv`.
|
| 117 |
+
|
| 118 |
+
---
|
| 119 |
+
|
| 120 |
+
## 🧪 Common commands
|
| 121 |
+
|
| 122 |
+
* Run chatbot manager manually:
|
| 123 |
+
|
| 124 |
+
```bash
|
| 125 |
+
python -m kallam.app.chatbot_manager
|
| 126 |
+
```
|
| 127 |
+
|
| 128 |
+
* Run lint:
|
| 129 |
+
|
| 130 |
+
```bash
|
| 131 |
+
ruff check src tests
|
| 132 |
+
```
|
| 133 |
+
|
| 134 |
+
* Run type check:
|
| 135 |
+
|
| 136 |
+
```bash
|
| 137 |
+
mypy src
|
| 138 |
+
```
|
| 139 |
+
|
| 140 |
+
* Export a session JSON (example):
|
| 141 |
+
|
| 142 |
+
```python
|
| 143 |
+
from kallam.app.chatbot_manager import ChatbotManager
|
| 144 |
+
mgr = ChatbotManager()
|
| 145 |
+
sid = mgr.start_session()
|
| 146 |
+
mgr.handle_message(sid, "hello world")
|
| 147 |
+
mgr.export_session_json(sid)
|
| 148 |
+
```
|
| 149 |
+
|
| 150 |
+
---
|
| 151 |
+
|
| 152 |
+
## 🧹 Rules for survival
|
| 153 |
+
|
| 154 |
+
* Always activate `.venv` before coding.
|
| 155 |
+
* Never `pip install` globally, always `pip install -e .[dev]` inside venv (on zsh, quote it: `pip install -e '.[dev]'`).
|
| 156 |
+
* If imports fail → you forgot editable install. Run `pip install -e .` again.
|
| 157 |
+
* If SQLite locks up → delete `*.db` files and start fresh.
|
| 158 |
+
* If you break `pyproject.toml` → copy it from git history, don’t wing it.
|
| 159 |
+
|
| 160 |
+
---
|
| 161 |
+
|
| 162 |
+
## ☠️ Known pitfalls
|
| 163 |
+
|
| 164 |
+
* **Windows error “source not recognized”** → you’re not on Linux. Use `.venv\Scripts\Activate.ps1`.
|
| 165 |
+
* **“No module named kallam”** → you didn’t install with `-e`.
|
| 166 |
+
* **Tests can’t import your code** → run `pytest` from project root, not inside `tests/`.
|
| 167 |
+
* **Pip complains about `[dev]`** → you typo’d in `pyproject.toml`. Fix the `[project.optional-dependencies]` block.
|
| 168 |
+
|
| 169 |
+
---
|
| 170 |
+
|
| 171 |
+
That’s it. If you follow this, future you won’t rage-quit.
|
| 172 |
+
|
| 173 |
+
---
|
| 174 |
+
|
src/kallam.egg-info/SOURCES.txt
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
LICENSE
|
| 2 |
+
README.md
|
| 3 |
+
pyproject.toml
|
| 4 |
+
src/kallam/__init__.py
|
| 5 |
+
src/kallam.egg-info/PKG-INFO
|
| 6 |
+
src/kallam.egg-info/SOURCES.txt
|
| 7 |
+
src/kallam.egg-info/dependency_links.txt
|
| 8 |
+
src/kallam.egg-info/requires.txt
|
| 9 |
+
src/kallam.egg-info/top_level.txt
|
| 10 |
+
src/kallam/app/__init__.py
|
| 11 |
+
src/kallam/app/chatbot_manager.py
|
| 12 |
+
src/kallam/app/services/summarizer.py
|
| 13 |
+
src/kallam/domain/__init__.py
|
| 14 |
+
src/kallam/domain/agents/__init__.py
|
| 15 |
+
src/kallam/domain/agents/chatbot_prompt.py
|
| 16 |
+
src/kallam/domain/agents/doctor.py
|
| 17 |
+
src/kallam/domain/agents/orchestrator.py
|
| 18 |
+
src/kallam/domain/agents/psychologist.py
|
| 19 |
+
src/kallam/domain/agents/summarizer.py
|
| 20 |
+
src/kallam/domain/agents/supervisor.py
|
| 21 |
+
src/kallam/domain/agents/supervisor_bedrock.py
|
| 22 |
+
src/kallam/domain/agents/translator.py
|
| 23 |
+
src/kallam/domain/judges/__init__.py
|
| 24 |
+
src/kallam/infra/__init__.py
|
| 25 |
+
src/kallam/infra/config.py
|
| 26 |
+
src/kallam/infra/db.py
|
| 27 |
+
src/kallam/infra/exporter.py
|
| 28 |
+
src/kallam/infra/message_store.py
|
| 29 |
+
src/kallam/infra/session_store.py
|
| 30 |
+
src/kallam/infra/summary_store.py
|
| 31 |
+
src/kallam/infra/token_counter.py
|
| 32 |
+
src/kallam/interfaces/__init__.py
|
src/kallam.egg-info/dependency_links.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
|
src/kallam.egg-info/requires.txt
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
python-dotenv>=1.0.1
|
| 2 |
+
strands-agents>=0.1.0
|
| 3 |
+
strands-agents-tools
|
| 4 |
+
strands-agents-builder
|
| 5 |
+
strands-agents[openai]>=1.0.0
|
| 6 |
+
google-genai
|
| 7 |
+
openai>=1.40.0
|
| 8 |
+
boto3>=1.34.0
|
| 9 |
+
numpy>=1.26.0
|
| 10 |
+
sentence-transformers>=2.6.0
|
| 11 |
+
transformers>=4.40.0
|
| 12 |
+
gradio>=4.0.0
|
| 13 |
+
pyngrok==7.3.0
|
| 14 |
+
matplotlib
|
| 15 |
+
|
| 16 |
+
[dev]
|
| 17 |
+
pytest>=7.0
|
| 18 |
+
pytest-cov>=4.0
|
| 19 |
+
ruff>=0.4.0
|
| 20 |
+
mypy>=1.10.0
|
src/kallam.egg-info/top_level.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
kallam
|
src/kallam/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Top-level package for KaLLaM."""
|
| 2 |
+
|
| 3 |
+
file = __file__
|
| 4 |
+
|
src/kallam/__pycache__/__init__.cpython-311.pyc
ADDED
|
Binary file (252 Bytes). View file
|
|
|
src/kallam/app/__init__.py
ADDED
|
File without changes
|
src/kallam/app/__pycache__/__init__.cpython-311.pyc
ADDED
|
Binary file (190 Bytes). View file
|
|
|
src/kallam/app/__pycache__/chatbot_manager.cpython-311.pyc
ADDED
|
Binary file (26.1 kB). View file
|
|
|
src/kallam/app/chatbot_manager.py
ADDED
|
@@ -0,0 +1,412 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# src/your_pkg/app/chatbot_manager.py
|
| 2 |
+
from __future__ import annotations
|
| 3 |
+
|
| 4 |
+
import json
|
| 5 |
+
import logging
|
| 6 |
+
import os
|
| 7 |
+
import threading
|
| 8 |
+
import time
|
| 9 |
+
import uuid
|
| 10 |
+
from dataclasses import dataclass
|
| 11 |
+
from datetime import datetime, timedelta
|
| 12 |
+
from pathlib import Path
|
| 13 |
+
from typing import Optional, Dict, List, Any
|
| 14 |
+
from functools import wraps
|
| 15 |
+
from contextvars import ContextVar
|
| 16 |
+
|
| 17 |
+
# Keep your orchestrator import AS-IS to avoid ripples.
|
| 18 |
+
from kallam.domain.agents.orchestrator import Orchestrator
|
| 19 |
+
|
| 20 |
+
from kallam.infra.session_store import SessionStore
|
| 21 |
+
from kallam.infra.message_store import MessageStore
|
| 22 |
+
from kallam.infra.summary_store import SummaryStore
|
| 23 |
+
from kallam.infra.exporter import JsonExporter
|
| 24 |
+
from kallam.infra.token_counter import TokenCounter
|
| 25 |
+
from kallam.infra.db import sqlite_conn # for the cleanup method
|
| 26 |
+
|
| 27 |
+
# -----------------------------------------------------------------------------
|
| 28 |
+
# Logging setup (configurable)
|
| 29 |
+
# -----------------------------------------------------------------------------
|
| 30 |
+
|
| 31 |
+
# Correlation id for the in-flight request; "-" means "no active request".
_request_id: ContextVar[str] = ContextVar("_request_id", default="-")
|
| 32 |
+
|
| 33 |
+
class RequestIdFilter(logging.Filter):
    """Inject the current correlation id into every record as ``request_id``.

    Always returns True — this filter never suppresses records, it only
    annotates them so formatters can reference ``%(request_id)s``.
    """

    def filter(self, record: logging.LogRecord) -> bool:
        setattr(record, "request_id", _request_id.get())
        return True
|
| 37 |
+
|
| 38 |
+
def _setup_logging(level: Optional[str] = None, json_mode: bool = False, logger_name: str = "kallam.chatbot"):
    """Attach one stream handler to the root logger and return a named logger.

    The level comes from *level* or $LOG_LEVEL (default INFO); JSON output
    is enabled by *json_mode* or $LOG_JSON. Installing the handler is
    idempotent so repeated construction in a REPL/tests does not duplicate
    log lines.
    """
    effective_level = (level or os.getenv("LOG_LEVEL", "INFO")).upper()
    root_logger = logging.getLogger()

    already_wired = any(isinstance(h, logging.StreamHandler) for h in root_logger.handlers)
    if not already_wired:
        want_json = json_mode or os.getenv("LOG_JSON", "0") in {"1", "true", "True"}
        if want_json:
            fmt = '{"ts":"%(asctime)s","lvl":"%(levelname)s","logger":"%(name)s","req":"%(request_id)s","msg":"%(message)s"}'
        else:
            fmt = "%(asctime)s | %(levelname)-7s | %(name)s | req=%(request_id)s | %(message)s"
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(logging.Formatter(fmt))
        stream_handler.addFilter(RequestIdFilter())
        root_logger.addHandler(stream_handler)

    root_logger.setLevel(effective_level)
    return logging.getLogger(logger_name)
|
| 53 |
+
|
| 54 |
+
logger = _setup_logging() # default init; can be reconfigured via ChatbotManager args
|
| 55 |
+
|
| 56 |
+
def _with_trace(level: int = logging.INFO):
    """Decorator factory that logs entry/exit (with latency) of a method.

    A correlation id is minted when none is active and the wrapped function
    is a top-level entry point (``handle_message``/``start_session``); the
    call that minted it clears it again on the way out.
    """
    def deco(fn):
        @wraps(fn)
        def wrapper(self, *args, **kwargs):
            owns_request_id = False
            if _request_id.get() == "-" and fn.__name__ in {"handle_message", "start_session"}:
                _request_id.set(uuid.uuid4().hex[:8])
                owns_request_id = True

            logger.log(level, f"→ {fn.__name__}")
            started = time.time()
            try:
                result = fn(self, *args, **kwargs)
            except Exception:
                logger.exception(f"✖ {fn.__name__} failed")
                raise
            else:
                elapsed_ms = int((time.time() - started) * 1000)
                logger.log(level, f"← {fn.__name__} done in {elapsed_ms} ms")
                return result
            finally:
                # Only the originating call resets the correlation id.
                if owns_request_id:
                    _request_id.set("-")
        return wrapper
    return deco
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
# Directory the JsonExporter writes exported session files into.
EXPORT_FOLDER = "exported_sessions"
|
| 89 |
+
|
| 90 |
+
@dataclass
class SessionStats:
    """Aggregate per-session metrics.

    NOTE(review): no producer of this dataclass is visible in this chunk;
    field meanings below are inferred from names/schema — confirm against
    the message store before relying on them.
    """
    message_count: int = 0                 # total messages in the session
    total_tokens_in: int = 0               # sum of tokens_input across messages
    total_tokens_out: int = 0              # sum of tokens_output across messages
    avg_latency: float = 0.0               # presumably mean latency_ms — verify
    first_message: Optional[str] = None    # earliest message timestamp, if any
    last_message: Optional[str] = None     # latest message timestamp, if any
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
class ChatbotManager:
|
| 101 |
+
"""
|
| 102 |
+
Backward-compatible facade. Same constructor and methods as your original class.
|
| 103 |
+
Under the hood we delegate to infra stores and the orchestrator.
|
| 104 |
+
"""
|
| 105 |
+
|
| 106 |
+
def __init__(self,
|
| 107 |
+
db_path: str = "chatbot_data.db",
|
| 108 |
+
summarize_every_n_messages: int = 10,
|
| 109 |
+
message_limit: int = 10,
|
| 110 |
+
sunmmary_limit: int = 20,
|
| 111 |
+
chain_of_thoughts_limit: int = 5,
|
| 112 |
+
# logging knobs
|
| 113 |
+
log_level: Optional[str] = None,
|
| 114 |
+
log_json: bool = False,
|
| 115 |
+
log_name: str = "kallam.chatbot",
|
| 116 |
+
trace_level: int = logging.INFO):
|
| 117 |
+
if summarize_every_n_messages <= 0:
|
| 118 |
+
raise ValueError("summarize_every_n_messages must be positive")
|
| 119 |
+
if message_limit <= 0:
|
| 120 |
+
raise ValueError("message_limit must be positive")
|
| 121 |
+
|
| 122 |
+
# Reconfigure logger per instance if caller wants
|
| 123 |
+
global logger
|
| 124 |
+
logger = _setup_logging(level=log_level, json_mode=log_json, logger_name=log_name)
|
| 125 |
+
self._trace_level = trace_level
|
| 126 |
+
|
| 127 |
+
self.orchestrator = Orchestrator()
|
| 128 |
+
self.sum_every_n = summarize_every_n_messages
|
| 129 |
+
self.message_limit = message_limit
|
| 130 |
+
self.summary_limit = sunmmary_limit
|
| 131 |
+
self.chain_of_thoughts_limit = chain_of_thoughts_limit
|
| 132 |
+
self.db_path = Path(db_path)
|
| 133 |
+
self.lock = threading.RLock()
|
| 134 |
+
self.tokens = TokenCounter(capacity=1000)
|
| 135 |
+
|
| 136 |
+
# wire infra
|
| 137 |
+
db_url = f"sqlite:///{self.db_path}"
|
| 138 |
+
self.sessions = SessionStore(db_url)
|
| 139 |
+
self.messages = MessageStore(db_url)
|
| 140 |
+
self.summaries = SummaryStore(db_url)
|
| 141 |
+
self.exporter = JsonExporter(db_url, out_dir=EXPORT_FOLDER)
|
| 142 |
+
|
| 143 |
+
# ensure schema exists
|
| 144 |
+
self._ensure_schema()
|
| 145 |
+
|
| 146 |
+
logger.info(f"ChatbotManager initialized with database: {self.db_path}")
|
| 147 |
+
|
| 148 |
+
# ---------- schema bootstrap ----------
|
| 149 |
+
    @_with_trace()
    def _ensure_schema(self) -> None:
        """Create the sessions/messages/summaries tables and indexes if absent.

        Idempotent (everything is CREATE ... IF NOT EXISTS), so it is safe
        to run on every construction.
        """
        ddl_sessions = """
            CREATE TABLE IF NOT EXISTS sessions (
                session_id TEXT PRIMARY KEY,
                timestamp TEXT NOT NULL,
                last_activity TEXT NOT NULL,
                saved_memories TEXT,
                total_messages INTEGER DEFAULT 0,
                total_user_messages INTEGER DEFAULT 0,
                total_assistant_messages INTEGER DEFAULT 0,
                total_summaries INTEGER DEFAULT 0,
                created_at TEXT DEFAULT CURRENT_TIMESTAMP,
                is_active BOOLEAN DEFAULT 1
            );
        """
        ddl_messages = """
            CREATE TABLE IF NOT EXISTS messages (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                session_id TEXT NOT NULL,
                message_id TEXT UNIQUE NOT NULL,
                timestamp TEXT NOT NULL,
                role TEXT NOT NULL CHECK(role IN ('user','assistant','system')),
                content TEXT NOT NULL,
                translated_content TEXT,
                chain_of_thoughts TEXT,
                tokens_input INTEGER DEFAULT 0,
                tokens_output INTEGER DEFAULT 0,
                latency_ms INTEGER,
                flags TEXT,
                created_at TEXT DEFAULT CURRENT_TIMESTAMP,
                FOREIGN KEY(session_id) REFERENCES sessions(session_id) ON DELETE CASCADE
            );
        """
        ddl_summaries = """
            CREATE TABLE IF NOT EXISTS summaries (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                session_id TEXT NOT NULL,
                timestamp TEXT NOT NULL,
                summary TEXT NOT NULL,
                created_at TEXT DEFAULT CURRENT_TIMESTAMP,
                FOREIGN KEY(session_id) REFERENCES sessions(session_id) ON DELETE CASCADE
            );
        """
        # Indexes for the hot lookup paths (per-session fetches, recency sorts).
        idx = [
            "CREATE INDEX IF NOT EXISTS idx_messages_session_id ON messages(session_id)",
            "CREATE INDEX IF NOT EXISTS idx_messages_timestamp ON messages(timestamp DESC)",
            "CREATE INDEX IF NOT EXISTS idx_sessions_last_activity ON sessions(last_activity DESC)",
            "CREATE INDEX IF NOT EXISTS idx_summaries_session_id ON summaries(session_id)",
            "CREATE INDEX IF NOT EXISTS idx_messages_role ON messages(role)"
        ]
        with sqlite_conn(str(self.db_path)) as c:
            c.execute(ddl_sessions)
            c.execute(ddl_messages)
            c.execute(ddl_summaries)
            for q in idx:
                c.execute(q)
|
| 206 |
+
|
| 207 |
+
# ---------- validation/util ----------
|
| 208 |
+
def _validate_inputs(self, **kwargs):
|
| 209 |
+
validators = {
|
| 210 |
+
'user_message': lambda x: bool(x and str(x).strip()),
|
| 211 |
+
'session_id': lambda x: bool(x),
|
| 212 |
+
'role': lambda x: x in ('user', 'assistant', 'system'),
|
| 213 |
+
}
|
| 214 |
+
for k, v in kwargs.items():
|
| 215 |
+
fn = validators.get(k)
|
| 216 |
+
if fn and not fn(v):
|
| 217 |
+
raise ValueError(f"Invalid {k}: {v}")
|
| 218 |
+
|
| 219 |
+
# ---------- public API ----------
|
| 220 |
+
@_with_trace()
|
| 221 |
+
def start_session(self, saved_memories: Optional[str] = None) -> str:
|
| 222 |
+
sid = self.sessions.create(saved_memories=saved_memories)
|
| 223 |
+
logger.debug(f"start_session: saved_memories_len={len(saved_memories or '')} session_id={sid}")
|
| 224 |
+
return sid
|
| 225 |
+
|
| 226 |
+
@_with_trace()
|
| 227 |
+
def get_session(self, session_id: str) -> Optional[Dict[str, Any]]:
|
| 228 |
+
return self.sessions.get_meta(session_id)
|
| 229 |
+
|
| 230 |
+
@_with_trace()
|
| 231 |
+
def list_sessions(self, active_only: bool = True, limit: int = 50) -> List[Dict[str, Any]]:
|
| 232 |
+
return self.sessions.list(active_only=active_only, limit=limit)
|
| 233 |
+
|
| 234 |
+
@_with_trace()
|
| 235 |
+
def close_session(self, session_id: str) -> bool:
|
| 236 |
+
return self.sessions.close(session_id)
|
| 237 |
+
|
| 238 |
+
@_with_trace()
|
| 239 |
+
def delete_session(self, session_id: str) -> bool:
|
| 240 |
+
return self.sessions.delete(session_id)
|
| 241 |
+
|
| 242 |
+
@_with_trace()
|
| 243 |
+
def cleanup_old_sessions(self, days_old: int = 30) -> int:
|
| 244 |
+
if days_old <= 0:
|
| 245 |
+
raise ValueError("days_old must be positive")
|
| 246 |
+
cutoff = (datetime.now() - timedelta(days=days_old)).isoformat()
|
| 247 |
+
return self.sessions.cleanup_before(cutoff)
|
| 248 |
+
|
| 249 |
+
    @_with_trace()
    def handle_message(self, session_id: str, user_message: str) -> str:
        """Run one full request/response turn for a session.

        Pipeline (under the instance lock, so turns are serialized):
        fetch context -> get routing flags -> translate user message ->
        generate response via the orchestrator -> translate the reply ->
        persist both messages -> maybe trigger a summary checkpoint.

        Raises ValueError if the session does not exist or inputs are invalid.
        Returns the assistant's final reply text.
        """
        # Ensure one correlation id per request flow
        if _request_id.get() == "-":
            _request_id.set(uuid.uuid4().hex[:8])

        self._validate_inputs(session_id=session_id, user_message=user_message)
        with self.lock:
            t0 = time.time()

            # ensure session exists
            if not self.get_session(session_id):
                raise ValueError(f"Session {session_id} not found")

            # fetch context: raw history, English history, summaries, and
            # prior reasoning traces, each capped by its configured limit
            original_history = self.messages.get_original_history(session_id, limit=self.message_limit)
            eng_history = self.messages.get_translated_history(session_id, limit=self.message_limit)
            eng_summaries = self.summaries.list(session_id, limit=self.summary_limit)
            chain = self.messages.get_reasoning_traces(session_id, limit=self.chain_of_thoughts_limit)

            meta = self.sessions.get_meta(session_id) or {}
            memory_context = (meta.get("saved_memories") or "") if isinstance(meta, dict) else ""

            if logger.isEnabledFor(logging.DEBUG):
                logger.debug(
                    "context pulled: history=%d summaries=%d chain=%d mem_len=%d",
                    len(eng_history or []), len(eng_summaries or []), len(chain or []), len(memory_context),
                )

            # flags and translation
            flags = self._get_flags_dict(session_id, user_message)
            if logger.isEnabledFor(logging.DEBUG):
                # keep flags concise if large
                short_flags = {k: (v if isinstance(v, (int, float, bool, str)) else "…") for k, v in (flags or {}).items()}
                logger.debug(f"flags: {short_flags}")

            # "forward" = translate into the agents' working language (English)
            eng_msg = self.orchestrator.get_translation(
                message=user_message, flags=flags, translation_type="forward"
            )

            # respond
            response_commentary = self.orchestrator.get_commented_response(
                original_history=original_history,
                original_message=user_message,
                eng_history=eng_history,
                eng_message=eng_msg,
                flags=flags,
                chain_of_thoughts=chain,
                memory_context=memory_context,  # type: ignore
                summarized_histories=eng_summaries,
            )

            bot_message = response_commentary["final_output"]
            # Also "forward": produce an English copy of the reply for storage
            # as translated history — NOTE(review): confirm "forward" (not
            # "backward") is intended here.
            bot_eng = self.orchestrator.get_translation(
                message=bot_message, flags=flags, translation_type="forward"
            )
            latency_ms = int((time.time() - t0) * 1000)

            # persist
            tok_user = self.tokens.count(user_message)
            tok_bot = self.tokens.count(bot_message)

            self.messages.append_user(session_id, content=user_message,
                                      translated=eng_msg, flags=flags, tokens_in=tok_user)
            self.messages.append_assistant(session_id, content=bot_message,
                                           translated=bot_eng, reasoning=response_commentary,
                                           tokens_out=tok_bot)

            # NOTE(review): latency_ms is only logged, never persisted with the
            # message rows — confirm whether the store should receive it.
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug(
                    "persisted: tokens_in=%d tokens_out=%d latency_ms=%d", tok_user, tok_bot, latency_ms
                )

            # summarize checkpoint: every sum_every_n user messages
            meta = self.sessions.get_meta(session_id)
            if meta and (meta["total_user_messages"] % self.sum_every_n == 0) and (meta["total_user_messages"] > 0):
                logger.log(self._trace_level, f"checkpoint: summarizing session {session_id}")
                self.summarize_session(session_id)

            return bot_message
|
| 329 |
+
|
| 330 |
+
@_with_trace()
|
| 331 |
+
def _get_flags_dict(self, session_id: str, user_message: str) -> Dict[str, Any]:
|
| 332 |
+
self._validate_inputs(session_id=session_id)
|
| 333 |
+
try:
|
| 334 |
+
# Build context Supervisor expects
|
| 335 |
+
chat_history = self.messages.get_translated_history(session_id, limit=self.message_limit) or []
|
| 336 |
+
summaries = self.summaries.list(session_id, limit=self.message_limit) or []
|
| 337 |
+
meta = self.sessions.get_meta(session_id) or {}
|
| 338 |
+
memory_context = (meta.get("saved_memories") or "") if isinstance(meta, dict) else ""
|
| 339 |
+
|
| 340 |
+
flags = self.orchestrator.get_flags_from_supervisor(
|
| 341 |
+
chat_history=chat_history,
|
| 342 |
+
user_message=user_message,
|
| 343 |
+
memory_context=memory_context,
|
| 344 |
+
summarized_histories=summaries
|
| 345 |
+
)
|
| 346 |
+
return flags
|
| 347 |
+
except Exception as e:
|
| 348 |
+
logger.warning(f"Failed to get flags from supervisor: {e}, using safe defaults")
|
| 349 |
+
# Safe defaults keep the pipeline alive
|
| 350 |
+
return {"language": "english", "doctor": False, "psychologist": False}
|
| 351 |
+
|
| 352 |
+
@_with_trace()
|
| 353 |
+
def summarize_session(self, session_id: str) -> str:
|
| 354 |
+
eng_history = self.messages.get_translated_history(session_id, limit=self.message_limit)
|
| 355 |
+
if not eng_history:
|
| 356 |
+
raise ValueError("No chat history found for session")
|
| 357 |
+
eng_summaries = self.summaries.list(session_id)
|
| 358 |
+
eng_summary = self.orchestrator.summarize_history(
|
| 359 |
+
chat_history=eng_history, eng_summaries=eng_summaries
|
| 360 |
+
)
|
| 361 |
+
self.summaries.add(session_id, eng_summary) # type: ignore
|
| 362 |
+
logger.debug("summary_len=%d total_summaries=%d", len(eng_summary or ""), len(eng_summaries or []) + 1)
|
| 363 |
+
return eng_summary # type: ignore
|
| 364 |
+
|
| 365 |
+
@_with_trace()
|
| 366 |
+
def get_session_stats(self, session_id: str) -> dict:
|
| 367 |
+
stats, session = self.messages.aggregate_stats(session_id)
|
| 368 |
+
stats_dict = {
|
| 369 |
+
"message_count": stats.get("message_count") or 0,
|
| 370 |
+
"total_tokens_in": stats.get("total_tokens_in") or 0,
|
| 371 |
+
"total_tokens_out": stats.get("total_tokens_out") or 0,
|
| 372 |
+
"avg_latency": float(stats.get("avg_latency") or 0),
|
| 373 |
+
"first_message": stats.get("first_message"),
|
| 374 |
+
"last_message": stats.get("last_message"),
|
| 375 |
+
}
|
| 376 |
+
logger.debug("stats: %s", stats_dict)
|
| 377 |
+
return {
|
| 378 |
+
"session_info": session, # already a dict from MessageStore
|
| 379 |
+
"stats": stats_dict, # plain dict
|
| 380 |
+
}
|
| 381 |
+
|
| 382 |
+
    @_with_trace()
    def get_original_chat_history(self, session_id: str, limit: int | None = None) -> list[dict]:
        """Return up to `limit` raw (untranslated) messages for a session.

        Each element is a dict with keys role, content, timestamp. When
        `limit` is None the instance's message_limit is used.
        """
        self._validate_inputs(session_id=session_id)
        if limit is None:
            limit = self.message_limit

        # Fallback: direct query
        # NOTE(review): ORDER BY id ASC with LIMIT returns the OLDEST `limit`
        # rows, not the most recent — confirm this window is intentional.
        with sqlite_conn(str(self.db_path)) as c:
            rows = c.execute(
                """
                SELECT role, content, timestamp
                FROM messages
                WHERE session_id = ?
                ORDER BY id ASC
                LIMIT ?
                """,
                (session_id, limit),
            ).fetchall()
            # dict(r) relies on mapping-style rows — presumably sqlite3.Row
            # is set as the connection's row_factory; verify in sqlite_conn.
            return [dict(r) for r in rows]
|
| 401 |
+
|
| 402 |
+
@_with_trace()
|
| 403 |
+
def export_session_json(self, session_id: str) -> str:
|
| 404 |
+
path = self.exporter.export_session_json(session_id)
|
| 405 |
+
logger.info(f"exported session to {path}")
|
| 406 |
+
return path
|
| 407 |
+
|
| 408 |
+
@_with_trace()
|
| 409 |
+
def export_all_sessions_json(self) -> str:
|
| 410 |
+
path = self.exporter.export_all_sessions_json()
|
| 411 |
+
logger.info(f"exported session to {path}")
|
| 412 |
+
return path
|
src/kallam/domain/__init__.py
ADDED
|
File without changes
|
src/kallam/domain/__pycache__/__init__.cpython-311.pyc
ADDED
|
Binary file (193 Bytes). View file
|
|
|
src/kallam/domain/agents/__init__.py
ADDED
|
File without changes
|
src/kallam/domain/agents/__pycache__/Single_agent.cpython-311.pyc
ADDED
|
Binary file (32.4 kB). View file
|
|
|
src/kallam/domain/agents/__pycache__/__init__.cpython-311.pyc
ADDED
|
Binary file (200 Bytes). View file
|
|
|
src/kallam/domain/agents/__pycache__/doctor.cpython-311.pyc
ADDED
|
Binary file (12.5 kB). View file
|
|
|
src/kallam/domain/agents/__pycache__/orchestrator.cpython-311.pyc
ADDED
|
Binary file (12.1 kB). View file
|
|
|
src/kallam/domain/agents/__pycache__/psychologist.cpython-311.pyc
ADDED
|
Binary file (40.3 kB). View file
|
|
|
src/kallam/domain/agents/__pycache__/summarizer.cpython-311.pyc
ADDED
|
Binary file (8.83 kB). View file
|
|
|
src/kallam/domain/agents/__pycache__/supervisor.cpython-311.pyc
ADDED
|
Binary file (27.1 kB). View file
|
|
|
src/kallam/domain/agents/__pycache__/translator.cpython-311.pyc
ADDED
|
Binary file (13 kB). View file
|
|
|
src/kallam/domain/agents/doctor.py
ADDED
|
@@ -0,0 +1,215 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import logging
|
| 3 |
+
import requests
|
| 4 |
+
import re
|
| 5 |
+
from google import genai
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
from datetime import datetime
|
| 8 |
+
from typing import List, Literal, Dict, Any, Optional
|
| 9 |
+
|
| 10 |
+
from dotenv import load_dotenv
|
| 11 |
+
load_dotenv()
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class DoctorAgent:
    """Medical-advisory agent backed by the Gemini API.

    Builds a structured prompt from the patient's message plus conversation
    context and asks Gemini for concise, JSON-formatted guidance intended for
    medical personnel. Degrades gracefully (fallback strings) when no
    GEMINI_API_KEY is configured or the API call fails.
    """

    # Closed vocabularies for downstream consumers; not enforced at runtime here.
    SeverityLevel = Literal["low", "moderate", "high", "emergency"]
    RecommendationType = Literal["self_care", "consult_gp", "urgent_care", "emergency"]

    def __init__(self, log_level: int = logging.INFO):
        self._setup_logging(log_level)
        self._setup_api_clients()

        self.logger.info("Doctor Agent initialized successfully with Gemini API")

        self.system_prompt = """
        **Your Role:**
        You are an expert doctor assisting medical personnel. You provide helpful medical information and guidance while being extremely careful about medical advice.

        **Core Rules:**
        - Recommend consulting a healthcare professional for serious cases
        - Advice specific diagnosis based on the context with variable confidence on each if there is any
        - Keep your advice very concise and use medical keywords as it will be use only for advice for a expert medical personnel
        - You only response based on the provided JSON format

        **Specific Task:**
        - Assess symptom severity and provide appropriate recommendations
        - Offer first aid guidance for emergency situations

        **Response Guidelines:**
        - Recommend clarifying questions when needed
        - Use clear, actionable for guidance
        - Include appropriate medical disclaimers
        - Use structured assessment approach
        - Respond in the user's preferred language when specified
        - Do not provide any reasons to your "Diagnosis" confidence

        **Output Format in JSON:**
        {"Recommendations": [one or two most priority recommendation or note for medical personnel]
        "Diagnosis 0-10 with Confidence": {[Disease | Symptom]: [Confidence 0-10]}
        "Doctor Plan": [short plan for your future self and medical personnel]}
        """

    def _setup_logging(self, log_level: int) -> None:
        """Attach a per-day file handler plus a console handler to this agent's logger."""
        log_dir = Path("logs")
        log_dir.mkdir(exist_ok=True)
        self.logger = logging.getLogger(f"{__name__}.DoctorAgent")
        self.logger.setLevel(log_level)
        # Re-initialization (e.g. repeated construction) must not duplicate handlers.
        if self.logger.handlers:
            self.logger.handlers.clear()
        file_handler = logging.FileHandler(
            log_dir / f"doctor_{datetime.now().strftime('%Y%m%d')}.log",
            encoding='utf-8'
        )
        file_handler.setLevel(logging.DEBUG)
        console_handler = logging.StreamHandler()
        console_handler.setLevel(log_level)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        file_handler.setFormatter(formatter)
        console_handler.setFormatter(formatter)
        self.logger.addHandler(file_handler)
        self.logger.addHandler(console_handler)

    def _setup_api_clients(self) -> None:
        """Initialize the Gemini client from GEMINI_API_KEY, or disable Gemini."""
        # Initialize Gemini if available; otherwise, degrade gracefully
        raw_gemini_key = os.getenv("GEMINI_API_KEY", "")
        self.gemini_api_key = raw_gemini_key.strip()
        if raw_gemini_key and not self.gemini_api_key:
            self.logger.warning("GEMINI_API_KEY contained only whitespace after stripping")
        if self.gemini_api_key:
            try:
                self.gemini_client = genai.Client(api_key=self.gemini_api_key)
                self.gemini_model_name = "gemini-2.5-flash-lite"
                self.gemini_enabled = True
                self.logger.info(f"Gemini API client initialized with model: {self.gemini_model_name}")
            except Exception as e:
                self.gemini_enabled = False
                self.logger.error(
                    "Failed to initialize Gemini client (%s): %s",
                    e.__class__.__name__,
                    str(e),
                )
        else:
            self.gemini_enabled = False
            self.logger.warning("GEMINI_API_KEY not set. DoctorAgent will return fallback responses.")

    def _format_prompt_gemini(self, message_context: str, medical_context: str = "") -> str:
        """Compose the full Gemini prompt: system prompt + timestamp/context + query."""
        now = datetime.now()
        current_context = f"""
        **Current Context:**
        - Date/Time: {now.strftime("%Y-%m-%d %H:%M:%S")}
        - Medical Context: {medical_context}
        """

        prompt = f"""{self.system_prompt}
{current_context}
**Patient Query:** {message_context}

Please provide your medical guidance following the guidelines above."""

        return prompt

    def _generate_response_gemini(self, prompt: str) -> str:
        """Call Gemini and return its text; Thai fallback strings on any failure."""
        # If Gemini unavailable, return a safe fallback immediately
        if not getattr(self, "gemini_enabled", False):
            return "ขออภัยค่ะ ขณะนี้ไม่สามารถให้คำแนะนำทางการแพทย์เชิงลึกได้ กรุณาลองใหม่อีกครั้งหรือปรึกษาผู้เชี่ยวชาญค่ะ"

        try:
            self.logger.debug(f"Sending prompt to Gemini API (length: {len(prompt)} chars)")

            response = self.gemini_client.models.generate_content(
                model=self.gemini_model_name,
                contents=[prompt],
            )

            response_text = response.text

            if response_text is None or (isinstance(response_text, str) and response_text.strip() == ""):
                self.logger.error("Gemini API returned None or empty content")
                return "ขออภัยค่ะ ไม่สามารถสร้างคำตอบได้ในขณะนี้"

            # Extract answer block if present
            answer_match = re.search(r"```answer\s*(.*?)\s*```", response_text, re.DOTALL)
            commentary = answer_match.group(1).strip() if answer_match else response_text.strip()

            self.logger.info(f"Generated medical response - Commentary: {len(commentary)} chars")
            return commentary

        except Exception as e:
            self.logger.error("Error generating Gemini response (%s): %s", e.__class__.__name__, str(e))
            return "ขออภัยค่ะ เกิดปัญหาในการเชื่อมต่อ กรุณาลองใหม่อีกครั้งค่ะ"

    def analyze(self, user_message: str, chat_history: List[Dict], chain_of_thoughts: str = "", summarized_histories: str = "") -> str:
        """
        Main analyze method expected by orchestrator.
        Returns a single commentary string.
        """
        context_parts = []
        if summarized_histories:
            context_parts.append(f"Patient History Summary: {summarized_histories}")
        if chain_of_thoughts:
            context_parts.append(f"Your Previous Recommendations: {chain_of_thoughts}")

        # Include only the last 3 turns to keep the prompt compact.
        recent_context = []
        for msg in chat_history[-3:]:
            if msg.get("role") == "user":
                recent_context.append(f"Patient: {msg.get('content', '')}")
            elif msg.get("role") == "assistant":
                recent_context.append(f"Medical Personnel: {msg.get('content', '')}")
        if recent_context:
            context_parts.append("Recent Conversation:\n" + "\n".join(recent_context))

        full_context = "\n\n".join(context_parts) if context_parts else ""

        message_context = f"""

        Current Patient Message: {user_message}
        Available Context:
        {full_context if full_context else "No previous context available"}
        """

        prompt = self._format_prompt_gemini(message_context=message_context)
        # Fix: removed leftover `print(prompt)` — it leaked the full patient
        # prompt to stdout; use the debug log instead.
        self.logger.debug("Doctor prompt (length: %d chars)", len(prompt))
        return self._generate_response_gemini(prompt)
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
if __name__ == "__main__":
    # Minimal smoke-test for DoctorAgent via its public analyze() entry point.

    # Boot the agent; abort with a non-zero exit code if construction fails.
    try:
        agent = DoctorAgent(log_level=logging.DEBUG)
    except Exception as e:
        print(f"[BOOT ERROR] Unable to start DoctorAgent: {e}")
        raise SystemExit(1)

    # Fixture: a short prior conversation between patient and assistant.
    demo_history = [
        {"role": "user", "content": "Hi, I've been having some stomach issues lately."},
        {"role": "assistant", "content": "I'm sorry to hear about your stomach issues. Can you tell me more about the symptoms?"}
    ]

    # Fixture: reasoning carried over from a previous analysis pass.
    demo_chain = "Patient reports digestive issues, need to assess severity and duration."

    # Fixture: condensed patient background.
    demo_summary = "Previous sessions: Patient is 25 y/o, works in high-stress environment, irregular eating habits, drinks 3-4 cups of coffee daily."

    # Fixture: the current complaint to analyze.
    user_message = "I've been having sharp stomach pains after eating, and I feel nauseous. It's been going on for about a week now."

    print("\n=== DOCTOR AGENT TEST ===")
    print(f"User Message: {user_message}")
    print(f"Chat History Length: {len(demo_history)}")
    print(f"Context: {demo_summary}")
    print("\n=== MEDICAL ANALYSIS RESULT ===")

    result = agent.analyze(
        user_message=user_message,
        chat_history=demo_history,
        chain_of_thoughts=demo_chain,
        summarized_histories=demo_summary
    )

    print(result)
    print("\n=== TEST COMPLETED ===")
|
src/kallam/domain/agents/orchestrator.py
ADDED
|
@@ -0,0 +1,206 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import logging
|
| 3 |
+
from typing import Optional, Dict, Any, List
|
| 4 |
+
from functools import wraps
|
| 5 |
+
import time
|
| 6 |
+
|
| 7 |
+
# Load agents
|
| 8 |
+
from .supervisor import SupervisorAgent
|
| 9 |
+
from .summarizer import SummarizerAgent
|
| 10 |
+
from .translator import TranslatorAgent
|
| 11 |
+
from .doctor import DoctorAgent
|
| 12 |
+
from .psychologist import PsychologistAgent
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def _trace(level: int = logging.INFO):
    """Lightweight enter/exit trace with timing; relies on parent handlers/format."""
    def decorator(func):
        @wraps(func)
        def traced(self, *args, **kwargs):
            self.logger.log(level, f"→ {func.__name__}")
            started = time.time()
            try:
                result = func(self, *args, **kwargs)
            except Exception:
                # Full traceback goes through the instance logger, then re-raise.
                self.logger.exception(f"✖ {func.__name__} failed")
                raise
            elapsed_ms = int((time.time() - started) * 1000)
            self.logger.log(level, f"← {func.__name__} done in {elapsed_ms} ms")
            return result
        return traced
    return decorator
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class Orchestrator:
    """Facade that coordinates the specialist agents (supervisor, summarizer,
    translator, doctor, psychologist) on behalf of the chatbot manager.
    """
    # ----------------------------------------------------------------------------------------------
    # Initialization

    def __init__(self, log_level: int | None = None, logger_name: str = "kallam.chatbot.orchestrator"):
        """
        log_level: if provided, sets this logger's level; otherwise inherit from parent.
        logger_name: child logger under the manager's logger hierarchy.
        """
        self._setup_logging(log_level, logger_name)

        # Initialize available agents
        self.supervisor = SupervisorAgent()
        self.summarizer = SummarizerAgent()
        self.translator = TranslatorAgent()
        self.doctor = DoctorAgent()
        self.psychologist = PsychologistAgent()

        # Optional config (model names, thresholds, etc.)
        # NOTE(review): only supported_languages / agents_language are read in
        # this class; the model names appear unused here — confirm consumers.
        self.config = {
            "default_model": "gpt-4o",
            "translation_model": "gpt-4o",
            "summarization_model": "gpt-4o",
            "doctor_model": "gpt-4o",
            "psychologist_model": "gpt-4o",
            "similarity_threshold": 0.75,
            "supported_languages": {"thai", "english"},
            "agents_language": "english"
        }

        eff = logging.getLevelName(self.logger.getEffectiveLevel())
        self.logger.info(f"KaLLaM agents manager initialized. Effective log level: {eff}")

    def _setup_logging(self, log_level: int | None, logger_name: str) -> None:
        """
        Use a child logger so we inherit handlers/formatters/filters (incl. request_id)
        from the ChatbotManager root logger setup. We do NOT add handlers here.
        """
        self.logger = logging.getLogger(logger_name)
        # Inherit manager's handlers
        self.logger.propagate = True
        # If caller explicitly sets a level, respect it; otherwise leave unset so it inherits.
        if log_level is not None:
            self.logger.setLevel(log_level)

    # ----------------------------------------------------------------------------------------------
    # Supervisor & Routing
    @_trace()
    def get_flags_from_supervisor(
        self,
        chat_history: Optional[List[Dict[str, str]]] = None,
        user_message: str = "",
        memory_context: Optional[str] = "",
        summarized_histories: Optional[List] = None
    ) -> Dict[str, Any]:
        """Ask the supervisor for routing flags; returns the parsed JSON dict.

        Raises json.JSONDecodeError if the supervisor returns non-JSON text.
        """
        self.logger.info("Getting flags from SupervisorAgent")
        # Normalize None arguments to empty containers/strings.
        chat_history = chat_history or []
        summarized_histories = summarized_histories or []
        memory_context = memory_context or ""

        string_flags = self.supervisor.generate_feedback(
            chat_history=chat_history,
            user_message=user_message,
            memory_context=memory_context,
            task="flag",
            summarized_histories=summarized_histories
        )
        dict_flags = json.loads(string_flags)
        self.logger.debug(f"Supervisor flags: {dict_flags}")
        return dict_flags

    @_trace()
    def get_translation(self, message: str, flags: dict, translation_type: str) -> str:
        """Translate message based on flags and translation type.

        "forward" = user language -> agents_language (english);
        "backward" = agents_language -> user language. In both directions a
        translation only happens when the flagged source language differs from
        agents_language; otherwise the original message is returned unchanged.
        """
        try:
            source_lang = flags.get("language")
            default_target = self.config["agents_language"]
            supported_langs = self.config["supported_languages"]

            if translation_type not in {"forward", "backward"}:
                raise ValueError(f"Invalid translation type: {translation_type}. Allowed: 'forward', 'backward'")

            if source_lang is None:
                self.logger.debug("No translation flag set, using original message")
                return message

            if source_lang not in supported_langs:
                supported = ", ".join(f"'{lang}'" for lang in supported_langs)
                raise ValueError(f"Invalid translate flag: '{source_lang}'. Supported: {supported}")

            if translation_type == "forward":
                # Target is english unless the source already is english.
                target_lang = default_target if source_lang != default_target else source_lang
                needs_translation = source_lang != default_target
            else:
                # Backward: translate back into the user's flagged language.
                target_lang = source_lang if source_lang != default_target else default_target
                needs_translation = source_lang != default_target

            if needs_translation:
                self.logger.debug(f"Translating {translation_type}: '{source_lang}' -> '{target_lang}'")
                return self.translator.get_translation(message=message, target=target_lang)
            else:
                self.logger.debug(f"Source '{source_lang}' same as target, using original message")
                return message

        except ValueError:
            # Validation errors propagate unchanged to the caller.
            raise
        except Exception as e:
            self.logger.error(f"Unexpected error in translation: {e}", exc_info=True)
            raise RuntimeError(
                "เกิดข้อผิดพลาดขณะแปล โปรดตรวจสอบให้แน่ใจว่าคุณใช้ภาษาที่รองรับ แล้วลองอีกครั้ง\n"
                "An error occurred while translating. Please make sure you are using a supported language and try again."
            )

    @_trace()
    def get_commented_response(self,
                               original_history: List[Dict[str, str]],
                               original_message: str,
                               eng_history: List[Dict[str, str]],
                               eng_message: str,
                               flags: Dict[str, Any],
                               chain_of_thoughts: List[Dict[str, str]],
                               memory_context: Optional[Dict[str, Any]],
                               summarized_histories: List[Dict[str, str]]) -> Dict[str, Any]:
        """Route the message through flagged specialist agents, then let the
        supervisor finalize. Returns the commentary dict; the final reply is
        under key "final_output".

        NOTE(review): memory_context is annotated as a dict but the manager
        passes a string — confirm the intended type.
        """
        self.logger.info(f"Routing message: {eng_message} | Flags: {flags}")

        commentary = {}

        # Doctor works on the English-translated view of the conversation.
        if flags.get("doctor"):  # Dummy for now
            self.logger.debug("Activating DoctorAgent")
            commentary["doctor"] = self.doctor.analyze(
                eng_message, eng_history, json.dumps(chain_of_thoughts, ensure_ascii=False), json.dumps(summarized_histories, ensure_ascii=False)
            )

        # Psychologist works on the original-language view.
        if flags.get("psychologist"):  # Dummy for now
            self.logger.debug("Activating PsychologistAgent")
            commentary["psychologist"] = self.psychologist.analyze(
                original_message, original_history, json.dumps(chain_of_thoughts, ensure_ascii=False), json.dumps(summarized_histories, ensure_ascii=False)
            )

        # Supervisor merges specialist commentary into the final user-facing reply.
        commentary["final_output"] = self.supervisor.generate_feedback(
            chat_history=original_history,
            user_message=original_message,
            memory_context=json.dumps(memory_context) if memory_context else "",
            task="finalize",
            summarized_histories=summarized_histories,
            commentary=commentary,
        )
        self.logger.info("Routing complete. Returning results.")
        return commentary

    def _merge_outputs(self, outputs: dict) -> str:
        """
        Merge multiple agent responses into a single final string.
        For now: concatenate. Later: ranking or weighting logic.
        """
        final = []
        for agent, out in outputs.items():
            if agent != "final_output":
                final.append(f"[{agent.upper()}]: {out}")
        return "\n".join(final)

    @_trace()
    def summarize_history(self,
                          chat_history: List[Dict[str, str]],
                          eng_summaries: List[Dict[str, str]]) -> Optional[str]:
        """Summarize chat history via the summarizer agent.

        Best-effort: on failure, logs and returns the literal string
        "Error during summarization." rather than raising.
        """
        try:
            summary = self.summarizer.summarize(chat_history, eng_summaries)
            self.logger.info("Summarization complete.")
        except Exception as e:
            self.logger.error(f"Error during summarization: {e}", exc_info=True)
            summary = "Error during summarization."
        return str(summary)
|
src/kallam/domain/agents/psychologist.py
ADDED
|
@@ -0,0 +1,620 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import json
|
| 3 |
+
import logging
|
| 4 |
+
import requests
|
| 5 |
+
import re
|
| 6 |
+
from google import genai
|
| 7 |
+
from pathlib import Path
|
| 8 |
+
from datetime import datetime
|
| 9 |
+
from typing import List, Literal, Dict, Any, Optional, Tuple
|
| 10 |
+
|
| 11 |
+
from dotenv import load_dotenv
|
| 12 |
+
load_dotenv()
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
from kallam.infrastructure.sea_lion import load_sea_lion_settings, fingerprint_secret
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class PsychologistAgent:
    """Bilingual Motivational-Interviewing (MI) advisor agent.

    Routes Thai input to the SEA-Lion API and English input to Gemini
    (see provide_therapeutic_guidance), producing JSON-structured
    therapeutic guidance for the orchestrating supervisor.
    """

    # Closed vocabularies exposed as type aliases. NOTE(review): neither is
    # referenced by the methods visible here — confirm external use before
    # removing.
    TherapyApproach = Literal["cbt", "dbt", "act", "motivational", "solution_focused", "mindfulness"]
    CrisisLevel = Literal["none", "mild", "moderate", "severe", "emergency"]

    def __init__(self, log_level: int = logging.INFO):
        """Initialize logging and both LLM API clients.

        Args:
            log_level: Console logging verbosity; the file handler always
                records DEBUG regardless of this value.
        """
        self._setup_logging(log_level)
        self._setup_api_clients()
        self.logger.info(f"PsychologistAgent initialized successfully - Thai->SEA-Lion, English->Gemini")
|
| 26 |
+
|
| 27 |
+
def _setup_logging(self, log_level: int) -> None:
    """Setup logging configuration"""
    # Logs land in logs/psychologist_YYYYMMDD.log — one file per day.
    log_dir = Path("logs")
    log_dir.mkdir(exist_ok=True)

    self.logger = logging.getLogger(f"{__name__}.PsychologistAgent")
    self.logger.setLevel(log_level)

    # Drop handlers left over from a previous instantiation so repeated
    # construction does not duplicate every log line.
    if self.logger.handlers:
        self.logger.handlers.clear()

    file_handler = logging.FileHandler(
        log_dir / f"psychologist_{datetime.now().strftime('%Y%m%d')}.log",
        encoding='utf-8'
    )
    # The file captures everything; the console respects the requested level.
    file_handler.setLevel(logging.DEBUG)

    console_handler = logging.StreamHandler()
    console_handler.setLevel(log_level)

    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )
    file_handler.setFormatter(formatter)
    console_handler.setFormatter(formatter)

    self.logger.addHandler(file_handler)
    self.logger.addHandler(console_handler)
|
| 55 |
+
|
| 56 |
+
def _setup_api_clients(self) -> None:
    """Setup both API clients with graceful degradation."""
    # SEA-Lion API (Thai). Settings come from the shared infrastructure
    # loader; a missing token disables the client instead of failing.
    sea_settings = load_sea_lion_settings(default_model="aisingapore/Gemma-SEA-LION-v4-27B-IT")
    self.sea_lion_base_url = sea_settings.base_url
    self.sea_lion_token = sea_settings.token
    self.sea_lion_model = sea_settings.model
    self.sea_mode = sea_settings.mode
    self.sea_enabled = self.sea_lion_token is not None

    if self.sea_enabled:
        # Only a fingerprint of the token is logged, never the secret itself.
        self.logger.info(
            "SEA-Lion API client initialized (mode=%s, base_url=%s, model=%s, token_fp=%s)",
            self.sea_mode,
            self.sea_lion_base_url,
            self.sea_lion_model,
            fingerprint_secret(self.sea_lion_token),
        )
    else:
        self.logger.warning("SEA_LION credentials not set. Thai responses will use fallbacks.")

    # Gemini API (English). Any client-construction error degrades to
    # disabled rather than raising out of __init__.
    # NOTE(review): self.gemini_model_name and self.gemini_client are only
    # assigned in the success branch — code reading them must guard with
    # getattr()/gemini_enabled.
    self.gemini_api_key = os.getenv("GEMINI_API_KEY")
    if self.gemini_api_key:
        try:
            self.gemini_client = genai.Client(api_key=self.gemini_api_key)
            self.gemini_model_name = "gemini-2.5-flash-lite"
            self.gemini_enabled = True
            self.logger.info(f"Gemini API client initialized with model: {self.gemini_model_name}")
        except Exception as e:
            self.gemini_enabled = False
            self.logger.error(f"Failed to initialize Gemini client: {str(e)}")
    else:
        self.gemini_enabled = False
        self.logger.warning("GEMINI_API_KEY not set. English responses will use fallbacks.")
|
| 91 |
+
|
| 92 |
+
##############USE PROMPT ENGINEERING LATER#################
|
| 93 |
+
def _detect_language(self, text: str) -> str:
|
| 94 |
+
"""
|
| 95 |
+
Detect if the text is primarily Thai or English
|
| 96 |
+
"""
|
| 97 |
+
# Count Thai characters (Unicode range for Thai)
|
| 98 |
+
thai_chars = len(re.findall(r'[\u0E00-\u0E7F]', text))
|
| 99 |
+
# Count English characters (basic Latin letters)
|
| 100 |
+
english_chars = len(re.findall(r'[a-zA-Z]', text))
|
| 101 |
+
|
| 102 |
+
total_chars = thai_chars + english_chars
|
| 103 |
+
|
| 104 |
+
if total_chars == 0:
|
| 105 |
+
# Default to English if no detectable characters
|
| 106 |
+
return 'english'
|
| 107 |
+
|
| 108 |
+
thai_ratio = thai_chars / total_chars
|
| 109 |
+
|
| 110 |
+
# If more than 10% Thai characters, consider it Thai
|
| 111 |
+
if thai_ratio > 0.1:
|
| 112 |
+
detected = 'thai'
|
| 113 |
+
else:
|
| 114 |
+
detected = 'english'
|
| 115 |
+
|
| 116 |
+
self.logger.debug(f"Language detection: {detected} (Thai: {thai_chars}, English: {english_chars}, Ratio: {thai_ratio:.2f})")
|
| 117 |
+
return detected
|
| 118 |
+
##############USE PROMPT ENGINEERING LATER#################
|
| 119 |
+
|
| 120 |
+
# ===== SEA-LION BLOCK (THAI) =====
|
| 121 |
+
def _get_sealion_prompt(self) -> str:
|
| 122 |
+
"""Thai therapeutic prompt for SEA-Lion"""
|
| 123 |
+
return """
|
| 124 |
+
**บทบาทของคุณ:**
|
| 125 |
+
คุณเป็น “ที่ปรึกษาทางจิตวิทยาเชิง MI” สำหรับบุคลากรแพทย์
|
| 126 |
+
เป้าหมายของคุณคือให้คำแนะนำในการตอบสนองของผู้สื่อสารทางการแพทย์เพื่อเพิ่มประสิทธิภาพในการสนทนาของผู้สื่อสารทางการแพทย์และผู้ใช้งาน
|
| 127 |
+
|
| 128 |
+
**หลักการบังคับใช้:**
|
| 129 |
+
- ยึด MI Spirit: Collaboration, Evocation, Autonomy
|
| 130 |
+
- ใช้ OARS: Open questions, Affirmations, Reflections, Summaries
|
| 131 |
+
- ตั้งเป้าอัตราส่วน Reflection:Question ≈ 2:1 เมื่อเป็นไปได้
|
| 132 |
+
- ถ้าข้อมูลไม่พอ: ขอเก็บข้อมูลด้วยคำถามปลายเปิด 1–2 ข้อ
|
| 133 |
+
- ใช้การตอบที่สั้นกระชับใจความ
|
| 134 |
+
- ใช้ภาษาไทยเท่านั้น (ยกเว้นคำทับศัพย์เช่น Motivational Interview)
|
| 135 |
+
- คุณให้คำแนะนำสำหรับการสื่อสารแบบ Motivational Interviewing (MI) บุคลาการทางการแพทย์เพื่อใช้ในการวินิจฉัยและรักษาอาการทางจิต
|
| 136 |
+
- ในกรณีฉุกเฉิน (ความคิดฆ่าตัวตาย การทำร้ายตนเอง โรคจิต) แนะนำให้แสวงหาความช่วยเหลือจากผู้เชี่ยวชาญฉุกเฉิน
|
| 137 |
+
- คุณตอบในรูปแบบของ JSON เท่านั้น
|
| 138 |
+
|
| 139 |
+
**คู่มือ Motivational Interview (MI):**
|
| 140 |
+
1.จิตวิญญาณของ MI (MI Spirit)
|
| 141 |
+
- ความร่วมมือ (Collaboration) → บุคลากรทางการแพทย์ทำงานแบบหุ้นส่วน ไม่ใช่สั่งการหรือตำหนิ
|
| 142 |
+
- การกระตุ้น (Evocation) → กระตุ้นให้ผู้ป่วยพูดถึงเหตุผลและแรงจูงใจของตนเอง
|
| 143 |
+
- การเคารพสิทธิ์การตัดสินใจ (Autonomy) → ผู้ป่วยเป็นผู้เลือกแนวทาง ไม่ถูกบังคับหรือกดดัน
|
| 144 |
+
2.ทักษะหลัก OARS
|
| 145 |
+
- คำถามปลายเปิด (Open questions) → ใช้คำถามที่ชวนให้ขยายความ ไม่ใช่แค่ตอบ “ใช่/ไม่ใช่”
|
| 146 |
+
- การยืนยัน (Affirmations) → ชื่นชมจุดแข็ง ความตั้งใจ และความพยายามของผู้ป่วย
|
| 147 |
+
- การสะท้อน (Reflections) → ทวนสิ่งที่ผู้ป่วยพูด ทั้งแบบสั้น ๆ และเชิงลึก เพื่อแสดงความเข้าใจ
|
| 148 |
+
- การสรุป (Summaries) → ทบทวนประเด็นหลักเป็นระยะ เพื่อสร้างความชัดเจนและตอกย้ำความก้าวหน้า
|
| 149 |
+
3.การแยก Change Talk กับ Sustain Talk
|
| 150 |
+
- Change Talk: เมื่อผู้ป่วยพูดถึงความปรารถนา ความสามารถ เหตุผล หรือความมุ่งมั่นในการเปลี่ยนแปลง → ผู้ให้บริการควรจับประเด็นและเสริมแรง
|
| 151 |
+
- Sustain Talk: เมื่อผู้ป่วยแสดงความลังเลหรือคงเดิม → ควรสะท้อนอย่างเป็นกลางหรือจัดกรอบใหม่ ไม่เถียงหรือบังคับ
|
| 152 |
+
4.สไตล์การสื่อสาร
|
| 153 |
+
- โทนเสียง (Tone) → ใช้น้ำเสียงเข้าใจ อ่อนโยน ไม่ตัดสิน
|
| 154 |
+
- การให้ข้อมูล (Information-giving) → ให้ข้อมูลด้วยการขออนุญาตก่อน เช่น “คุณอยากฟังคำแนะนำทางการแพทย์ที่มักใช้กันไหม”
|
| 155 |
+
- การควบคุมจังหวะ (Pacing) → เปิดพื้นที่ให้ผู้ป่วยพูด ฟังอย่างเพียงพอ ไม่เร่งรีบหรือสรุปแทนเร็วเกินไป
|
| 156 |
+
5.มุมมองเชิงเทคนิค (กร��บการ Coding เช่น MISC/AnnoMI)
|
| 157 |
+
- ใช้เทคนิคในอัตราส่วน Reflections : Questions ให้มากกว่า 1:1
|
| 158 |
+
- ในการใช้ Reflections ควรใช้ Complex Reflexion เช่น งานทำให้ชั้นเหนื่อย -> ความเครียดในการทำงานอาจทำให้คุณรู้สึกไม่เป็นตัวเอง
|
| 159 |
+
- ใช้คำถามที่กระตุ้นความคิดเพื่อเชิญชวนให้มีการเปลี่ยนแปลง สร้างความมั่นใจ และตั้งใจ (ทำไมคุณถึงอยากทำแบบนั้น... อะไรทำให้สิ่งนี้สำคัญในตอนนี้)
|
| 160 |
+
- คำตอบจะถูกให้คะแนนตามอัตราส่วน R/Q, %คำถามเปิด, %ของความคิดเห็นที่ซับซ้อน และความสอดคล้องของ MI เพิ่มประสิทธิภาพผลลัพธ์เพื่อให้ได้ค่าสูงสุดตามเกณฑ์เหล่านี้ โดยยังคงรักษาการสนทนาให้เป็นธรรมชาติ
|
| 161 |
+
|
| 162 |
+
**โครงสร้างที่ใช้ในการตอบ (JSON):**
|
| 163 |
+
{
|
| 164 |
+
"อารมณ์ของผู้ใช้ 1-10": {[อารมณ์ของผู้ใช้]: [ความเข้มของอารมณ์]}
|
| 165 |
+
"เทคนิคกับความน่าใช้ 1-10": {[เทคนิคอ้างอิงจากคู่มือที่ผู้สื่อสารควรใช้]: [ความน่าใช้]}
|
| 166 |
+
"สไตล์การสื่อสาร": [สไตล์การสื่อสารที่ควรใช้]
|
| 167 |
+
"ตัวอย่าง": [ตัวอย่างการใช้เทคนิคโดยย่อ]
|
| 168 |
+
"แผนการสนทนา": [แผนการดำเนินการสนทนาต่อไปโดยย่อ]
|
| 169 |
+
}
|
| 170 |
+
"""
|
| 171 |
+
|
| 172 |
+
def _format_messages_sealion(self, user_message: str, therapeutic_context: str = "") -> List[Dict[str, str]]:
|
| 173 |
+
"""Format messages for SEA-Lion API (Thai)"""
|
| 174 |
+
now = datetime.now()
|
| 175 |
+
|
| 176 |
+
context_info = f"""
|
| 177 |
+
**บริบทปัจจุบัน:**
|
| 178 |
+
- วันที่/เวลา: {now.strftime("%Y-%m-%d %H:%M:%S")}
|
| 179 |
+
- คำแนะนำของคุณก่อนหน้า: {therapeutic_context}
|
| 180 |
+
"""
|
| 181 |
+
|
| 182 |
+
system_message = {
|
| 183 |
+
"role": "system",
|
| 184 |
+
"content": f"{self._get_sealion_prompt()}\n\n{context_info}"
|
| 185 |
+
}
|
| 186 |
+
|
| 187 |
+
user_message_formatted = {
|
| 188 |
+
"role": "user",
|
| 189 |
+
"content": user_message
|
| 190 |
+
}
|
| 191 |
+
|
| 192 |
+
return [system_message, user_message_formatted]
|
| 193 |
+
|
| 194 |
+
def _generate_response_sealion(self, messages: List[Dict[str, str]]) -> str:
    """Call the SEA-Lion chat-completions API and return the answer text.

    Args:
        messages: OpenAI-style chat messages (system + user).

    Returns:
        The model's answer — the contents of a ```answer``` fence when the
        model emits one, otherwise the whole completion — or a Thai-language
        fallback string on any failure. Never raises.
    """
    # Degrade gracefully when no SEA-Lion credentials were configured.
    if not getattr(self, "sea_enabled", False):
        return "ขออภัยค่ะ ขณะนี้ไม่สามารถให้คำแนะนำด้านจิตวิทยาเชิงลึกได้ กรุณาลองใหม่อีกครั้งค่ะ"
    try:
        self.logger.debug(f"Sending {len(messages)} messages to SEA-Lion API")

        headers = {
            "Content-Type": "application/json"
        }
        if self.sea_lion_token:
            headers["Authorization"] = f"Bearer {self.sea_lion_token}"

        # temperature=0 for deterministic output; thinking_mode asks the
        # model for an explicit reasoning trace (logged, not returned).
        payload = {
            "model": self.sea_lion_model,
            "messages": messages,
            "chat_template_kwargs": {
                "thinking_mode": "on"
            },
            "max_tokens": 2000,
            "temperature": 0,
            "top_p": 1.0,
            "frequency_penalty": 0.1,
            "presence_penalty": 0.1
        }

        response = requests.post(
            f"{self.sea_lion_base_url}/chat/completions",
            headers=headers,
            json=payload,
            timeout=90
        )

        response.raise_for_status()
        response_data = response.json()

        # Defensive validation of the OpenAI-compatible response shape.
        if "choices" not in response_data or len(response_data["choices"]) == 0:
            self.logger.error(f"Unexpected response structure: {response_data}")
            return "ขออภัยค่ะ ไม่สามารถประมวลผลคำตอบได้ในขณะนี้"

        choice = response_data["choices"][0]
        if "message" not in choice or "content" not in choice["message"]:
            self.logger.error(f"Unexpected message structure: {choice}")
            return "ขออภัยค่ะ ไม่สามารถประมวลผลคำตอบได้ในขณะนี้"

        raw_content = choice["message"]["content"]

        if raw_content is None or (isinstance(raw_content, str) and raw_content.strip() == ""):
            self.logger.error("SEA-Lion API returned None or empty content")
            return "ขออภัยค่ะ ไม่สามารถสร้างคำตอบได้ในขณะนี้"

        # Extract answer block
        answer_match = re.search(r"```answer\s*(.*?)\s*```", raw_content, re.DOTALL)
        final_answer = answer_match.group(1).strip() if answer_match else raw_content.strip()

        # Log thinking privately
        thinking_match = re.search(r"```thinking\s*(.*?)\s*```", raw_content, re.DOTALL)
        if thinking_match:
            self.logger.debug(f"SEA-Lion thinking:\n{thinking_match.group(1).strip()}")

        self.logger.info(f"Received SEA-Lion response (length: {len(final_answer)} chars)")
        return final_answer

    except requests.exceptions.RequestException as e:
        # Collect as much diagnostic detail as the exception carries
        # (status code, response body preview, request URL).
        status_code = getattr(getattr(e, 'response', None), 'status_code', None)
        body_preview = None
        if getattr(e, 'response', None) is not None:
            try:
                body_preview = e.response.text[:500]  # type: ignore
            except Exception:
                body_preview = '<unavailable>'
        request_url = getattr(getattr(e, 'request', None), 'url', None)
        self.logger.error(
            "SEA-Lion psychologist request failed (%s) url=%s status=%s body=%s message=%s",
            e.__class__.__name__,
            request_url,
            status_code,
            body_preview,
            str(e),
        )
        if status_code == 401:
            # Auth failures get an extra line with the (fingerprinted) token
            # so credentials can be traced without leaking the secret.
            self.logger.error(
                "SEA-Lion psychologist authentication failed. mode=%s model=%s token_fp=%s base_url=%s",
                getattr(self, "sea_mode", "unknown"),
                getattr(self, "sea_lion_model", "unknown"),
                fingerprint_secret(getattr(self, "sea_lion_token", None)),
                getattr(self, "sea_lion_base_url", "unknown"),
            )
        return "ขออภัยค่ะ เกิดปัญหาในการเชื่อมต่อ กรุณาลองใหม่อีกครั้งค่ะ"
    except Exception as e:
        self.logger.error(f"Error generating SEA-Lion response: {str(e)}")
        return "ขออภัยค่ะ เกิดข้อผิดพลาดในระบบ"
|
| 286 |
+
|
| 287 |
+
# ===== GEMINI BLOCK (ENGLISH) =====
|
| 288 |
+
def _get_gemini_prompt(self) -> str:
|
| 289 |
+
"""English therapeutic prompt for Gemini"""
|
| 290 |
+
return """
|
| 291 |
+
**Your Role:**
|
| 292 |
+
You are a “Psychological Consultant” for a healthcare professional on Motivational Interview (MI) session.
|
| 293 |
+
Your goal is to provide guidance for the medical personnel on how to respond to improve the effectiveness of their conversations with healthcare professionals and users.
|
| 294 |
+
|
| 295 |
+
**Core rules:**
|
| 296 |
+
- Adhere to the MI Spirit: Collaboration, Evocation, Autonomy
|
| 297 |
+
- Use OARS: Open questions, Affirmations, Reflections, Summaries
|
| 298 |
+
- Aim for a Reflection:Question ratio of ≈ 2:1 when possible
|
| 299 |
+
- If data is insufficient, collect data using 1–2 open-ended questions
|
| 300 |
+
- Use short, concise responses
|
| 301 |
+
- Use only English language
|
| 302 |
+
- In emergencies (suicidal ideation) Self-harm, psychosis) recommend to seek professional help in emergency situations.
|
| 303 |
+
- You respond in JSON format only.
|
| 304 |
+
|
| 305 |
+
**Motivational Interview (MI) Guide:**
|
| 306 |
+
1. MI Spirit
|
| 307 |
+
- Collaboration → Healthcare professionals work in partnership, not in command or criticism.
|
| 308 |
+
- Evocation → Encourage the patient to discuss their own reasons and motivations.
|
| 309 |
+
- Autonomy → The patient chooses the path, not in force or pressure.
|
| 310 |
+
2. OARS Core Skills
|
| 311 |
+
- Open-ended questions → Use questions that encourage elaboration, not just “yes/no” answers.
|
| 312 |
+
- Affirmations → Appreciate the patient's strengths, determination, and efforts.
|
| 313 |
+
- Reflections → Briefly and in-depth recapitulate what the patient has said to demonstrate understanding.
|
| 314 |
+
- Summarization → Periodically review key points. To create clarity and reinforce progress
|
| 315 |
+
3. Distinguishing Change Talk from Sustain Talk
|
| 316 |
+
- Change Talk: When the patient discusses their desire, ability, reasons, or commitment to change → The provider should address the issue and reinforce it.
|
| 317 |
+
- Sustain Talk: When the patient expresses hesitation or persistence → Reflect neutrally or reframe the situation without argument or coercion.
|
| 318 |
+
4. Communication Style
|
| 319 |
+
- Tone → Use an understanding, gentle, and non-judgmental tone.
|
| 320 |
+
- Information-giving → Provide information by asking permission first, such as, "Would you like to hear commonly used medical advice?"
|
| 321 |
+
- Pacing → Allow the patient space to speak, listen adequately, without rushing or summarizing too quickly.
|
| 322 |
+
5. Technical Perspective (Coding Frameworks such as MISC/AnnoMI)
|
| 323 |
+
- Use a technique with a ratio of Reflections to Questions greater than 1:1.
|
| 324 |
+
- When using Reflections, use complex reflexes, such as, "Work is making me tired" -> "Work stress can make you feel out of place."
|
| 325 |
+
- Use thought-provoking questions to invite change, build confidence, and resolve ("Why do you want to do that?") What Makes This Important Now?
|
| 326 |
+
- Responses are scored based on R/Q ratio, % Open Questions, % Complex Comments, and MI Consistency. Optimize the results to maximize these criteria while maintaining a natural conversation.
|
| 327 |
+
|
| 328 |
+
**Response Guidelines:**
|
| 329 |
+
- Use open-ended questions, empathetic tone, and validation.
|
| 330 |
+
- Provide psychoeducation and coping strategies.
|
| 331 |
+
- Include evidence-based interventions tailored to the client.
|
| 332 |
+
- Always respond in English.
|
| 333 |
+
- Always include crisis safety steps when risk is detected.
|
| 334 |
+
|
| 335 |
+
**Response Structure (JSON):**
|
| 336 |
+
{
|
| 337 |
+
"User Mood with Weight 1-10": {[User's Mood]: [Weight]}
|
| 338 |
+
"Techniques with Usability 1-10": {[Techniques Referenced from the MI Guide]: [Usability]}
|
| 339 |
+
"Communication Style": [recommended communication style]
|
| 340 |
+
"Example": [a brief example of using techniques]
|
| 341 |
+
"Conversation Plan": [a brief conversation action plan for your future self]
|
| 342 |
+
}
|
| 343 |
+
"""
|
| 344 |
+
|
| 345 |
+
def _format_prompt_gemini(self, user_message: str, therapeutic_context: str = "") -> str:
|
| 346 |
+
"""Format prompt for Gemini API (English)"""
|
| 347 |
+
now = datetime.now()
|
| 348 |
+
|
| 349 |
+
context_info = f"""
|
| 350 |
+
**Current Context:**
|
| 351 |
+
- Date/Time: {now.strftime("%Y-%m-%d %H:%M:%S")}
|
| 352 |
+
- Your Previous Commentary: {therapeutic_context}
|
| 353 |
+
"""
|
| 354 |
+
|
| 355 |
+
prompt = f"""{self._get_gemini_prompt()}
|
| 356 |
+
|
| 357 |
+
{context_info}
|
| 358 |
+
|
| 359 |
+
**Client Message:** {user_message}
|
| 360 |
+
|
| 361 |
+
Please follow the guidance above."""
|
| 362 |
+
|
| 363 |
+
return prompt
|
| 364 |
+
|
| 365 |
+
def _generate_response_gemini(self, prompt: str) -> str:
    """Call the Gemini API with *prompt* and return the response text.

    Returns an English-language fallback string (including crisis guidance)
    on any failure; never raises.
    """
    # Degrade gracefully when the Gemini client never initialized.
    if not getattr(self, "gemini_enabled", False):
        return "I’m unable to provide detailed psychological guidance right now. Please try again later."
    try:
        self.logger.debug(f"Sending prompt to Gemini API (length: {len(prompt)} chars)")

        response = self.gemini_client.models.generate_content(
            model=self.gemini_model_name,
            contents=[prompt],
        )

        response_text = response.text

        # The SDK can return None or an empty string for blocked/empty output.
        if response_text is None or (isinstance(response_text, str) and response_text.strip() == ""):
            self.logger.error("Gemini API returned None or empty content")
            return "I apologize, but I'm unable to generate a response at this time. Please try again later."

        self.logger.info(f"Received Gemini response (length: {len(response_text)} chars)")
        return str(response_text).strip()

    except Exception as e:
        self.logger.error(f"Error generating Gemini response: {str(e)}")
        return "I apologize, but I'm experiencing technical difficulties. Please try again later, and if you're having thoughts of self-harm or are in crisis, please contact a mental health professional or emergency services immediately."
|
| 389 |
+
|
| 390 |
+
# ===== ANALYSIS METHODS FOR ORCHESTRATOR =====
|
| 391 |
+
def analyze(self, message: str, history: List[Dict[str, str]], chain_of_thoughts: str, summarized_histories: str) -> str:
|
| 392 |
+
"""
|
| 393 |
+
Analyze method expected by the orchestrator
|
| 394 |
+
Provides psychological analysis and therapeutic guidance
|
| 395 |
+
|
| 396 |
+
Args:
|
| 397 |
+
message: The client's message to analyze
|
| 398 |
+
history: Chat history as list of message dictionaries
|
| 399 |
+
chain_of_thoughts: Chain of thoughts from previous processing
|
| 400 |
+
summarized_histories: Previously summarized conversation histories
|
| 401 |
+
|
| 402 |
+
Returns:
|
| 403 |
+
Therapeutic analysis and guidance response
|
| 404 |
+
"""
|
| 405 |
+
self.logger.info("Starting psychological analysis")
|
| 406 |
+
self.logger.debug(f"Analyzing message: {message}")
|
| 407 |
+
self.logger.debug(f"History length: {len(history) if history else 0}")
|
| 408 |
+
self.logger.debug(f"Chain of thoughts length: {len(chain_of_thoughts) if chain_of_thoughts else 0}")
|
| 409 |
+
self.logger.debug(f"Summarized histories length: {len(summarized_histories) if summarized_histories else 0}")
|
| 410 |
+
|
| 411 |
+
try:
|
| 412 |
+
# Build therapeutic context from history and chain of thoughts
|
| 413 |
+
context_parts = []
|
| 414 |
+
|
| 415 |
+
# Add recent conversation context
|
| 416 |
+
if history:
|
| 417 |
+
recent_messages = history[-3:] if len(history) > 3 else history # Last 3 messages
|
| 418 |
+
context_parts.append("Recent conversation context:")
|
| 419 |
+
for msg in recent_messages:
|
| 420 |
+
role = msg.get('role', 'unknown')
|
| 421 |
+
content = msg.get('content', '')
|
| 422 |
+
context_parts.append(f"- {role}: {content}")
|
| 423 |
+
|
| 424 |
+
# Add chain of thoughts if available
|
| 425 |
+
if chain_of_thoughts:
|
| 426 |
+
context_parts.append("Previous analysis context:")
|
| 427 |
+
for thought in chain_of_thoughts[-2:]: # Last 2 thoughts
|
| 428 |
+
if isinstance(thought, dict):
|
| 429 |
+
content = thought.get('content', str(thought))
|
| 430 |
+
else:
|
| 431 |
+
content = str(thought)
|
| 432 |
+
context_parts.append(f"- {content}")
|
| 433 |
+
|
| 434 |
+
# Add summarized context if available
|
| 435 |
+
if summarized_histories:
|
| 436 |
+
context_parts.append("Historical context summary:")
|
| 437 |
+
for summary in summarized_histories[-1:]: # Most recent summary
|
| 438 |
+
if isinstance(summary, dict):
|
| 439 |
+
content = summary.get('content', str(summary))
|
| 440 |
+
else:
|
| 441 |
+
content = str(summary)
|
| 442 |
+
context_parts.append(f"- {content}")
|
| 443 |
+
|
| 444 |
+
therapeutic_context = "\n".join(context_parts) if context_parts else "New conversation session"
|
| 445 |
+
|
| 446 |
+
# Use the main therapeutic guidance method
|
| 447 |
+
response = self.provide_therapeutic_guidance(
|
| 448 |
+
user_message=message,
|
| 449 |
+
therapeutic_context=therapeutic_context
|
| 450 |
+
)
|
| 451 |
+
|
| 452 |
+
self.logger.info("Psychological analysis completed successfully")
|
| 453 |
+
return response
|
| 454 |
+
|
| 455 |
+
except Exception as e:
|
| 456 |
+
self.logger.error(f"Error in analyze method: {str(e)}")
|
| 457 |
+
# Return fallback based on detected language
|
| 458 |
+
try:
|
| 459 |
+
lang = self._detect_language(message)
|
| 460 |
+
if lang == 'thai':
|
| 461 |
+
return "ขออภัยค่ะ เกิดปัญหาทางเทคนิค หากมีความคิดฆ่าตัวตายหรืออยู่ในภาวะฉุกเฉิน กรุณาติดต่อนักจิตวิทยาหรือหน่วยงานฉุกเฉินทันที"
|
| 462 |
+
else:
|
| 463 |
+
return "I apologize for the technical issue. If you're having thoughts of self-harm or are in crisis, please contact a mental health professional or emergency services immediately."
|
| 464 |
+
except:
|
| 465 |
+
return "Technical difficulties. If in crisis, seek immediate professional help."
|
| 466 |
+
|
| 467 |
+
# ===== MAIN OUTPUT METHODS =====
|
| 468 |
+
def provide_therapeutic_guidance(self, user_message: str, therapeutic_context: str = "") -> str:
    """
    Main method to provide psychological guidance with language-based API routing.

    Thai messages are routed to SEA-Lion, everything else to Gemini.

    Args:
        user_message: The client's message or concern.
        therapeutic_context: Additional context about the client's situation.

    Returns:
        Therapeutic response with guidance and support; a language-matched
        fallback string on failure (never raises).
    """
    self.logger.info("Processing therapeutic guidance request")
    self.logger.debug(f"User message: {user_message}")
    self.logger.debug(f"Therapeutic context: {therapeutic_context}")

    try:
        # Detect language
        detected_language = self._detect_language(user_message)
        self.logger.info(f"Detected language: {detected_language}")

        if detected_language == 'thai':
            # Use SEA-Lion for Thai
            messages = self._format_messages_sealion(user_message, therapeutic_context)
            response = self._generate_response_sealion(messages)
        else:
            # Use Gemini for English
            prompt = self._format_prompt_gemini(user_message, therapeutic_context)
            response = self._generate_response_gemini(prompt)

        if response is None:
            raise Exception(f"{detected_language} API returned None response")

        return response

    except Exception as e:
        self.logger.error(f"Error in provide_therapeutic_guidance: {str(e)}")
        # Return fallback based on detected language
        try:
            lang = self._detect_language(user_message)
            if lang == 'thai':
                return "ขออภัยค่ะ ระบบมีปัญหาชั่วคราว หากมีความคิดทำร้ายตัวเองหรืออยู่ในภาวะวิกฤต กรุณาติดต่อนักจิตวิทยาหรือหน่วยงานฉุกเฉินทันที"
            else:
                return "I apologize for the technical issue. If you're having thoughts of self-harm or are in crisis, please contact a mental health professional or emergency services immediately."
        # Fix: was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception.
        except Exception:
            return "Technical difficulties. If in crisis, seek immediate professional help."
|
| 513 |
+
|
| 514 |
+
def get_health_status(self) -> Dict[str, Any]:
|
| 515 |
+
"""Get current agent health status"""
|
| 516 |
+
status = {
|
| 517 |
+
"status": "healthy",
|
| 518 |
+
"language_routing": "thai->SEA-Lion, english->Gemini",
|
| 519 |
+
"sea_lion_configured": getattr(self, "sea_lion_token", None) is not None,
|
| 520 |
+
"gemini_configured": hasattr(self, 'gemini_api_key') and self.gemini_api_key,
|
| 521 |
+
"sea_lion_model": getattr(self, "sea_lion_model", "unknown"),
|
| 522 |
+
"gemini_model": self.gemini_model_name,
|
| 523 |
+
"timestamp": datetime.now().isoformat(),
|
| 524 |
+
"logging_enabled": True,
|
| 525 |
+
"log_level": self.logger.level,
|
| 526 |
+
"methods_available": ["analyze", "provide_therapeutic_guidance", "get_health_status"]
|
| 527 |
+
}
|
| 528 |
+
|
| 529 |
+
self.logger.debug(f"Health status check: {status}")
|
| 530 |
+
return status
|
| 531 |
+
|
| 532 |
+
|
| 533 |
+
if __name__ == "__main__":
    # Manual smoke test for the completed PsychologistAgent (hits live APIs).
    try:
        psychologist = PsychologistAgent(log_level=logging.DEBUG)
    except Exception as boot_error:
        print(f"[BOOT ERROR] Unable to start PsychologistAgent: {boot_error}")
        raise SystemExit(1)

    def _banner(title: str) -> None:
        # Same three-line section banner the individual tests print.
        print(f"\n{'='*60}")
        print(title)
        print(f"{'='*60}")

    # Bilingual scenarios exercising the thai->SEA-Lion / english->Gemini routing.
    test_cases = [
        {
            "name": "Thai - Exam Anxiety",
            "user_message": "หนูปวดหัวและกังวลเรื่องสอบค่ะ นอนไม่หลับและกังวลว่าจะสอบตก",
            "therapeutic_context": "User: นักศึกษาอายุ 21 ปี ช่วงสอบกลางเทอม นอนน้อย (4-5 ชั่วโมง) ดื่มกาแฟมาก มีประวัติวิตกกังวลในช่วงความเครียดทางการศึกษา"
        },
        {
            "name": "English - Work Stress",
            "user_message": "I've been feeling overwhelmed lately with work and personal life. Everything feels like too much and I can't cope.",
            "therapeutic_context": "User: Working professional, recent job change, managing family responsibilities, seeking coping strategies."
        },
        {
            "name": "Thai - Relationship Issues",
            "user_message": "ความสัมพันธ์กับแฟนมีปัญหามากค่ะ เราทะเลาะกันบ่อยๆ ไม่รู้จะแก้ไขยังไง",
            "therapeutic_context": "User: อยู่ในความสัมพันธ์ที่มั่นคง มีปัญหาการสื่อสาร ขอคำแนะนำเรื่องความสัมพันธ์และการแก้ปัญหาความขัดแย้ง"
        },
        {
            "name": "English - Anxiety Management",
            "user_message": "I keep having panic attacks and I don't know how to control them. It's affecting my daily life and work performance.",
            "therapeutic_context": "User: Experiencing frequent panic attacks, seeking anxiety management techniques, work performance concerns."
        },
    ]

    # --- analyze(): the entry point the orchestrator depends on -------------
    _banner("TESTING ANALYZE METHOD (Required by Orchestrator)")

    test_message = "hello im kinda sad"
    test_history = [
        {"role": "user", "content": "Hi there"},
        {"role": "assistant", "content": "Hello! How can I help you today?"},
        {"role": "user", "content": test_message},
    ]
    test_chain_of_thoughts = [
        {"step": "analysis", "content": "User expressing mild sadness, needs supportive guidance"},
        {"step": "routing", "content": "Psychological support required"},
    ]
    test_summarized_histories = [
        {"session": "previous", "content": "User has been dealing with some personal challenges"},
    ]

    print(f"\n Test Message: {test_message}")
    print(f" History: {len(test_history)} messages")
    print(f" Chain of thoughts: {len(test_chain_of_thoughts)} items")
    print(f" Summaries: {len(test_summarized_histories)} items")

    print(f"\n ANALYZE METHOD RESPONSE:")
    print("-" * 50)
    analyze_response = psychologist.analyze(
        test_message, test_history, str(test_chain_of_thoughts), str(test_summarized_histories)
    )
    print(analyze_response)

    # --- provide_therapeutic_guidance() over every scenario -----------------
    for case_number, case in enumerate(test_cases, 1):
        _banner(f"TEST {case_number}: {case['name']}")

        print(f"\n User Message: {case['user_message']}")
        print(f" Context: {case['therapeutic_context']}")

        print(f"\n PSYCHOLOGIST RESPONSE:")
        print("-" * 50)
        reply = psychologist.provide_therapeutic_guidance(
            user_message=case['user_message'],
            therapeutic_context=case['therapeutic_context'],
        )
        print(reply)
        print("\n" + "="*60)

    # --- health endpoint -----------------------------------------------------
    _banner("HEALTH STATUS CHECK")
    status = psychologist.get_health_status()
    print(json.dumps(status, indent=2, ensure_ascii=False))
|
src/kallam/domain/agents/summarizer.py
ADDED
|
@@ -0,0 +1,157 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import json
|
| 3 |
+
import logging
|
| 4 |
+
import re
|
| 5 |
+
from google import genai
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
from datetime import datetime
|
| 8 |
+
from typing import List, Dict, Optional
|
| 9 |
+
|
| 10 |
+
from dotenv import load_dotenv
|
| 11 |
+
load_dotenv()
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class SummarizerAgent:
    """Gemini-backed agent that condenses chat transcripts into
    medically-oriented summaries for downstream agents."""

    def __init__(self, log_level: int = logging.INFO):
        self._setup_logging(log_level)
        self._setup_api_clients()
        self.logger.info("Summarizer Agent initialized successfully")

    def _setup_logging(self, log_level: int) -> None:
        # Console-only logger; guard against stacking handlers on re-init.
        self.logger = logging.getLogger(f"{__name__}.SummarizerAgent")
        self.logger.setLevel(log_level)
        if not self.logger.handlers:
            stream = logging.StreamHandler()
            stream.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
            self.logger.addHandler(stream)

    def _setup_api_clients(self) -> None:
        # Fail fast when the Gemini key is missing or whitespace-only.
        raw_key = os.getenv("GEMINI_API_KEY", "")
        self.gemini_api_key = raw_key.strip()
        if raw_key and not self.gemini_api_key:
            self.logger.warning("GEMINI_API_KEY contained only whitespace after stripping")
        if not self.gemini_api_key:
            raise ValueError("GEMINI_API_KEY not found in environment variables")

        self.gemini_client = genai.Client(api_key=self.gemini_api_key)
        self.gemini_model_name = "gemini-1.5-flash"

    def _generate_response(self, prompt: str) -> str:
        # Single-shot generation; every summarization prompt funnels through here.
        try:
            reply = self.gemini_client.models.generate_content(
                model=self.gemini_model_name,
                contents=[prompt]
            )
            return reply.text.strip() if reply.text else "Unable to generate summary"
        except Exception as e:
            self.logger.error("Error generating summary (%s): %s", e.__class__.__name__, str(e))
            return "Error generating summary"

    def _format_history(self, history: List[Dict[str, str]]) -> str:
        # Flatten a message list into "role: content" lines for prompting.
        if not history:
            return "No conversation history"
        return "\n".join(
            f"{msg.get('role', 'unknown')}: {msg.get('content', '')}" for msg in history
        )

    def summarize_conversation_history(self, response_history: List[Dict[str, str]], **kwargs) -> str:
        """Summarize conversation history"""
        formatted_history = self._format_history(response_history)

        prompt = f"""
**Your Role:**
you are a medical/psychological summarization assistant.

**Core Rules:**
- Summarize the conversation focusing on key points and medical/health or psychological information:

{formatted_history}

Create a brief summary highlighting:
- Main topics discussed
- Any health concerns or symptoms mentioned
- Important advice or recommendations given
- Patient's emotional state or concerns

Keep it concise and medically relevant."""

        return self._generate_response(prompt)

    def summarize_medical_session(self, session_history: List[Dict[str, str]], **kwargs) -> str:
        """Summarize a medical session"""
        formatted_history = self._format_history(session_history)

        prompt = f"""Summarize this medical session:

{formatted_history}

Focus on:
- Chief complaints and symptoms
- Assessment and observations
- Treatment recommendations
- Follow-up requirements

Keep it professional and structured for medical records."""

        return self._generate_response(prompt)

    def summarize(self, chat_history: List[Dict[str, str]], existing_summaries: List[Dict[str, str]]) -> str:
        """Main summarization method called by orchestrator"""
        formatted_history = self._format_history(chat_history)

        # Fold prior session summaries into the prompt when present.
        context = ""
        if existing_summaries:
            summaries_text = "\n".join(
                s.get('summary', s.get('content', '')) for s in existing_summaries
            )
            context = f"\nPrevious summaries:\n{summaries_text}\n"

        prompt = f"""Create a comprehensive summary of this conversation:{context}

Recent conversation:
{formatted_history}

Provide a summary that:
- Combines new information with previous context
- Highlights medical/psychological insights
- Notes patient progress or changes
- Maintains continuity of care focus

Keep it concise but comprehensive."""

        return self._generate_response(prompt)
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
if __name__ == "__main__":
    # Manual smoke test for the Summarizer Agent (requires a live Gemini key).
    try:
        summarizer = SummarizerAgent(log_level=logging.DEBUG)

        sample_history = [
            {"role": "user", "content": "I've been having headaches for the past week"},
            {"role": "assistant", "content": "Tell me more about these headaches - when do they occur and how severe are they?"},
            {"role": "user", "content": "They're usually worse in the afternoon, around 7/10 pain level"},
            {"role": "assistant", "content": "That sounds concerning. Have you noticed any triggers like stress, lack of sleep, or screen time?"},
        ]

        # Conversation-level summary.
        print("=== TEST: CONVERSATION SUMMARY ===")
        print(summarizer.summarize_conversation_history(response_history=sample_history))

        # Session-level (medical-record style) summary.
        print("\n=== TEST: MEDICAL SESSION SUMMARY ===")
        print(summarizer.summarize_medical_session(session_history=sample_history))

        # Orchestrator entry point combining history with prior summaries.
        print("\n=== TEST: COMPREHENSIVE SUMMARY ===")
        prior = [{"summary": "Patient reported initial headache symptoms"}]
        print(summarizer.summarize(chat_history=sample_history, existing_summaries=prior))

    except Exception as e:
        print(f"Error testing Summarizer Agent: {e}")
|
src/kallam/domain/agents/supervisor.py
ADDED
|
@@ -0,0 +1,520 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from dotenv import load_dotenv
|
| 2 |
+
load_dotenv()
|
| 3 |
+
import logging
|
| 4 |
+
import requests
|
| 5 |
+
import re
|
| 6 |
+
import json
|
| 7 |
+
from pathlib import Path
|
| 8 |
+
from datetime import datetime
|
| 9 |
+
|
| 10 |
+
from typing import Literal, Optional, Dict, Any, List
|
| 11 |
+
from kallam.infrastructure.sea_lion import load_sea_lion_settings, fingerprint_secret
|
| 12 |
+
from strands import Agent, tool
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class SupervisorAgent:
|
| 16 |
+
def __init__(self, log_level: int = logging.INFO):
|
| 17 |
+
|
| 18 |
+
self._setup_logging(log_level)
|
| 19 |
+
self._setup_api_clients()
|
| 20 |
+
|
| 21 |
+
self.logger.info(f"KaLLaM chatbot initialized successfully using ")
|
| 22 |
+
|
| 23 |
+
self.system_prompt = """
|
| 24 |
+
**Your Role:**
|
| 25 |
+
You are "KaLLaM" or "กะหล่ำ" with a nickname "Kabby" You are a Thai, warm, friendly, female, doctor, psychiatrist, chatbot specializing in analyzing and improving patient's physical and mental health.
|
| 26 |
+
Your goal is to provide actionable guidance that motivates patients to take better care of themselves.
|
| 27 |
+
|
| 28 |
+
**Core Rules:**
|
| 29 |
+
- You are the supervisor agent that handle multiple agents for the response.
|
| 30 |
+
- You **ALWAYS** respond with the same language as the user.
|
| 31 |
+
- Do not include introduction (except first interaction) or your thoughts to your final response.
|
| 32 |
+
- You can also be very serious up to the context of the conversation.
|
| 33 |
+
"""
|
| 34 |
+
|
| 35 |
+
def _setup_logging(self, log_level: int) -> None:
|
| 36 |
+
"""Setup logging configuration"""
|
| 37 |
+
# Create logs directory if it doesn't exist
|
| 38 |
+
log_dir = Path("logs")
|
| 39 |
+
log_dir.mkdir(exist_ok=True)
|
| 40 |
+
|
| 41 |
+
# Setup logger
|
| 42 |
+
self.logger = logging.getLogger(f"{__name__}.KaLLaMChatbot")
|
| 43 |
+
self.logger.setLevel(log_level)
|
| 44 |
+
|
| 45 |
+
# Remove existing handlers to avoid duplicates
|
| 46 |
+
if self.logger.handlers:
|
| 47 |
+
self.logger.handlers.clear()
|
| 48 |
+
|
| 49 |
+
# File handler for detailed logs
|
| 50 |
+
file_handler = logging.FileHandler(
|
| 51 |
+
log_dir / f"kallam_{datetime.now().strftime('%Y%m%d')}.log",
|
| 52 |
+
encoding='utf-8'
|
| 53 |
+
)
|
| 54 |
+
file_handler.setLevel(logging.DEBUG)
|
| 55 |
+
|
| 56 |
+
# Console handler for immediate feedback
|
| 57 |
+
console_handler = logging.StreamHandler()
|
| 58 |
+
console_handler.setLevel(log_level)
|
| 59 |
+
|
| 60 |
+
# Formatter
|
| 61 |
+
formatter = logging.Formatter(
|
| 62 |
+
'%(asctime)s - %(name)s - %(levelname)s - %(message)s'
|
| 63 |
+
)
|
| 64 |
+
file_handler.setFormatter(formatter)
|
| 65 |
+
console_handler.setFormatter(formatter)
|
| 66 |
+
|
| 67 |
+
# Add handlers
|
| 68 |
+
self.logger.addHandler(file_handler)
|
| 69 |
+
self.logger.addHandler(console_handler)
|
| 70 |
+
|
| 71 |
+
def _setup_api_clients(self) -> None:
|
| 72 |
+
"""Setup API clients; do not hard-fail if env is missing."""
|
| 73 |
+
settings = load_sea_lion_settings(default_model="aisingapore/Gemma-SEA-LION-v4-27B-IT")
|
| 74 |
+
self.sea_lion_base_url = settings.base_url
|
| 75 |
+
self.sea_lion_token = settings.token
|
| 76 |
+
self.sea_lion_model = settings.model
|
| 77 |
+
self.sea_lion_mode = settings.mode
|
| 78 |
+
self.api_enabled = self.sea_lion_token is not None
|
| 79 |
+
|
| 80 |
+
if self.api_enabled:
|
| 81 |
+
self.logger.info(
|
| 82 |
+
"SEA-Lion client ready (mode=%s, base_url=%s, model=%s, token_fp=%s)",
|
| 83 |
+
self.sea_lion_mode,
|
| 84 |
+
self.sea_lion_base_url,
|
| 85 |
+
self.sea_lion_model,
|
| 86 |
+
fingerprint_secret(self.sea_lion_token),
|
| 87 |
+
)
|
| 88 |
+
else:
|
| 89 |
+
# Keep running; downstream logic will use safe fallbacks
|
| 90 |
+
self.logger.warning(
|
| 91 |
+
"SEA_LION credentials not set. Supervisor will use safe fallback responses."
|
| 92 |
+
)
|
| 93 |
+
|
| 94 |
+
def _format_chat_history_for_sea_lion(
|
| 95 |
+
self,
|
| 96 |
+
chat_histories: List[Dict[str, str]],
|
| 97 |
+
user_message: str,
|
| 98 |
+
memory_context: str,
|
| 99 |
+
task: str,
|
| 100 |
+
summarized_histories: Optional[List] = None,
|
| 101 |
+
commentary: Optional[Dict[str, str]] = None,
|
| 102 |
+
) -> List[Dict[str, str]]:
|
| 103 |
+
|
| 104 |
+
# Create system message with context
|
| 105 |
+
now = datetime.now()
|
| 106 |
+
|
| 107 |
+
# Build context information
|
| 108 |
+
context_info = f"""
|
| 109 |
+
**Current Context:**
|
| 110 |
+
- Date/Time: {now.strftime("%Y-%m-%d %H:%M:%S")}
|
| 111 |
+
- Persistent Memory: {memory_context}
|
| 112 |
+
"""
|
| 113 |
+
# Add summarized histories to context_info if available
|
| 114 |
+
if summarized_histories:
|
| 115 |
+
summarized_text = "\n".join([f"{m.get('role', 'unknown')}: {m.get('content', '')}" for m in summarized_histories])
|
| 116 |
+
context_info += f"- Previous Session Summaries: {summarized_text}\n"
|
| 117 |
+
|
| 118 |
+
if task == "flag":
|
| 119 |
+
system_message = {
|
| 120 |
+
"role": "system",
|
| 121 |
+
"content": f"""
|
| 122 |
+
{self.system_prompt}
|
| 123 |
+
|
| 124 |
+
{context_info}
|
| 125 |
+
|
| 126 |
+
**Previous Activated Agents Commentaries:**
|
| 127 |
+
{commentary}
|
| 128 |
+
|
| 129 |
+
**Specific Task (strict):**
|
| 130 |
+
Return ONLY a single JSON object and nothing else. No intro, no markdown, no code fences.
|
| 131 |
+
- If the user reports physical symptoms, illnesses, treatments, or medications → activate **DoctorAgent**.
|
| 132 |
+
- If the user reports emotional struggles, thoughts, relationships, or psychological concerns → activate **PsychologistAgent**.
|
| 133 |
+
- Always respond according to the **Output Schema:**.
|
| 134 |
+
|
| 135 |
+
**JSON Schema:**
|
| 136 |
+
{{
|
| 137 |
+
"language": "english" | "thai",
|
| 138 |
+
"doctor": true | false,
|
| 139 |
+
"psychologist": true | false
|
| 140 |
+
}}
|
| 141 |
+
|
| 142 |
+
**Rules:**
|
| 143 |
+
- If the user reports physical symptoms, illnesses, treatments, or medications → set "doctor": true
|
| 144 |
+
- If the user suggest any emotional struggles, thoughts, relationships, or psychological concerns → set "psychologist": true
|
| 145 |
+
- According to the previous commentaries the "psychologist" should be false only when the conversation clearly don't need psychologist anymore
|
| 146 |
+
- "language" MUST be exactly "english" or "thai" (lowercase)
|
| 147 |
+
- Both "doctor" and "psychologist" can be true if both aspects are present
|
| 148 |
+
- Do not include ANY text before or after the JSON object
|
| 149 |
+
- Do not use markdown code blocks or backticks
|
| 150 |
+
- Do not add explanations, commentary, or additional text
|
| 151 |
+
- Return ONLY the raw JSON object
|
| 152 |
+
"""
|
| 153 |
+
}
|
| 154 |
+
elif task == "finalize":
|
| 155 |
+
commentaries = f"{commentary}" if commentary else ""
|
| 156 |
+
context_info += f"- Commentary from each agents: {commentaries}\n"
|
| 157 |
+
system_message = {
|
| 158 |
+
"role": "system",
|
| 159 |
+
"content": f"""
|
| 160 |
+
{self.system_prompt}
|
| 161 |
+
|
| 162 |
+
{context_info}
|
| 163 |
+
|
| 164 |
+
**Specific Task:**
|
| 165 |
+
- You are a personal professional medical advisor.
|
| 166 |
+
- Read the given context and response throughly and only greet if there is no previous conversation record.
|
| 167 |
+
- Only reccommend immediate local professional help at the end of your response, if the conversation gets suicidal or very severe case.
|
| 168 |
+
- You are female so you use no "ครับ"
|
| 169 |
+
|
| 170 |
+
**Respone Guide:**
|
| 171 |
+
- Keep your response very concise unless the user need more context and response.
|
| 172 |
+
- You have 1 questions limit per response.
|
| 173 |
+
- Your final response must be concise and short according to one most recommendation from the commentary of each agents (may or maynot given) as a sentence.
|
| 174 |
+
- If you are answering question or asking question do not include reflexion.
|
| 175 |
+
- You can response longer if the user is interested in the topic.
|
| 176 |
+
- Try response with emoji based on the context (try use only 0-1 per response only).
|
| 177 |
+
- If each response have less content encorage new relative topic with longer message.
|
| 178 |
+
|
| 179 |
+
**Output Schema:**
|
| 180 |
+
[Your final response]
|
| 181 |
+
"""
|
| 182 |
+
}
|
| 183 |
+
|
| 184 |
+
# Format messages
|
| 185 |
+
messages = [system_message]
|
| 186 |
+
|
| 187 |
+
# Add full chat history (no truncation)
|
| 188 |
+
for msg in chat_histories:
|
| 189 |
+
# Ensure proper role mapping
|
| 190 |
+
role = msg.get('role', 'user')
|
| 191 |
+
if role not in ['user', 'assistant', 'system']:
|
| 192 |
+
role = 'user' if role != 'assistant' else 'assistant'
|
| 193 |
+
|
| 194 |
+
messages.append({
|
| 195 |
+
"role": role,
|
| 196 |
+
"content": msg.get('content', '')
|
| 197 |
+
})
|
| 198 |
+
|
| 199 |
+
# Add current user message
|
| 200 |
+
messages.append({
|
| 201 |
+
"role": "user",
|
| 202 |
+
"content": user_message
|
| 203 |
+
})
|
| 204 |
+
|
| 205 |
+
return messages
|
| 206 |
+
|
| 207 |
+
def _extract_and_validate_json(self, raw_content: str) -> str:
|
| 208 |
+
"""Enhanced JSON extraction and validation with fallback"""
|
| 209 |
+
if not raw_content:
|
| 210 |
+
self.logger.warning("Empty response received, returning default JSON")
|
| 211 |
+
return '{"language": "english", "doctor": false, "psychologist": false}'
|
| 212 |
+
|
| 213 |
+
# Remove thinking blocks first (if any)
|
| 214 |
+
content = raw_content
|
| 215 |
+
thinking_match = re.search(r"</think>", content, re.DOTALL)
|
| 216 |
+
if thinking_match:
|
| 217 |
+
content = re.sub(r".*?</think>\s*", "", content, flags=re.DOTALL).strip()
|
| 218 |
+
|
| 219 |
+
# Remove markdown code blocks
|
| 220 |
+
content = re.sub(r'^```(?:json|JSON)?\s*', '', content, flags=re.MULTILINE)
|
| 221 |
+
content = re.sub(r'```\s*$', '', content, flags=re.MULTILINE)
|
| 222 |
+
content = content.strip()
|
| 223 |
+
|
| 224 |
+
# Extract JSON object using multiple strategies
|
| 225 |
+
json_candidates = []
|
| 226 |
+
|
| 227 |
+
# Strategy 1: Find complete JSON objects
|
| 228 |
+
json_pattern = r'\{[^{}]*(?:\{[^{}]*\}[^{}]*)*\}'
|
| 229 |
+
matches = re.findall(json_pattern, content, re.DOTALL)
|
| 230 |
+
json_candidates.extend(matches)
|
| 231 |
+
|
| 232 |
+
# Strategy 2: Look for specific schema pattern
|
| 233 |
+
schema_pattern = r'\{\s*"language"\s*:\s*"(?:english|thai)"\s*,\s*"doctor"\s*:\s*(?:true|false)\s*,\s*"psychologist"\s*:\s*(?:true|false)\s*\}'
|
| 234 |
+
schema_matches = re.findall(schema_pattern, content, re.IGNORECASE | re.DOTALL)
|
| 235 |
+
json_candidates.extend(schema_matches)
|
| 236 |
+
|
| 237 |
+
# Strategy 3: If no complete JSON found, try to construct from parts
|
| 238 |
+
if not json_candidates:
|
| 239 |
+
language_match = re.search(r'"language"\s*:\s*"(english|thai)"', content, re.IGNORECASE)
|
| 240 |
+
doctor_match = re.search(r'"doctor"\s*:\s*(true|false)', content, re.IGNORECASE)
|
| 241 |
+
psychologist_match = re.search(r'"psychologist"\s*:\s*(true|false)', content, re.IGNORECASE)
|
| 242 |
+
|
| 243 |
+
if language_match and doctor_match and psychologist_match:
|
| 244 |
+
constructed_json = f'{{"language": "{language_match.group(1).lower()}", "doctor": {doctor_match.group(1).lower()}, "psychologist": {psychologist_match.group(1).lower()}}}'
|
| 245 |
+
json_candidates.append(constructed_json)
|
| 246 |
+
|
| 247 |
+
# Validate and return the first working JSON
|
| 248 |
+
for candidate in json_candidates:
|
| 249 |
+
try:
|
| 250 |
+
# Clean up the candidate
|
| 251 |
+
candidate = candidate.strip()
|
| 252 |
+
|
| 253 |
+
# Parse to validate structure
|
| 254 |
+
parsed = json.loads(candidate)
|
| 255 |
+
|
| 256 |
+
# Validate schema
|
| 257 |
+
if not isinstance(parsed, dict):
|
| 258 |
+
continue
|
| 259 |
+
|
| 260 |
+
required_keys = {"language", "doctor", "psychologist"}
|
| 261 |
+
if not required_keys.issubset(parsed.keys()):
|
| 262 |
+
continue
|
| 263 |
+
|
| 264 |
+
# Validate language field
|
| 265 |
+
if parsed["language"] not in ["english", "thai"]:
|
| 266 |
+
continue
|
| 267 |
+
|
| 268 |
+
# Validate boolean fields
|
| 269 |
+
if not isinstance(parsed["doctor"], bool) or not isinstance(parsed["psychologist"], bool):
|
| 270 |
+
continue
|
| 271 |
+
|
| 272 |
+
# If we reach here, the JSON is valid
|
| 273 |
+
self.logger.debug(f"Successfully validated JSON: {candidate}")
|
| 274 |
+
return candidate
|
| 275 |
+
|
| 276 |
+
except json.JSONDecodeError as e:
|
| 277 |
+
self.logger.debug(f"JSON parse failed for candidate: {candidate[:100]}... Error: {e}")
|
| 278 |
+
continue
|
| 279 |
+
except Exception as e:
|
| 280 |
+
self.logger.debug(f"Validation failed for candidate: {candidate[:100]}... Error: {e}")
|
| 281 |
+
continue
|
| 282 |
+
|
| 283 |
+
# If all strategies fail, return a safe default
|
| 284 |
+
self.logger.error("Could not extract valid JSON from response, using safe default")
|
| 285 |
+
return '{"language": "english", "doctor": false, "psychologist": false}'
|
| 286 |
+
|
| 287 |
+
def _clean_json_response(self, raw_content: str) -> str:
|
| 288 |
+
"""Legacy method - now delegates to enhanced extraction"""
|
| 289 |
+
return self._extract_and_validate_json(raw_content)
|
| 290 |
+
|
| 291 |
+
def _generate_feedback_sea_lion(self, messages: List[Dict[str, str]], show_thinking: bool = False) -> str:
    """Call the SEA-Lion chat-completions API and post-process the answer.

    Args:
        messages: Chat messages in OpenAI-style {"role", "content"} form.
        show_thinking: When True, keep any "</think>" reasoning prefix in
            the returned text instead of stripping it.

    Returns:
        The model's answer, or a fallback string on any failure. "Flag"
        tasks (detected by a marker phrase in a system message) always
        yield a validated JSON string; other failures yield a bilingual
        apology or a short diagnostic string.

    Note:
        Error paths return strings rather than raising, so callers must
        treat any return value as potentially a fallback message.
    """
    # If API is disabled, return conservative fallback immediately
    if not getattr(self, "api_enabled", False):
        # Flag tasks are recognized by this exact marker phrase in a system prompt.
        is_flag_task = any("Return ONLY a single JSON object" in msg.get("content", "")
                           for msg in messages if msg.get("role") == "system")
        if is_flag_task:
            return '{"language": "english", "doctor": false, "psychologist": false}'
        return "ขออภัยค่ะ การเชื่อมต่อมีปัญหาชั่วคราว กรุณาลองใหม่อีกครั้งค่ะ (Sorry, there is a temporary connection problem. Please try again later.)"

    try:
        self.logger.debug(f"Sending {len(messages)} messages to SEA-Lion API")

        headers = {
            "Content-Type": "application/json"
        }
        # Token is optional (e.g. local gateway deployments may not need one).
        if self.sea_lion_token:
            headers["Authorization"] = f"Bearer {self.sea_lion_token}"

        payload = {
            "model": self.sea_lion_model,
            "messages": messages,
            "chat_template_kwargs": {
                "thinking_mode": "off"
            },
            "max_tokens": 2000,  # for thinking and answering
            "temperature": 0.4,
            "top_p": 0.9,
            "frequency_penalty": 0.4,  # prevent repetition
            "presence_penalty": 0.1  # Encourage new topics
        }

        response = requests.post(
            f"{self.sea_lion_base_url}/chat/completions",
            headers=headers,
            json=payload,
            timeout=30
        )

        response.raise_for_status()
        response_data = response.json()

        # Check if response has expected structure
        if "choices" not in response_data or len(response_data["choices"]) == 0:
            self.logger.error(f"Unexpected response structure: {response_data}")
            return f"Unexpected response structure: {response_data}. Please try again later."

        choice = response_data["choices"][0]
        if "message" not in choice or "content" not in choice["message"]:
            self.logger.error(f"Unexpected message structure: {choice}")
            return f"Unexpected message structure: {choice}. Please try again later."

        raw_content = choice["message"]["content"]

        # Check if response is None or empty
        if raw_content is None:
            self.logger.error("SEA-Lion API returned None content")
            return "SEA-Lion API returned None content"

        if isinstance(raw_content, str) and raw_content.strip() == "":
            self.logger.error("SEA-Lion API returned empty content")
            return "SEA-Lion API returned empty content"

        # Same marker-phrase detection as the disabled-API branch above.
        is_flag_task = any("Return ONLY a single JSON object" in msg.get("content", "")
                           for msg in messages if msg.get("role") == "system")

        if is_flag_task:
            # Apply enhanced JSON extraction and validation for flag tasks
            final_answer = self._extract_and_validate_json(raw_content)
            self.logger.debug("Applied enhanced JSON extraction for flag task")
        else:
            # Handle thinking block for non-flag tasks
            thinking_match = re.search(r"</think>", raw_content, re.DOTALL)
            if thinking_match:
                if show_thinking:
                    # Keep the thinking block visible
                    final_answer = raw_content.strip()
                    self.logger.debug("Thinking block found and kept visible")
                else:
                    # Remove everything up to and including </think>
                    final_answer = re.sub(r".*?</think>\s*", "", raw_content, flags=re.DOTALL).strip()
                    self.logger.debug("Thinking block found and removed from response")
            else:
                self.logger.debug("No thinking block found in response")
                final_answer = raw_content.strip()

        # Log response information
        self.logger.info(f"Received response from SEA-Lion API (raw length: {len(raw_content)} chars, final answer length: {len(final_answer)} chars)")

        return final_answer

    except requests.exceptions.RequestException as e:
        # Pull as much diagnostic detail as requests exposes without
        # assuming a response object exists (connection errors have none).
        status_code = getattr(getattr(e, "response", None), "status_code", None)
        body_preview = None
        if getattr(e, "response", None) is not None:
            try:
                body_preview = e.response.text[:500]  # type: ignore
            except Exception:
                body_preview = "<unavailable>"
        request_url = getattr(getattr(e, "request", None), "url", None)
        self.logger.error(
            "SEA-Lion request failed (%s) url=%s status=%s body=%s message=%s",
            e.__class__.__name__,
            request_url,
            status_code,
            body_preview,
            str(e),
        )
        if status_code == 401:
            # Auth failures get an extra line with a token fingerprint
            # (never the raw token) to aid credential debugging.
            self.logger.error(
                "SEA-Lion authentication failed. mode=%s model=%s token_fp=%s base_url=%s",
                getattr(self, "sea_lion_mode", "unknown"),
                getattr(self, "sea_lion_model", "unknown"),
                fingerprint_secret(getattr(self, "sea_lion_token", None)),
                getattr(self, "sea_lion_base_url", "unknown"),
            )
        return "ขออภัยค่ะ เกิดปัญหาในการเชื่อมต่อ กรุณาลองใหม่อีกครั้งค่ะ (Sorry, there was a problem connecting. Please try again later.)"
    except KeyError as e:
        self.logger.error(f"Unexpected response format from SEA-Lion API: {str(e)}")
        return "ขออภัยค่ะ เกิดข้อผิดพลาดในระบบ กรุณาลองใหม่อีกครั้งค่ะ (Sorry, there was an error in the system. Please try again.)"
    except Exception as e:
        self.logger.error(f"Error generating feedback from SEA-Lion API: {str(e)}")
        return "ขออภัยค่ะ เกิดข้อผิดพลาดในระบบของ SEA-Lion กรุณาลองใหม่อีกครั้งค่ะ (Sorry, there was an error in the SEA-Lion system. Please try again later.)"
|
| 413 |
+
|
| 414 |
+
def generate_feedback(
    self,
    chat_history: List[Dict[str, str]],
    user_message: str,
    memory_context: str,
    task: str,
    summarized_histories: Optional[List] = None,
    commentary: Optional[Dict[str, str]] = None
) -> str:
    """Generate a supervisor response for the given task.

    Args:
        chat_history: Prior user/assistant turns.
        user_message: The latest user message.
        memory_context: Persistent memory/context string for the session.
        task: Task name; "flag" requests a JSON routing decision, other
            values (e.g. "finalize") request a natural-language answer.
        summarized_histories: Optional summaries of earlier history.
        commentary: Optional specialist commentary keyed by role
            (e.g. "doctor", "psychologist").

    Returns:
        The model's answer. On any failure this never raises: "flag"
        tasks get a safe default JSON object, other tasks a bilingual
        apology string.
    """
    self.logger.info("Processing chatbot response request")
    self.logger.debug(f"User message: {user_message}")
    # Fixed label: this value is the memory context, not a health status.
    self.logger.debug(f"Memory context: {memory_context}")
    self.logger.debug(f"Chat history length: {len(chat_history)}")

    try:
        messages = self._format_chat_history_for_sea_lion(chat_history, user_message, memory_context, task, summarized_histories, commentary)

        response = self._generate_feedback_sea_lion(messages)
        if response is None:
            raise Exception("SEA-Lion API returned None response")
        return response
    except Exception as e:
        self.logger.error(f"Error in _generate_feedback: {str(e)}")
        # Return a fallback response instead of None
        if task == "flag":
            # For flag tasks, return a valid JSON structure
            return '{"language": "english", "doctor": false, "psychologist": false}'
        else:
            # Fixed: the Thai text previously contained a duplicated tone
            # mark ("เชื่่อมต่อ" -> "เชื่อมต่อ").
            return "ขออภัยค่ะ ไม่สามารถเชื่อมต่อกับ SEA-Lion ได้ในปัจจุบัน กรุณาลองใหม่อีกครั้งค่ะ (Sorry, we cannot connect to SEA-Lion. Please try again.)"
|
| 444 |
+
|
| 445 |
+
|
| 446 |
+
if __name__ == "__main__":
    # Demo / smoke test for the JSON-validating supervisor.
    try:
        sup = SupervisorAgent(log_level=logging.DEBUG)
    except Exception as boot_err:
        print(f"[BOOT ERROR] Unable to start SupervisorAgent: {boot_err}")
        raise SystemExit(1)

    # Scenarios exercising the flag-task JSON contract.
    test_cases = [
        {
            "name": "Medical + Psychological",
            "message": "I have a headache and feel anxious about my exams.",
            "expected": {"doctor": True, "psychologist": True},
        },
        {
            "name": "Thai Medical Only",
            "message": "ปวดหัวมากครับ แล้วก็มีไข้ด้วย",
            "expected": {"doctor": True, "psychologist": False, "language": "thai"},
        },
        {
            "name": "Psychological Only",
            "message": "I'm feeling very stressed and worried about my future.",
            "expected": {"doctor": False, "psychologist": True},
        },
    ]

    chat_history = [
        {"role": "user", "content": "Hi, I've been feeling tired lately."},
        {"role": "assistant", "content": "Thanks for sharing. How's your sleep and stress?"},
    ]
    memory_context = "User: 21 y/o student, midterm week, low sleep (4–5h), high caffeine, history of migraines."

    print("=== TESTING ENHANCED JSON SUPERVISOR ===\n")

    for idx, case in enumerate(test_cases, 1):
        print(f"Test {idx}: {case['name']}")
        print(f"Message: {case['message']}")

        flag_output = sup.generate_feedback(
            chat_history=chat_history,
            user_message=case["message"],
            memory_context=memory_context,
            task="flag",
        )

        print(f"JSON Output: {flag_output}")

        # Confirm the supervisor really produced parseable JSON.
        try:
            parsed = json.loads(flag_output)
        except json.JSONDecodeError as parse_err:
            print(f"Invalid JSON: {parse_err}")
        else:
            print("Valid JSON structure")
            for field in ("language", "doctor", "psychologist"):
                print(f"{field.capitalize()}: {parsed.get(field)}")

        print("-" * 50)

    # Exercise the finalize task with specialist commentary attached.
    print("\n=== TEST: FINALIZED RESPONSE ===")
    commentary = {
        "doctor": "Likely tension-type headache aggravated by stress and poor sleep. Suggest hydration, rest, OTC analgesic if not contraindicated.",
        "psychologist": "Teach 4-7-8 breathing, short cognitive reframing for exam anxiety, and a 20-minute study-break cycle.",
    }

    final_output = sup.generate_feedback(
        chat_history=chat_history,
        user_message="I have a headache and feel anxious about my exams.",
        memory_context=memory_context,
        task="finalize",
        commentary=commentary,
    )
    print(final_output)
|
src/kallam/domain/agents/translator.py
ADDED
|
@@ -0,0 +1,296 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import requests
|
| 3 |
+
import re
|
| 4 |
+
from pathlib import Path
|
| 5 |
+
from datetime import datetime
|
| 6 |
+
from typing import Dict, List, Optional
|
| 7 |
+
|
| 8 |
+
from dotenv import load_dotenv
|
| 9 |
+
load_dotenv()
|
| 10 |
+
|
| 11 |
+
from kallam.infrastructure.sea_lion import load_sea_lion_settings, fingerprint_secret
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class TranslatorAgent:
    """
    Handles Thai-English translation using SEA-Lion API.

    When no SEA-Lion credentials are configured, or a call fails, the
    agent degrades to passthrough: input text is returned unchanged so
    the surrounding app keeps working without the external service.
    """

    def __init__(self, log_level: int = logging.INFO):
        # Logging must be configured before the API client so that setup
        # messages are captured.
        self._setup_logging(log_level)
        self._setup_api_client()

        self.logger.info("Translator Agent initialized")

        # Core configuration
        self.supported_languages = {"thai", "english"}
        self.default_language = "english"

    def _setup_logging(self, log_level: int) -> None:
        """Setup logging configuration: one dated file handler (DEBUG) plus
        a console handler at the requested level."""
        log_dir = Path("logs")
        log_dir.mkdir(exist_ok=True)

        self.logger = logging.getLogger(f"{__name__}.TranslatorAgent")
        self.logger.setLevel(log_level)

        # Clear handlers from any earlier instantiation so log lines are
        # not duplicated.
        if self.logger.handlers:
            self.logger.handlers.clear()

        file_handler = logging.FileHandler(
            log_dir / f"translator_{datetime.now().strftime('%Y%m%d')}.log",
            encoding='utf-8'
        )
        file_handler.setLevel(logging.DEBUG)

        console_handler = logging.StreamHandler()
        console_handler.setLevel(log_level)

        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
        )
        file_handler.setFormatter(formatter)
        console_handler.setFormatter(formatter)

        self.logger.addHandler(file_handler)
        self.logger.addHandler(console_handler)

    def _setup_api_client(self) -> None:
        """Setup SEA-Lion API client; degrade gracefully if missing."""
        settings = load_sea_lion_settings(default_model="aisingapore/Gemma-SEA-LION-v4-27B-IT")
        self.api_url = settings.base_url
        self.api_key = settings.token
        self.model_id = settings.model
        self.api_mode = settings.mode
        # Enable/disable external API usage based on token presence
        self.enabled = self.api_key is not None

        if self.enabled:
            # Log only a fingerprint of the token, never the raw secret.
            self.logger.info(
                "SEA-Lion API client initialized (mode=%s, base_url=%s, model=%s, token_fp=%s)",
                self.api_mode,
                self.api_url,
                self.model_id,
                fingerprint_secret(self.api_key),
            )
        else:
            # Do NOT raise: allow app to start and operate in passthrough mode
            self.logger.warning(
                "SEA_LION credentials not set. Translator will run in passthrough mode (no external calls)."
            )

    def _call_api(self, text: str, target_language: str) -> str:
        """
        Simple API call to SEA-Lion for translation

        Args:
            text: Text to translate
            target_language: Target language ("thai" or "english")

        Returns:
            Translated text, or the original text on any error or when
            the API is disabled (passthrough behavior).
        """
        # Short-circuit if API disabled
        if not getattr(self, "enabled", False):
            return text

        try:
            # Build simple translation prompt
            system_prompt = f"""
**Your Role:**
You are a translator for a chatbot which is used for medical and psychological help.

**Core Rules:**
- Translate the given text to {target_language}.
- Provide ONLY the translation without quotes or explanations.
- Maintain medical/psychological terminology accuracy.
- For Thai: use appropriate polite forms.
- For English: use clear, professional language."""

            messages = [
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": f"Translate: {text}"}
            ]

            headers = {
                "Content-Type": "application/json"
            }
            if self.api_key:
                headers["Authorization"] = f"Bearer {self.api_key}"

            payload = {
                "model": self.model_id,
                "messages": messages,
                "chat_template_kwargs": {"thinking_mode": "off"},
                "max_tokens": 2000,
                "temperature": 0.1,  # Low for consistency
                "top_p": 0.9
            }

            response = requests.post(
                f"{self.api_url}/chat/completions",
                headers=headers,
                json=payload,
                timeout=30
            )

            response.raise_for_status()
            data = response.json()

            # Extract response
            content = data["choices"][0]["message"]["content"]

            # Remove thinking blocks if present — the model may wrap its
            # answer either in ```thinking/```answer fences or in a
            # </think>-terminated prefix.
            if "```thinking" in content and "```answer" in content:
                match = re.search(r"```answer\s*(.*?)\s*```", content, re.DOTALL)
                if match:
                    content = match.group(1).strip()
            elif "</think>" in content:
                content = re.sub(r".*?</think>\s*", "", content, flags=re.DOTALL).strip()

            # Clean up quotes the model sometimes adds around the answer.
            content = content.strip()
            if content.startswith('"') and content.endswith('"'):
                content = content[1:-1]

            # Empty result falls back to the original text.
            return content if content else text

        except requests.exceptions.RequestException as e:
            # Collect whatever diagnostics requests exposes; connection
            # errors carry no response object.
            status_code = getattr(getattr(e, 'response', None), 'status_code', None)
            body_preview = None
            if getattr(e, 'response', None) is not None:
                try:
                    body_preview = e.response.text[:500]  # type: ignore
                except Exception:
                    body_preview = '<unavailable>'
            request_url = getattr(getattr(e, 'request', None), 'url', None)
            self.logger.error(
                "SEA-Lion translation request failed (%s) url=%s status=%s body=%s message=%s",
                e.__class__.__name__,
                request_url,
                status_code,
                body_preview,
                str(e),
            )
            if status_code == 401:
                # Extra detail for auth failures; token is fingerprinted,
                # never logged raw.
                self.logger.error(
                    "SEA-Lion translation authentication failed. mode=%s model=%s token_fp=%s base_url=%s",
                    getattr(self, "api_mode", "unknown"),
                    getattr(self, "model_id", "unknown"),
                    fingerprint_secret(getattr(self, "api_key", None)),
                    getattr(self, "api_url", "unknown"),
                )
            return text
        except Exception as e:
            self.logger.error(f"Translation API error: {str(e)}")
            return text  # Return original on error

    # ===== PUBLIC INTERFACE (Used by Orchestrator) =====

    def get_translation(self, message: str, target: str) -> str:
        """
        Main translation method used by orchestrator.

        Returns the message unchanged for unsupported targets or
        empty/whitespace-only input.
        """
        # Validate target
        if target not in self.supported_languages:
            self.logger.warning(f"Unsupported language '{target}', returning original")
            return message

        # Skip if empty
        if not message or not message.strip():
            return message

        self.logger.debug(f"Translating to {target}: {message[:50]}...")

        translated = self._call_api(message, target)

        self.logger.debug(f"Translation complete: {translated[:50]}...")

        return translated

    def detect_language(self, message: str) -> str:
        """
        Detect language of the message.

        Heuristic: counts characters in the Thai Unicode block
        (U+0E00–U+0E7F) against the stripped length; more than 10% Thai
        characters classifies the message as "thai", otherwise "english".
        """
        # Check for Thai characters
        thai_chars = sum(1 for c in message if '\u0E00' <= c <= '\u0E7F')
        total_chars = len(message.strip())

        # Empty input defaults to English.
        if total_chars == 0:
            return "english"

        thai_ratio = thai_chars / total_chars

        # If more than 10% Thai characters, consider it Thai
        if thai_ratio > 0.1:
            return "thai"
        else:
            return "english"

    def translate_forward(self, message: str, source_language: str) -> str:
        """
        Forward translation to English (for agent processing)

        Args:
            message: Text in source language
            source_language: Source language

        Returns:
            English text (unchanged if already English)
        """
        if source_language == "english":
            return message

        return self.get_translation(message, "english")

    def translate_backward(self, message: str, target_language: str) -> str:
        """
        Backward translation from English to user's language

        Args:
            message: English text
            target_language: User's language

        Returns:
            Text in user's language (unchanged if target is English)
        """
        if target_language == "english":
            return message

        return self.get_translation(message, target_language)
|
| 261 |
+
|
| 262 |
+
|
| 263 |
+
if __name__ == "__main__":
    # Manual smoke test for the translator.
    try:
        agent = TranslatorAgent(log_level=logging.DEBUG)

        print("=== SIMPLIFIED TRANSLATOR TEST ===\n")

        # Translation checks in both directions.
        for source_text, target_lang in (
            ("ฉันปวดหัวมาก", "english"),
            ("I have a headache", "thai"),
            ("รู้สึกเครียดและกังวล", "english"),
            ("Feeling anxious about my health", "thai"),
        ):
            translated = agent.get_translation(source_text, target_lang)
            print(f"{source_text[:30]:30} -> {target_lang:7} -> {translated}")

        # Heuristic language detection on Thai/English/mixed samples.
        print("\n=== Language Detection ===")
        for sample in ("สวัสดีครับ", "Hello there", "ผมชื่อ John", "I'm feeling ดีมาก"):
            print(f"{sample:20} -> {agent.detect_language(sample)}")

    except Exception as e:
        print(f"Test error: {e}")
|
src/kallam/infra/__init__.py
ADDED
|
File without changes
|
src/kallam/infra/__pycache__/__init__.cpython-311.pyc
ADDED
|
Binary file (192 Bytes). View file
|
|
|
src/kallam/infra/__pycache__/db.cpython-311.pyc
ADDED
|
Binary file (1.23 kB). View file
|
|
|
src/kallam/infra/__pycache__/exporter.cpython-311.pyc
ADDED
|
Binary file (5.13 kB). View file
|
|
|
src/kallam/infra/__pycache__/message_store.cpython-311.pyc
ADDED
|
Binary file (9.77 kB). View file
|
|
|
src/kallam/infra/__pycache__/session_store.cpython-311.pyc
ADDED
|
Binary file (8.52 kB). View file
|
|
|
src/kallam/infra/__pycache__/summary_store.cpython-311.pyc
ADDED
|
Binary file (2.74 kB). View file
|
|
|
src/kallam/infra/__pycache__/token_counter.cpython-311.pyc
ADDED
|
Binary file (1.47 kB). View file
|
|
|
src/kallam/infra/db.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# infra/db.py
|
| 2 |
+
from contextlib import contextmanager
|
| 3 |
+
import sqlite3
|
| 4 |
+
|
| 5 |
+
@contextmanager
def sqlite_conn(path: str):
    """Context manager yielding a configured SQLite connection.

    Opens *path* with a 30s busy timeout and cross-thread access allowed,
    enables foreign keys and WAL journaling, and sets sqlite3.Row for
    mapping-style row access. Commits on clean exit, rolls back and
    re-raises on any exception, and always closes the connection.
    """
    db = sqlite3.connect(path, timeout=30.0, check_same_thread=False)
    try:
        for pragma in ("PRAGMA foreign_keys = ON", "PRAGMA journal_mode = WAL"):
            db.execute(pragma)
        db.row_factory = sqlite3.Row
        yield db
        db.commit()
    except Exception:
        db.rollback()
        raise
    finally:
        db.close()
|
src/kallam/infra/exporter.py
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# infra/exporter.py
|
| 2 |
+
import json
|
| 3 |
+
from pathlib import Path
|
| 4 |
+
from kallam.infra.db import sqlite_conn
|
| 5 |
+
|
| 6 |
+
class JsonExporter:
    """Exports chat sessions from the SQLite store as pretty-printed JSON files."""

    # Version stamp written into every export's metadata.
    EXPORT_VERSION = "2.1"

    def __init__(self, db_path: str, out_dir: str = "exported_sessions"):
        # Accept both plain filesystem paths and "sqlite:///" URLs.
        self.db_path = db_path.replace("sqlite:///", "")
        self.out_dir = Path(out_dir)
        self.out_dir.mkdir(parents=True, exist_ok=True)

    def _session_payload(self, c, session_row) -> dict:
        """Build the export dict for one session row (shared by both exports)."""
        sid = session_row["session_id"]
        msgs = [dict(r) for r in c.execute(
            "select * from messages where session_id=? order by id", (sid,))]
        sums = [dict(r) for r in c.execute(
            "select * from summaries where session_id=? order by id", (sid,))]
        return {
            "session_info": dict(session_row),
            "summaries": sums,
            "chat_history": msgs,
            "export_metadata": {"exported_at_version": self.EXPORT_VERSION},
        }

    def export_session_json(self, session_id: str) -> str:
        """Export a single session to <out_dir>/<session_id>.json.

        Returns:
            Path of the written file as a string.

        Raises:
            ValueError: if the session does not exist.
        """
        with sqlite_conn(self.db_path) as c:
            s = c.execute("select * from sessions where session_id=?", (session_id,)).fetchone()
            if not s:
                raise ValueError(f"Session {session_id} does not exist")
            data = self._session_payload(c, s)
        out = self.out_dir / f"{session_id}.json"
        out.write_text(json.dumps(data, ensure_ascii=False, indent=2), encoding="utf-8")
        return str(out)

    def export_all_sessions_json(self) -> str:
        """Export *all* sessions into a single JSON file (all_sessions.json)."""
        with sqlite_conn(self.db_path) as c:
            # One pass over the sessions table; previously each session row
            # was re-fetched individually after listing the ids (N+1 reads).
            rows = c.execute("select * from sessions").fetchall()
            all_data = [self._session_payload(c, s) for s in rows]
        out = self.out_dir / "all_sessions.json"
        out.write_text(json.dumps(all_data, ensure_ascii=False, indent=2), encoding="utf-8")
        return str(out)
|
src/kallam/infra/message_store.py
ADDED
|
@@ -0,0 +1,120 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# infra/message_store.py
|
| 2 |
+
from typing import Any, Dict, List, Tuple, Optional
|
| 3 |
+
import json
|
| 4 |
+
from kallam.infra.db import sqlite_conn
|
| 5 |
+
from datetime import datetime
|
| 6 |
+
import uuid
|
| 7 |
+
|
| 8 |
+
class MessageStore:
|
| 9 |
+
def __init__(self, db_path: str):
|
| 10 |
+
self.db_path = db_path.replace("sqlite:///", "")
|
| 11 |
+
|
| 12 |
+
def get_original_history(self, session_id: str, limit: int = 10) -> List[Dict[str, str]]:
    """Return the last `limit` user/assistant messages in original language.

    Rows are fetched newest-first for the LIMIT, then reversed so the
    returned list is in chronological order.
    """
    with sqlite_conn(self.db_path) as c:
        rows = c.execute("""
            select role, content as content
            from messages where session_id=? and role in ('user','assistant')
            order by id desc limit ?""", (session_id, limit)).fetchall()
        return [{"role": r["role"], "content": r["content"]} for r in reversed(rows)]
|
| 19 |
+
|
| 20 |
+
def get_translated_history(self, session_id: str, limit: int = 10) -> List[Dict[str, str]]:
    """Return the last `limit` user/assistant messages, preferring the
    translated text.

    Falls back to the original `content` when `translated_content` is
    NULL (coalesce). Result is in chronological order.
    """
    with sqlite_conn(self.db_path) as c:
        rows = c.execute("""
            select role, coalesce(translated_content, content) as content
            from messages where session_id=? and role in ('user','assistant')
            order by id desc limit ?""", (session_id, limit)).fetchall()
        return [{"role": r["role"], "content": r["content"]} for r in reversed(rows)]
|
| 27 |
+
|
| 28 |
+
def get_reasoning_traces(self, session_id: str, limit: int = 10) -> List[Dict[str, Any]]:
    """Return up to `limit` stored chain-of-thought payloads, newest first.

    Each item is {"message_id": ..., "contents": <parsed JSON>}. Rows whose
    `chain_of_thoughts` column is not valid JSON are skipped silently so a
    single corrupt row cannot fail the whole read.
    """
    with sqlite_conn(self.db_path) as c:
        rows = c.execute("""
            select message_id, chain_of_thoughts from messages
            where session_id=? and chain_of_thoughts is not null
            order by id desc limit ?""", (session_id, limit)).fetchall()
        out = []
        for r in rows:
            try:
                out.append({"message_id": r["message_id"], "contents": json.loads(r["chain_of_thoughts"])})
            except json.JSONDecodeError:
                continue
        return out
|
| 41 |
+
|
| 42 |
+
def append_user(self, session_id: str, content: str, translated: str | None,
                flags: Dict[str, Any] | None, tokens_in: int) -> None:
    """Persist a user message; tokens count as input, output is zero."""
    self._append(session_id, "user", content, translated, None, None, flags, tokens_in, 0)
|
| 45 |
+
|
| 46 |
+
def append_assistant(self, session_id: str, content: str, translated: str | None,
                     reasoning: Dict[str, Any] | None, tokens_out: int) -> None:
    """Persist an assistant message; tokens count as output, input is zero."""
    self._append(session_id, "assistant", content, translated, reasoning, None, None, 0, tokens_out)
|
| 49 |
+
|
| 50 |
+
def _append(self, session_id: str, role: str, content: str, translated: str | None,
            reasoning: Dict[str, Any] | None, latency_ms: int | None,
            flags: Dict[str, Any] | None, tok_in: int, tok_out: int) -> None:
    """Insert one message row and bump the owning session's counters.

    Generates a short random message id, serializes reasoning/flags to
    JSON (NULL when absent), then updates total_messages, the per-role
    counter, and last_activity on the session row. Both statements run
    inside one sqlite_conn context, so they commit (or roll back) together.
    """
    message_id = f"MSG-{uuid.uuid4().hex[:8].upper()}"
    now = datetime.now().isoformat()
    with sqlite_conn(self.db_path) as c:
        c.execute("""insert into messages (session_id,message_id,timestamp,role,content,
                  translated_content,chain_of_thoughts,tokens_input,tokens_output,latency_ms,flags)
                  values (?,?,?,?,?,?,?,?,?,?,?)""",
                  (session_id, message_id, now, role, content,
                   translated, json.dumps(reasoning, ensure_ascii=False) if reasoning else None,
                   tok_in, tok_out, latency_ms, json.dumps(flags, ensure_ascii=False) if flags else None))
        if role == "user":
            c.execute("""update sessions set total_messages=total_messages+1,
                      total_user_messages=coalesce(total_user_messages,0)+1,
                      last_activity=? where session_id=?""", (now, session_id))
        elif role == "assistant":
            c.execute("""update sessions set total_messages=total_messages+1,
                      total_assistant_messages=coalesce(total_assistant_messages,0)+1,
                      last_activity=? where session_id=?""", (now, session_id))
|
| 68 |
+
|
| 69 |
+
def aggregate_stats(self, session_id: str) -> Tuple[Dict[str, Any], Dict[str, Any]]:
|
| 70 |
+
"""
|
| 71 |
+
Returns:
|
| 72 |
+
stats: {
|
| 73 |
+
"message_count": int,
|
| 74 |
+
"total_tokens_in": int,
|
| 75 |
+
"total_tokens_out": int,
|
| 76 |
+
"avg_latency": float | None,
|
| 77 |
+
"first_message": str | None, # ISO timestamp
|
| 78 |
+
"last_message": str | None, # ISO timestamp
|
| 79 |
+
}
|
| 80 |
+
session: dict # full row from sessions table (as a mapping)
|
| 81 |
+
"""
|
| 82 |
+
with sqlite_conn(self.db_path) as c:
|
| 83 |
+
# Roll up message stats
|
| 84 |
+
row = c.execute(
|
| 85 |
+
"""
|
| 86 |
+
SELECT
|
| 87 |
+
COUNT(*) AS message_count,
|
| 88 |
+
COALESCE(SUM(tokens_input), 0) AS total_tokens_in,
|
| 89 |
+
COALESCE(SUM(tokens_output), 0) AS total_tokens_out,
|
| 90 |
+
AVG(CASE WHEN role='assistant' THEN latency_ms END) AS avg_latency,
|
| 91 |
+
MIN(timestamp) AS first_message,
|
| 92 |
+
MAX(timestamp) AS last_message
|
| 93 |
+
FROM messages
|
| 94 |
+
WHERE session_id = ?
|
| 95 |
+
AND role IN ('user','assistant')
|
| 96 |
+
""",
|
| 97 |
+
(session_id,),
|
| 98 |
+
).fetchone()
|
| 99 |
+
|
| 100 |
+
stats = {
|
| 101 |
+
"message_count": row["message_count"] or 0,
|
| 102 |
+
"total_tokens_in": row["total_tokens_in"] or 0,
|
| 103 |
+
"total_tokens_out": row["total_tokens_out"] or 0,
|
| 104 |
+
# Normalize avg_latency to float if not None
|
| 105 |
+
"avg_latency": float(row["avg_latency"]) if row["avg_latency"] is not None else None,
|
| 106 |
+
"first_message": row["first_message"],
|
| 107 |
+
"last_message": row["last_message"],
|
| 108 |
+
}
|
| 109 |
+
|
| 110 |
+
# Fetch session info (entire row)
|
| 111 |
+
session = c.execute(
|
| 112 |
+
"SELECT * FROM sessions WHERE session_id = ?",
|
| 113 |
+
(session_id,),
|
| 114 |
+
).fetchone() or {}
|
| 115 |
+
|
| 116 |
+
# Convert sqlite Row to plain dict if needed
|
| 117 |
+
if hasattr(session, "keys"):
|
| 118 |
+
session = {k: session[k] for k in session.keys()}
|
| 119 |
+
|
| 120 |
+
return stats, session
|
src/kallam/infra/session_store.py
ADDED
|
@@ -0,0 +1,122 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# infra/session_store.py
|
| 2 |
+
from __future__ import annotations
|
| 3 |
+
|
| 4 |
+
from dataclasses import dataclass
|
| 5 |
+
from datetime import datetime
|
| 6 |
+
from typing import Optional, Dict, Any, List
|
| 7 |
+
from kallam.infra.db import sqlite_conn
|
| 8 |
+
import uuid
|
| 9 |
+
|
| 10 |
+
@dataclass
class SessionMeta:
    """Typed subset of a sessions-table row consumed by ChatbotManager."""

    # Primary key of the session (generated as "ID-<8 hex chars>" by SessionStore.create).
    session_id: str
    # Count of user-authored messages; a NULL column value is normalized to 0 on read.
    total_user_messages: int
|
| 14 |
+
|
| 15 |
+
class SessionStore:
|
| 16 |
+
"""
|
| 17 |
+
Storage facade expected by ChatbotManager.
|
| 18 |
+
Accepts either 'sqlite:///path/to.db' or 'path/to.db' and normalizes to file path.
|
| 19 |
+
"""
|
| 20 |
+
def __init__(self, db_path: str):
|
| 21 |
+
# ChatbotManager passes f"sqlite:///{Path(...)}"
|
| 22 |
+
self.db_path = db_path.replace("sqlite:///", "")
|
| 23 |
+
|
| 24 |
+
# ----------------- create -----------------
|
| 25 |
+
def create(self, saved_memories: Optional[str]) -> str:
|
| 26 |
+
sid = f"ID-{uuid.uuid4().hex[:8].upper()}"
|
| 27 |
+
now = datetime.now().isoformat()
|
| 28 |
+
with sqlite_conn(self.db_path) as c:
|
| 29 |
+
c.execute(
|
| 30 |
+
"""
|
| 31 |
+
INSERT INTO sessions (
|
| 32 |
+
session_id, timestamp, last_activity, saved_memories,
|
| 33 |
+
total_messages, total_user_messages, total_assistant_messages,
|
| 34 |
+
total_summaries, is_active
|
| 35 |
+
)
|
| 36 |
+
VALUES (?, ?, ?, ?, 0, 0, 0, 0, 1)
|
| 37 |
+
""",
|
| 38 |
+
(sid, now, now, saved_memories),
|
| 39 |
+
)
|
| 40 |
+
return sid
|
| 41 |
+
|
| 42 |
+
# ----------------- read (typed) -----------------
|
| 43 |
+
def get(self, session_id: str) -> Optional[SessionMeta]:
|
| 44 |
+
with sqlite_conn(self.db_path) as c:
|
| 45 |
+
r = c.execute(
|
| 46 |
+
"SELECT * FROM sessions WHERE session_id = ?",
|
| 47 |
+
(session_id,),
|
| 48 |
+
).fetchone()
|
| 49 |
+
if not r:
|
| 50 |
+
return None
|
| 51 |
+
return SessionMeta(
|
| 52 |
+
session_id=r["session_id"],
|
| 53 |
+
total_user_messages=r["total_user_messages"] or 0,
|
| 54 |
+
)
|
| 55 |
+
|
| 56 |
+
# ----------------- read (raw dict) -----------------
|
| 57 |
+
def get_raw(self, session_id: str) -> Optional[Dict[str, Any]]:
|
| 58 |
+
with sqlite_conn(self.db_path) as c:
|
| 59 |
+
r = c.execute(
|
| 60 |
+
"SELECT * FROM sessions WHERE session_id = ?",
|
| 61 |
+
(session_id,),
|
| 62 |
+
).fetchone()
|
| 63 |
+
return dict(r) if r else None
|
| 64 |
+
|
| 65 |
+
# ----------------- meta subset for manager -----------------
|
| 66 |
+
def get_meta(self, session_id: str) -> Optional[Dict[str, Any]]:
|
| 67 |
+
with sqlite_conn(self.db_path) as c:
|
| 68 |
+
r = c.execute(
|
| 69 |
+
"""
|
| 70 |
+
SELECT session_id, total_messages, total_user_messages, saved_memories,
|
| 71 |
+
total_assistant_messages, total_summaries, last_activity, is_active
|
| 72 |
+
FROM sessions
|
| 73 |
+
WHERE session_id = ?
|
| 74 |
+
""",
|
| 75 |
+
(session_id,),
|
| 76 |
+
).fetchone()
|
| 77 |
+
return dict(r) if r else None
|
| 78 |
+
|
| 79 |
+
# ----------------- list -----------------
|
| 80 |
+
def list(self, active_only: bool = True, limit: int = 50) -> List[Dict[str, Any]]:
|
| 81 |
+
sql = "SELECT * FROM sessions"
|
| 82 |
+
params: List[Any] = []
|
| 83 |
+
if active_only:
|
| 84 |
+
sql += " WHERE is_active = 1"
|
| 85 |
+
sql += " ORDER BY last_activity DESC LIMIT ?"
|
| 86 |
+
params.append(limit)
|
| 87 |
+
with sqlite_conn(self.db_path) as c:
|
| 88 |
+
rows = c.execute(sql, params).fetchall()
|
| 89 |
+
return [dict(r) for r in rows]
|
| 90 |
+
|
| 91 |
+
# ----------------- close -----------------
|
| 92 |
+
def close(self, session_id: str) -> bool:
|
| 93 |
+
with sqlite_conn(self.db_path) as c:
|
| 94 |
+
res = c.execute(
|
| 95 |
+
"UPDATE sessions SET is_active = 0, last_activity = ? WHERE session_id = ?",
|
| 96 |
+
(datetime.now().isoformat(), session_id),
|
| 97 |
+
)
|
| 98 |
+
return res.rowcount > 0
|
| 99 |
+
|
| 100 |
+
# ----------------- delete -----------------
|
| 101 |
+
def delete(self, session_id: str) -> bool:
|
| 102 |
+
# messages/summaries are ON DELETE CASCADE according to your schema
|
| 103 |
+
with sqlite_conn(self.db_path) as c:
|
| 104 |
+
res = c.execute("DELETE FROM sessions WHERE session_id = ?", (session_id,))
|
| 105 |
+
return res.rowcount > 0
|
| 106 |
+
|
| 107 |
+
# ----------------- cleanup -----------------
|
| 108 |
+
def cleanup_before(self, cutoff_iso: str) -> int:
|
| 109 |
+
with sqlite_conn(self.db_path) as c:
|
| 110 |
+
res = c.execute(
|
| 111 |
+
"DELETE FROM sessions WHERE last_activity < ?",
|
| 112 |
+
(cutoff_iso,),
|
| 113 |
+
)
|
| 114 |
+
return res.rowcount
|
| 115 |
+
|
| 116 |
+
# Optional: utility to bump last_activity
|
| 117 |
+
def touch(self, session_id: str) -> None:
|
| 118 |
+
with sqlite_conn(self.db_path) as c:
|
| 119 |
+
c.execute(
|
| 120 |
+
"UPDATE sessions SET last_activity = ? WHERE session_id = ?",
|
| 121 |
+
(datetime.now().isoformat(), session_id),
|
| 122 |
+
)
|
src/kallam/infra/summary_store.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# infra/summary_store.py
|
| 2 |
+
from datetime import datetime
|
| 3 |
+
from kallam.infra.db import sqlite_conn
|
| 4 |
+
|
| 5 |
+
class SummaryStore:
|
| 6 |
+
def __init__(self, db_path: str): self.db_path = db_path.replace("sqlite:///", "")
|
| 7 |
+
|
| 8 |
+
def list(self, session_id: str, limit: int | None = None):
|
| 9 |
+
q = "select timestamp, summary from summaries where session_id=? order by id desc"
|
| 10 |
+
params = [session_id]
|
| 11 |
+
if limit: q += " limit ?"; params.append(limit)
|
| 12 |
+
with sqlite_conn(self.db_path) as c:
|
| 13 |
+
rows = c.execute(q, params).fetchall()
|
| 14 |
+
return [{"timestamp": r["timestamp"], "summary": r["summary"]} for r in rows]
|
| 15 |
+
|
| 16 |
+
def add(self, session_id: str, summary: str):
|
| 17 |
+
now = datetime.now().isoformat()
|
| 18 |
+
with sqlite_conn(self.db_path) as c:
|
| 19 |
+
c.execute("insert into summaries (session_id, timestamp, summary) values (?,?,?)",
|
| 20 |
+
(session_id, now, summary))
|
| 21 |
+
c.execute("update sessions set total_summaries = total_summaries + 1, last_activity=? where session_id=?",
|
| 22 |
+
(now, session_id))
|