m97j committed on
Commit
5fc69e4
·
0 Parent(s):

Initial commit

.dockerignore ADDED
@@ -0,0 +1,14 @@
1
+ __pycache__/
2
+ *.pyc
3
+ *.pyo
4
+ *.pyd
5
+ *.db
6
+ *.sqlite3
7
+ .env
8
+ .git
9
+ .gitignore
10
+ .DS_Store
11
+ .vscode/
12
+ .idea/
13
+ tests/
14
+ chroma_db/
.gitignore ADDED
@@ -0,0 +1,29 @@
1
+ # Python cache
2
+ __pycache__/
3
+ *.pyc
4
+ *.pyo
5
+ *.pyd
6
+
7
+ # Virtual environment
8
+ venv/
9
+ .env
10
+
11
+ # Local DB / cache
12
+ chroma_db/
13
+ *.sqlite3
14
+
15
+ # IDE / OS
16
+ .vscode/
17
+ .idea/
18
+ .DS_Store
19
+
20
+ # Keep the models/ subdirectories but ignore the model files themselves
21
+ models/emotion-classification-model/*
22
+ !models/emotion-classification-model/.gitkeep
23
+
24
+ models/fallback-npc-model/*
25
+ !models/fallback-npc-model/.gitkeep
26
+
27
+ models/sentence-embedder/*
28
+ !models/sentence-embedder/.gitkeep
29
+
Dockerfile ADDED
@@ -0,0 +1,50 @@
1
+ # ----------- Base image -----------
2
+ FROM python:3.10-slim
3
+
4
+ # ----------- Working directory -----------
5
+ WORKDIR /app
6
+
7
+ # ----------- System packages -----------
8
+ RUN apt-get update && apt-get install -y --no-install-recommends \
9
+ build-essential \
10
+ cmake \
11
+ && rm -rf /var/lib/apt/lists/*
12
+
13
+ # ----------- Dependencies -----------
14
+ # Copy requirements first so the Docker layer cache can be reused
15
+ COPY requirements.txt /app/requirements.txt
16
+
17
+ # Install the CPU-only PyTorch build (no GPU required)
18
+ RUN pip install --no-cache-dir torch==2.0.1+cpu -f https://download.pytorch.org/whl/cpu/torch_stable.html \
19
+ && pip install --no-cache-dir -r /app/requirements.txt
20
+
21
+ # ----------- Copy source -----------
22
+ # Copying the code after installing dependencies keeps the cache valid as long as requirements.txt is unchanged
23
+ COPY . /app/
24
+
25
+ # ----------- Model download -----------
26
+ # Download the models at build time and save them in the layout model_loader.py expects
27
+ RUN python -c "from transformers import AutoTokenizer, AutoModelForSequenceClassification, AutoModelForCausalLM; \
28
+ from sentence_transformers import SentenceTransformer; \
29
+ import os; \
30
+ emo = 'tae898/emoberta-base-ko'; emo_dir = './models/emotion-classification-model'; \
31
+ fb = 'skt/ko-gpt-trinity-1.2B-v0.5'; fb_dir = './models/fallback-npc-model'; \
32
+ emb = 'sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2'; emb_dir = './models/sentence-embedder'; \
33
+ os.makedirs(emo_dir, exist_ok=True); \
34
+ os.makedirs(fb_dir, exist_ok=True); \
35
+ AutoTokenizer.from_pretrained(emo).save_pretrained(emo_dir); \
36
+ AutoModelForSequenceClassification.from_pretrained(emo).save_pretrained(emo_dir); \
37
+ AutoTokenizer.from_pretrained(fb).save_pretrained(fb_dir); \
38
+ AutoModelForCausalLM.from_pretrained(fb).save_pretrained(fb_dir); \
39
+ SentenceTransformer(emb).save(emb_dir)"
40
+
41
+ # ----------- Environment variables -----------
42
+ ENV EMOTION_MODEL_DIR=/app/models/emotion-classification-model
43
+ ENV FALLBACK_MODEL_DIR=/app/models/fallback-npc-model
44
+ ENV EMBEDDER_MODEL_DIR=/app/models/sentence-embedder
45
+
46
+ # ----------- Port -----------
47
+ EXPOSE 8000
48
+
49
+ # ----------- Run command -----------
50
+ # Shell form so ${PORT:-8000} is expanded at container start (the exec form would pass the literal string)
+ CMD uvicorn app:app --host 0.0.0.0 --port ${PORT:-8000}
__init__.py ADDED
@@ -0,0 +1,4 @@
1
+ # __init__.py
2
+
3
+ # This file ensures the directory is treated as a Python package.
4
+ # Required for relative imports and consistent behavior across environments.
app.py ADDED
@@ -0,0 +1,246 @@
1
+ import os
2
+ from fastapi import FastAPI, Request, HTTPException
3
+ from fastapi.middleware.cors import CORSMiddleware
4
+ from manager.dialogue_manager import handle_dialogue
5
+ from rag.rag_generator import chroma_initialized, load_game_docs_from_disk, add_docs
6
+ from contextlib import asynccontextmanager
7
+ from models.model_loader import load_emotion_model, load_fallback_model, load_embedder
8
+ from schemas import AskReq, AskRes
9
+ from pathlib import Path
10
+ from rag.rag_generator import set_embedder
11
+
12
+ # Model names
13
+ EMOTION_MODEL_NAME = "tae898/emoberta-base-ko"
14
+ FALLBACK_MODEL_NAME = "skt/ko-gpt-trinity-1.2B-v0.5"
15
+ EMBEDDER_MODEL_NAME = "sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2"
16
+
17
+ # Model directories resolved relative to this file (absolute paths)
18
+ BASE_DIR = Path(__file__).resolve().parent # ai_server/
19
+ EMOTION_MODEL_DIR = Path(os.getenv("EMOTION_MODEL_DIR", BASE_DIR / "models" / "emotion-classification-model"))
20
+ FALLBACK_MODEL_DIR = Path(os.getenv("FALLBACK_MODEL_DIR", BASE_DIR / "models" / "fallback-npc-model"))
21
+ EMBEDDER_MODEL_DIR = Path(os.getenv("EMBEDDER_MODEL_DIR", BASE_DIR / "models" / "sentence-embedder"))
22
+
23
+
24
+ @asynccontextmanager
25
+ async def lifespan(app: FastAPI):
26
+ # Emotion
27
+ emo_tokenizer, emo_model = load_emotion_model(EMOTION_MODEL_NAME, EMOTION_MODEL_DIR)
28
+ app.state.emotion_tokenizer = emo_tokenizer
29
+ app.state.emotion_model = emo_model
30
+
31
+ # Fallback
32
+ fb_tokenizer, fb_model = load_fallback_model(FALLBACK_MODEL_NAME, FALLBACK_MODEL_DIR)
33
+ app.state.fallback_tokenizer = fb_tokenizer
34
+ app.state.fallback_model = fb_model
35
+
36
+ # Embedder
37
+ embedder = load_embedder(EMBEDDER_MODEL_NAME, EMBEDDER_MODEL_DIR)
38
+ app.state.embedder = embedder
39
+ set_embedder(embedder) # share the embedder with the RAG module
40
+
41
+ print("✅ All models loaded")
42
+
43
+ # RAG initialization
44
+ docs_path = BASE_DIR / "rag" / "docs"
45
+ if not chroma_initialized():
46
+ docs = load_game_docs_from_disk(str(docs_path))
47
+ add_docs(docs)
48
+ print(f"✅ Inserted {len(docs)} RAG documents")
49
+ else:
50
+ print("🔄 RAG DB already initialized")
51
+
52
+ yield # application runs
53
+
54
+ print("🛑 Shutting down server...")
55
+
56
+
57
+ app = FastAPI(title="ai-server", lifespan=lifespan)
58
+
59
+ # CORS settings (allow requests from the game-server)
60
+ app.add_middleware(
61
+ CORSMiddleware,
62
+ allow_origins=["https://fpsgame-rrbc.onrender.com"],
63
+ allow_credentials=True,
64
+ allow_methods=["*"],
65
+ allow_headers=["*"],
66
+ )
67
+
68
+
69
+ @app.post("/ask", response_model=AskRes)
70
+ async def ask(request: Request, req: AskReq):
71
+ context = req.context
72
+ npc_config = context.npc_config if context else None
73
+
74
+ if not (req.session_id and req.npc_id and req.user_input and npc_config):
75
+ raise HTTPException(status_code=400, detail="missing fields")
76
+
77
+ result = await handle_dialogue(
78
+ request=request,
79
+ session_id=req.session_id,
80
+ npc_id=req.npc_id,
81
+ user_input=req.user_input,
82
+ context=context.dict()
84
+ )
85
+ return result
86
+
87
+
88
+ @app.post("/wake")
89
+ async def wake(request: Request):
90
+ body = await request.json()
91
+ session_id = body.get("session_id", "unknown")
92
+ print(f"📡 Wake signal received for session: {session_id}")
93
+ return {"status": "awake", "session_id": session_id}
94
+
95
+
96
+ '''
97
+ Example of the final game-server → ai-server request
98
+ {
99
+ "session_id": "abc123",
100
+ "npc_id": "mother_abandoned_factory",
101
+ "user_input": "์•„! ๋จธ๋ฆฌ๊ฐ€โ€ฆ ๊ธฐ์–ต์ด ๋– ์˜ฌ๋ž์–ด์š”.",
102
+
103
+ /* only the require fields (mandatory/optional) pre-filtered by the game-server */
104
+ "context": {
105
+ "require": {
106
+ "items": ["photo_forgotten_party"], // ํ•„์ˆ˜/์„ ํƒ ๊ตฌ๋ถ„์€ npc_config.json์—์„œ
107
+ "actions": ["visited_factory"],
108
+ "game_state": ["box_opened"], // ํ•„์š” ์‹œ
109
+ "delta": { "trust": 0.35, "relationship": 0.1 }
110
+ },
111
+
112
+ "player_state": {
113
+ "level": 7,
114
+ "reputation": "helpful",
115
+ "location": "map1"
116
+ /* the full inventory/action log can be sent separately when needed */
117
+ },
118
+
119
+ "game_state": {
120
+ "current_quest": "search_jason",
121
+ "quest_stage": "in_progress",
122
+ "location": "map1",
123
+ "time_of_day": "evening"
124
+ },
125
+
126
+ "npc_state": {
127
+ "id": "mother_abandoned_factory",
128
+ "name": "์‹ค๋น„์•„",
129
+ "persona_name": "Silvia",
130
+ "dialogue_style": "emotional",
131
+ "relationship": 0.35,
132
+ "npc_mood": "grief"
133
+ },
134
+
135
+ "dialogue_history": [
136
+ {
137
+ "player": "ํ˜น์‹œ ์ด ๊ณต์žฅ์—์„œ ๋ณธ ๊ฑธ ๋งํ•ด์ค˜์š”.",
138
+ "npc": "๊ทธ๋‚ ์„ ๋– ์˜ฌ๋ฆฌ๋Š” ๊ฒŒ ๋„ˆ๋ฌด ํž˜๋“ค์–ด์š”."
139
+ }
140
+ ]
141
+ }
142
+ }
143
+ '''
144
+
145
+ '''
146
+ {
147
+ "session_id": "abc123",
148
+ "npc_id": "mother_abandoned_factory",
149
+ "user_input": "์•„! ๋จธ๋ฆฌ๊ฐ€โ€ฆ ๊ธฐ์–ต์ด ๋– ์˜ฌ๋ž์–ด์š”.",
150
+ "precheck_passed": true,
151
+ "context": {
152
+ "player_status": {
153
+ "level": 7,
154
+ "reputation": "helpful",
155
+ "location": "map1",
156
+
157
+ "trigger_items": ["photo_forgotten_party"], // game-server์—์„œ ์กฐ๊ฑด ํ•„ํ„ฐ ํ›„ key๋กœ ๋ณ€ํ™˜
158
+ "trigger_actions": ["visited_factory"] // ๋งˆ์ฐฌ๊ฐ€์ง€๋กœ key ๋ฌธ์ž์—ด
159
+
160
+ /* the full inventory/actions arrays can still be sent separately if a service needs them,
161
+ but the ai-server only uses trigger_* for its condition checks */
162
+ },
163
+ "game_state": {
164
+ "current_quest": "search_jason",
165
+ "quest_stage": "in_progress",
166
+ "location": "map1",
167
+ "time_of_day": "evening"
168
+ },
169
+ "npc_config": {
170
+ "id": "mother_abandoned_factory",
171
+ "name": "์‹ค๋น„์•„",
172
+ "persona_name": "Silvia",
173
+ "dialogue_style": "emotional",
174
+ "relationship": 0.35,
175
+ "npc_mood": "grief",
176
+ "trigger_values": {
177
+ "in_progress": ["๊ธฐ์–ต", "์‚ฌ์ง„", "ํŒŒํ‹ฐ"]
178
+ },
179
+ "trigger_definitions": {
180
+ "in_progress": {
181
+ "required_text": ["๊ธฐ์–ต", "์‚ฌ์ง„"],
182
+ "required_items": ["photo_forgotten_party"], // trigger_items์™€ ๋งค์นญ
183
+ "required_actions": ["visited_factory"], // trigger_actions์™€ ๋งค์นญ
184
+ "emotion_threshold": { "sad": 0.2 },
185
+ "fallback_style": {
186
+ "style": "guarded",
187
+ "npc_emotion": "suspicious"
188
+ }
189
+ }
190
+ }
191
+ },
192
+ "dialogue_history": [
193
+ {
194
+ "player": "ํ˜น์‹œ ์ด ๊ณต์žฅ์—์„œ ๋ณธ ๊ฑธ ๋งํ•ด์ค˜์š”.",
195
+ "npc": "๊ทธ๋‚ ์„ ๋– ์˜ฌ๋ฆฌ๋Š” ๊ฒŒ ๋„ˆ๋ฌด ํž˜๋“ค์–ด์š”."
196
+ }
197
+ ]
198
+ }
199
+ }
200
+
201
+ ------------------------------------------------------------------------------------------------------
202
+
203
+ Example of the previous game-server request structure:
204
+ {
205
+ "session_id": "abc123",
206
+ "npc_id": "mother_abandoned_factory",
207
+ "user_input": "์•„! ๋จธ๋ฆฌ๊ฐ€โ€ฆ ๊ธฐ์–ต์ด ๋– ์˜ฌ๋ž์–ด์š”.",
208
+ "context": {
209
+ "player_status": {
210
+ "level": 7,
211
+ "reputation": "helpful",
212
+ "location": "map1",
213
+ "items": ["photo_forgotten_party"],
214
+ "actions": ["visited_factory", "talked_to_guard"]
215
+ },
216
+ "game_state": {
217
+ "current_quest": "search_jason",
218
+ "quest_stage": "in_progress",
219
+ "location": "map1",
220
+ "time_of_day": "evening"
221
+ },
222
+ "npc_config": {
223
+ "id": "mother_abandoned_factory",
224
+ "name": "์‹ค๋น„์•„",
225
+ "persona_name": "Silvia",
226
+ "dialogue_style": "emotional",
227
+ "relationship": 0.35,
228
+ "npc_mood": "grief",
229
+ "trigger_values": {
230
+ "in_progress": ["๊ธฐ์–ต", "์‚ฌ์ง„", "ํŒŒํ‹ฐ"]
231
+ },
232
+ "trigger_definitions": {
233
+ "in_progress": {
234
+ "required_text": ["๊ธฐ์–ต", "์‚ฌ์ง„"],
235
+ "emotion_threshold": {"sad": 0.2},
236
+ "fallback_style": {"style": "guarded", "npc_emotion": "suspicious"}
237
+ }
238
+ }
239
+ },
240
+ "dialogue_history": [
241
+ {"player": "ํ˜น์‹œ ์ด ๊ณต์žฅ์—์„œ ๋ณธ ๊ฑธ ๋งํ•ด์ค˜์š”.", "npc": "๊ทธ๋‚ ์„ ๋– ์˜ฌ๋ฆฌ๋Š” ๊ฒŒ ๋„ˆ๋ฌด ํž˜๋“ค์–ด์š”."}
242
+ ]
243
+ }
244
+ }
245
+
246
+ '''
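For reference, a minimal client-side sketch of the /ask call documented above. This is an illustration only: httpx, the base URL, and the exact context fields are assumptions drawn from the example payload and must match the real schemas.AskReq definition.

import asyncio
import httpx

async def ask_npc():
    # Hypothetical payload mirroring the example request in the docstring above.
    payload = {
        "session_id": "abc123",
        "npc_id": "mother_abandoned_factory",
        "user_input": "...",
        "context": {
            "npc_config": {"id": "mother_abandoned_factory", "name": "Silvia"},
            "game_state": {"quest_stage": "in_progress", "location": "map1"},
            "dialogue_history": [],
        },
    }
    async with httpx.AsyncClient(timeout=30.0) as client:
        resp = await client.post("http://localhost:8000/ask", json=payload)
        resp.raise_for_status()
        return resp.json()

if __name__ == "__main__":
    print(asyncio.run(ask_npc()))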
config.py ADDED
@@ -0,0 +1,32 @@
1
+ import os
2
+
3
+ # Hugging Face Spaces serve URL (the Space is public, so the address can be used directly)
4
+ HF_SERVE_URL = os.getenv(
5
+ "HF_SERVE_URL",
6
+ "https://m97j-PersonaChatEngine.hf.space"
7
+ )
8
+
9
+ # Request timeout (seconds)
10
+ HF_TIMEOUT = float(os.getenv("HF_TIMEOUT", "25"))
11
+
12
+ # RAG ํ•ญ์ƒ ์‚ฌ์šฉ (ํ† ๊ธ€์ด ์•„๋‹ˆ๋ผ ๊ณ ์ • ์‚ฌ์šฉ)
13
+ RAG_ENABLED = True
14
+
15
+ # ์ƒ์„ฑ ํŒŒ๋ผ๋ฏธํ„ฐ ๊ธฐ๋ณธ๊ฐ’ (์š”์ฒญ๋งˆ๋‹ค override ๊ฐ€๋Šฅ)
16
+ GENERATION_CONFIG = {
17
+ "max_new_tokens": int(os.getenv("GEN_MAX_NEW_TOKENS", "220")),
18
+ "temperature": float(os.getenv("GEN_TEMPERATURE", "0.7")),
19
+ "top_p": float(os.getenv("GEN_TOP_P", "0.9")),
20
+ "repetition_penalty": float(os.getenv("GEN_REPETITION_PENALTY", "1.1")),
21
+ "do_sample": True
22
+ }
23
+
24
+ '''
25
+ # Model info (for future extension)
26
+ MODEL_INFO = {
27
+ "base_model": "meta-llama/Meta-Llama-3-8B",
28
+ "adapter": "m97j/PersonaAdapter-v1",
29
+ "serve_mode": "hf_spaces", # ๋˜๋Š” "local", "api"
30
+ }
31
+
32
+ '''
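A small sketch of how GENERATION_CONFIG is meant to serve as a per-request default (build_generation_kwargs and request_overrides are hypothetical names, not part of this commit):

from config import GENERATION_CONFIG

def build_generation_kwargs(request_overrides: dict | None = None) -> dict:
    # Start from the env-driven defaults and let the caller override individual fields.
    kwargs = dict(GENERATION_CONFIG)
    if request_overrides:
        kwargs.update(request_overrides)
    return kwargs

# e.g. a request that wants longer, more deterministic output
print(build_generation_kwargs({"max_new_tokens": 300, "temperature": 0.3}))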
manager/__init__.py ADDED
@@ -0,0 +1,4 @@
1
+ # __init__.py
2
+
3
+ # This file ensures the directory is treated as a Python package.
4
+ # Required for relative imports and consistent behavior across environments.
manager/agent_manager.py ADDED
@@ -0,0 +1,52 @@
1
+ from typing import Dict, List
2
+ from rag.rag_manager import retrieve
3
+
4
+ class NPCAgent:
5
+ def __init__(self, npc_id: str):
6
+ self.npc_id = npc_id
7
+ self.cache: Dict[str, Dict[str, List[dict]]] = {} # cache keyed by "quest_stage:location"
8
+
9
+ def load_rag_bundle(self, quest_stage: str, location: str) -> Dict[str, List[dict]]:
10
+ """
11
+ Load every document for this NPC / quest stage / location in one pass and group it by type.
12
+ Documents whose quest_stage/location is 'any' are merged in as well.
13
+ """
14
+ cache_key = f"{quest_stage}:{location}"
15
+ if cache_key in self.cache:
16
+ return self.cache[cache_key]
17
+
18
+ filters_base = {"npc_id": self.npc_id}
19
+
20
+ # 1. ์ •ํ™•ํžˆ ์ผ์น˜
21
+ docs_exact = retrieve(f"{self.npc_id}:bundle", filters={**filters_base, "quest_stage": quest_stage, "location": location}, top_k=50) or []
22
+ # 2. quest_stage=any
23
+ docs_any_stage = retrieve(f"{self.npc_id}:bundle", filters={**filters_base, "quest_stage": "any", "location": location}, top_k=50) or []
24
+ # 3. location=any
25
+ docs_any_loc = retrieve(f"{self.npc_id}:bundle", filters={**filters_base, "quest_stage": quest_stage, "location": "any"}, top_k=50) or []
26
+ # 4. quest_stage=any, location=any
27
+ docs_global = retrieve(f"{self.npc_id}:bundle", filters={**filters_base, "quest_stage": "any", "location": "any"}, top_k=50) or []
28
+
29
+ all_docs = docs_exact + docs_any_stage + docs_any_loc + docs_global
30
+
31
+ # type๋ณ„ ๋ถ„๋ฅ˜
32
+ bundle: Dict[str, List[dict]] = {}
33
+ for doc in all_docs:
34
+ t = doc.get("type", "unknown")
35
+ bundle.setdefault(t, []).append(doc)
36
+
37
+ self.cache[cache_key] = bundle
38
+ return bundle
39
+
40
+
41
+ class AgentManager:
42
+ def __init__(self):
43
+ self.agents: Dict[str, NPCAgent] = {}
44
+
45
+ def get_agent(self, npc_id: str) -> NPCAgent:
46
+ if npc_id not in self.agents:
47
+ self.agents[npc_id] = NPCAgent(npc_id)
48
+ return self.agents[npc_id]
49
+
50
+
51
+ # ์ „์—ญ ์ธ์Šคํ„ด์Šค
52
+ agent_manager = AgentManager()
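A minimal usage sketch of the bundle loader above (the npc_id, quest stage, and location come from the example docs; actual results depend on what has been inserted into the RAG store):

from manager.agent_manager import agent_manager

# Fetch and cache every document for this NPC at the current quest stage/location,
# then pick out the per-type groups the pipeline relies on.
agent = agent_manager.get_agent("mother_abandoned_factory")
bundle = agent.load_rag_bundle(quest_stage="in_progress", location="map1")

trigger_defs = bundle.get("trigger_def", [])   # used by the first-pass check in preprocess_input
personas = bundle.get("npc_persona", [])       # merged into both main and fallback doc lists
print(len(trigger_defs), len(personas))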
manager/dialogue_manager.py ADDED
@@ -0,0 +1,68 @@
1
+ from fastapi import Request
2
+ from pipeline.preprocess import preprocess_input
3
+ from pipeline.generator import generate_response
4
+ from pipeline.postprocess import postprocess_main, fallback_final_check
5
+ from models.fallback_model import generate_fallback_response
6
+ from manager.prompt_builder import build_main_prompt, build_fallback_prompt # use the revised prompt builders
7
+
8
+ async def handle_dialogue(
9
+ request: Request,
10
+ session_id: str,
11
+ npc_id: str,
12
+ user_input: str,
13
+ context: dict,
14
+ ) -> dict:
15
+ """
16
+ Full dialogue pipeline:
17
+ 1) preprocess_input() → build the pre data
18
+ 2) main path: main prompt → main model → postprocess_main()
19
+ 3) fallback path: fallback prompt → fallback model → fallback_final_check()
20
+ """
21
+ # 1. Preprocess
22
+ pre = await preprocess_input(request, session_id, npc_id, user_input, context)
23
+
24
+ # 2. Fallback path
25
+ if not pre.get("is_valid", True):
26
+ # build the fallback prompt (branches internally on additional_trigger)
27
+ fb_prompt = build_fallback_prompt(pre, session_id, npc_id)
28
+
29
+ # call the fallback model
30
+ fb_raw = await generate_fallback_response(request, fb_prompt)
31
+
32
+ # final check specific to the fallback path
33
+ fb_checked = await fallback_final_check(
34
+ request=request,
35
+ fb_response=fb_raw,
36
+ player_utt=pre["player_utterance"],
37
+ npc_config=pre["tags"],
38
+ action_delta=pre.get("trigger_meta", {})
39
+ )
40
+
41
+ # build and return the payload
42
+ return {
43
+ "session_id" : session_id,
44
+ "npc_output_text": fb_checked,
45
+ "flags": {}, # fallback์€ flag/delta ์ด๋ฏธ pre์—์„œ ํ™•์ •
46
+ "deltas": pre.get("trigger_meta", {}).get("delta", {}),
47
+ "meta": {
48
+ "npc_id": pre["npc_id"],
49
+ "quest_stage": pre["game_state"].get("quest_stage", "default"),
50
+ "location": pre["game_state"].get("location", context.get("location", "unknown"))
51
+ }
52
+ }
53
+
54
+ # 3. Main path
55
+ main_prompt = build_main_prompt(pre, session_id, npc_id)
56
+
57
+ # call the main model
58
+ result = await generate_response(session_id, npc_id, main_prompt, max_tokens=200)
59
+
60
+ # postprocess_main builds the final payload
61
+ return_payload = await postprocess_main(
62
+ request=request,
63
+ pre_data=pre, # full preprocess result
64
+ model_payload=result # main model output
65
+ )
67
+
68
+ return return_payload
manager/prompt_builder.py ADDED
@@ -0,0 +1,171 @@
1
+ from typing import Dict, Any
2
+
3
+ def build_main_prompt(pre: Dict[str, Any], session_id: str, npc_id: str) -> str:
4
+ tags = pre.get("tags", {})
5
+ ps = pre.get("player_state", {})
6
+ rag_docs = pre.get("rag_main_docs", [])
7
+
8
+ # Split RAG documents
9
+ lore_text = ""
10
+ desc_text = ""
11
+ for doc in rag_docs:
12
+ if "LORE:" in doc:
13
+ lore_text += doc + "\n"
14
+ elif "DESCRIPTION:" in doc:
15
+ desc_text += doc + "\n"
16
+ else:
17
+ # fallback: split by type keyword instead
18
+ if "lore" in doc.lower():
19
+ lore_text += doc + "\n"
20
+ elif "description" in doc.lower():
21
+ desc_text += doc + "\n"
22
+
23
+ prompt = [
24
+ "<SYS>",
25
+ f"NPC_ID={tags.get('npc_id','')}",
26
+ f"NPC_LOCATION={tags.get('location','')}",
27
+ "TAGS:",
28
+ f" quest_stage={tags.get('quest_stage','')}",
29
+ f" relationship={tags.get('relationship','')}",
30
+ f" trust={tags.get('trust','')}",
31
+ f" npc_mood={tags.get('npc_mood','')}",
32
+ f" player_reputation={tags.get('player_reputation','')}",
33
+ f" style={tags.get('style','')}",
34
+ "</SYS>",
35
+ "<RAG>",
36
+ f"LORE: {lore_text.strip() or '(์—†์Œ)'}",
37
+ f"DESCRIPTION: {desc_text.strip() or '(์—†์Œ)'}",
38
+ "</RAG>",
39
+ "<PLAYER_STATE>"
40
+ ]
41
+
42
+ if ps.get("items"):
43
+ prompt.append(f"items={','.join(ps['items'])}")
44
+ if ps.get("actions"):
45
+ prompt.append(f"actions={','.join(ps['actions'])}")
46
+ if ps.get("position"):
47
+ prompt.append(f"position={ps['position']}")
48
+ prompt.append("</PLAYER_STATE>")
49
+
50
+ prompt.append("<CTX>")
51
+ for h in pre.get("context", []):
52
+ prompt.append(f"{h['role']}: {h['text']}")
53
+ prompt.append("</CTX>")
54
+
55
+ prompt.append(f"<PLAYER>{pre.get('player_utterance','').rstrip()}")
56
+ prompt.append("<STATE>")
57
+ prompt.append("<NPC>")
58
+
59
+ return "\n".join(prompt)
60
+
61
+
62
+
63
+ def build_fallback_prompt(pre: Dict[str, Any], session_id: str, npc_id: str) -> str:
64
+ """
65
+ Build either the normal or the special fallback prompt in one function, depending on additional_trigger.
66
+ """
67
+ tags = pre.get("tags", {})
68
+ ps = pre.get("player_state", {})
69
+ gs = pre.get("game_state", {})
70
+ rag_text = "\n".join(f"- {doc}" for doc in pre.get("rag_fallback_docs", []))
71
+ fb_style = pre.get("fallback_style") or {}
72
+ trigger_meta = pre.get("trigger_meta", {}) or {}
73
+
74
+ items = ",".join(ps.get("items", []))
75
+ actions = ",".join(ps.get("actions", []))
76
+ location = gs.get("location") or ps.get("location", "unknown")
77
+ quest_stage = gs.get("quest_stage", "unknown")
78
+
79
+ # Base instruction
80
+ instr = (
81
+ "๋‹น์‹ ์€ NPC persona๋ฅผ ๊ฐ€์ง„ ์บ๋ฆญํ„ฐ์ž…๋‹ˆ๋‹ค. "
82
+ "ํ”Œ๋ ˆ์ด์–ด ๋ฐœํ™”์— ์ž์—ฐ์Šค๋Ÿฝ๊ณ  ๋งฅ๋ฝ์— ๋งž๋Š” ๋Œ€์‚ฌ๋ฅผ ์ƒ์„ฑํ•˜์„ธ์š”. "
83
+ "์Šคํ† ๋ฆฌ ์ง„ํ–‰ ์กฐ๊ฑด์€ ์ถฉ์กฑ๋˜์ง€ ์•Š์•˜์Šต๋‹ˆ๋‹ค."
84
+ )
85
+
86
+ # additional_trigger=True → special fallback
87
+ if pre.get("additional_trigger"):
88
+ # refine using trigger_meta
89
+ s = fb_style.get("style") or trigger_meta.get("npc_style")
90
+ a = fb_style.get("npc_action") or trigger_meta.get("npc_action")
91
+ e = fb_style.get("npc_emotion") or trigger_meta.get("npc_emotion")
92
+ more = []
93
+ if s: more.append(f"๋Œ€ํ™” ์Šคํƒ€์ผ={s}")
94
+ if a: more.append(f"NPC ํ–‰๋™={a}")
95
+ if e: more.append(f"NPC ๊ฐ์ •={e}")
96
+ if more:
97
+ instr += " " + "; ".join(more) + "."
98
+ # state explicitly that this is the special fallback
99
+ instr += " ์ด ๋ฐ˜์‘์€ ํ”Œ๋ ˆ์ด์–ด์˜ ํŠน์ • ๋ฐœํ™”(๊ธˆ์ง€ ํŠธ๋ฆฌ๊ฑฐ)์— ์˜ํ•ด ์œ ๋ฐœ๋œ ๊ฒƒ์ž…๋‹ˆ๋‹ค."
100
+
101
+ return f"""
102
+ <FALLBACK>
103
+ NPC_ID={npc_id}
104
+ SESSION_ID={session_id}
105
+ LOCATION={location}
106
+ QUEST_STAGE={quest_stage}
107
+ MOOD={tags.get("npc_mood","neutral")}
108
+ STYLE={tags.get("style","neutral")}
109
+ ITEMS={items}
110
+ ACTIONS={actions}
111
+ EMOTION_SUMMARY={', '.join([f"{k}:{round(v,2)}" for k,v in pre.get('emotion',{}).items()])}
112
+ INPUT="{pre['player_utterance']}"
113
+
114
+ RAG_CONTEXT:
115
+ {rag_text or "(none)"}
116
+
117
+ INSTRUCTION:
118
+ {instr}
119
+ </FALLBACK>
120
+ """.strip()
121
+
122
+
123
+
124
+
125
+
126
+
127
+ '''
128
+ def build_fallback_prompt(pre: dict, session_id: str, npc_id: str) -> str:
129
+ tags = pre.get("tags", {})
130
+ ps = pre.get("player_state", {})
131
+ gs = pre.get("game_state", {})
132
+ rag_text = "\n".join(f"- {doc}" for doc in pre.get("rag_fallback_docs", []))
133
+ fb = pre.get("fallback_style") or {}
134
+
135
+ items = ",".join(ps.get("items", []))
136
+ actions = ",".join(ps.get("actions", []))
137
+ location = gs.get("location") or ps.get("location", "unknown")
138
+ quest_stage = gs.get("quest_stage", "unknown")
139
+
140
+ instr = "์กฐ๊ฑด ๋ถˆ์ถฉ์กฑ. ์Šคํ† ๋ฆฌ ์ง„ํ–‰์€ ํ•˜์ง€ ์•Š๊ณ , ์บ๋ฆญํ„ฐ ์ผ๊ด€์„ฑ์„ ์œ ์ง€ํ•˜๋ฉฐ ์ž์—ฐ์Šค๋Ÿฝ๊ฒŒ ์‘๋‹ตํ•˜๋ผ."
141
+ if fb:
142
+ # ์„ ํƒ์  ๊ตฌ์ฒดํ™”
143
+ s = fb.get("style"); a = fb.get("npc_action"); e = fb.get("npc_emotion")
144
+ more = []
145
+ if s: more.append(f"๋Œ€ํ™” ์Šคํƒ€์ผ={s}")
146
+ if a: more.append(f"NPC ํ–‰๋™={a}")
147
+ if e: more.append(f"NPC ๏ฟฝ๏ฟฝ๏ฟฝ์ •={e}")
148
+ if more:
149
+ instr += " " + "; ".join(more) + "."
150
+
151
+ return f"""
152
+ <FALLBACK>
153
+ NPC_ID={npc_id}
154
+ SESSION_ID={session_id}
155
+ LOCATION={location}
156
+ QUEST_STAGE={quest_stage}
157
+ MOOD={tags.get("npc_mood","neutral")}
158
+ STYLE={tags.get("style","neutral")}
159
+ ITEMS={items}
160
+ ACTIONS={actions}
161
+ EMOTION_SUMMARY={', '.join([f"{k}:{round(v,2)}" for k,v in pre.get('emotion',{}).items()])}
162
+ INPUT="{pre['player_utterance']}"
163
+
164
+ RAG:
165
+ {rag_text or "(none)"}
166
+
167
+ INSTRUCTION:
168
+ {instr}
169
+ </FALLBACK>
170
+ """.strip()
171
+ '''
models/__init__.py ADDED
@@ -0,0 +1,4 @@
1
+ # __init__.py
2
+
3
+ # This file ensures the directory is treated as a Python package.
4
+ # Required for relative imports and consistent behavior across environments.
models/emotion-classification-model/.gitkeep ADDED
File without changes
models/emotion_model.py ADDED
@@ -0,0 +1,17 @@
1
+ from transformers import pipeline
2
+ from fastapi import Request
3
+
4
+ async def detect_emotion(request: Request, text: str) -> dict:
5
+ tokenizer = request.app.state.emotion_tokenizer
6
+ model = request.app.state.emotion_model
7
+
8
+ emotion_pipeline = pipeline(
9
+ "text-classification",
10
+ model=model,
11
+ tokenizer=tokenizer,
12
+ return_all_scores=True
13
+ )
14
+
15
+ results = emotion_pipeline(text)
16
+ # convert the results to a {label: score} dict
17
+ return {r["label"]: r["score"] for r in results[0]}
models/fallback-npc-model/.gitkeep ADDED
File without changes
models/fallback_model.py ADDED
@@ -0,0 +1,19 @@
1
+ import torch
2
+ from fastapi import Request
3
+
4
+ async def generate_fallback_response(request: Request, prompt: str) -> str:
5
+ tokenizer = request.app.state.fallback_tokenizer
6
+ model = request.app.state.fallback_model
7
+
8
+ inputs = tokenizer(prompt, return_tensors="pt")
9
+ with torch.no_grad():
10
+ outputs = model.generate(
11
+ **inputs,
12
+ max_new_tokens=150,
13
+ temperature=0.7,
14
+ top_p=0.9,
15
+ repetition_penalty=1.1,
16
+ do_sample=True
17
+ )
18
+ decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
19
+ return decoded[len(prompt):].strip() or "..."
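One caveat: decoded[len(prompt):] assumes the detokenized text reproduces the prompt character-for-character, which tokenizers do not always guarantee. A sketch of an alternative that slices by token ids instead (same generation settings; decode_new_tokens is a hypothetical helper, not part of this commit):

import torch

def decode_new_tokens(tokenizer, model, prompt: str) -> str:
    inputs = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():
        outputs = model.generate(**inputs, max_new_tokens=150, do_sample=True,
                                 temperature=0.7, top_p=0.9, repetition_penalty=1.1)
    # Drop the prompt tokens instead of slicing the decoded string.
    new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True).strip() or "..."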
models/model_loader.py ADDED
@@ -0,0 +1,40 @@
1
+ from pathlib import Path
2
+ from transformers import (
3
+ AutoTokenizer,
4
+ AutoModelForSequenceClassification,
5
+ AutoModelForCausalLM
6
+ )
7
+ from sentence_transformers import SentenceTransformer
8
+
9
+
10
+ def load_emotion_model(model_name: str, model_dir: Path):
11
+ if not model_dir.exists() or not any(model_dir.iterdir()):
12
+ tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
13
+ model = AutoModelForSequenceClassification.from_pretrained(model_name, trust_remote_code=True)
14
+ tokenizer.save_pretrained(model_dir)
15
+ model.save_pretrained(model_dir)
16
+
17
+ tokenizer = AutoTokenizer.from_pretrained(str(model_dir), trust_remote_code=True, local_files_only=True)
18
+ model = AutoModelForSequenceClassification.from_pretrained(str(model_dir), trust_remote_code=True, local_files_only=True)
19
+ return tokenizer, model
20
+
21
+
22
+ def load_fallback_model(model_name: str, model_dir: Path):
23
+ if not model_dir.exists() or not any(model_dir.iterdir()):
24
+ tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
25
+ model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
26
+ tokenizer.save_pretrained(model_dir)
27
+ model.save_pretrained(model_dir)
28
+
29
+ tokenizer = AutoTokenizer.from_pretrained(str(model_dir), trust_remote_code=True, local_files_only=True)
30
+ model = AutoModelForCausalLM.from_pretrained(str(model_dir), trust_remote_code=True, local_files_only=True)
31
+ return tokenizer, model
32
+
33
+
34
+ def load_embedder(model_name: str, model_dir: Path):
35
+ if not model_dir.exists() or not any(model_dir.iterdir()):
36
+ embedder = SentenceTransformer(model_name)
37
+ embedder.save(str(model_dir))
38
+
39
+ embedder = SentenceTransformer(str(model_dir))
40
+ return embedder
models/sentence-embedder/.gitkeep ADDED
File without changes
pipeline/__init__.py ADDED
@@ -0,0 +1,4 @@
1
+ # __init__.py
2
+
3
+ # This file ensures the directory is treated as a Python package.
4
+ # Required for relative imports and consistent behavior across environments.
pipeline/generator.py ADDED
@@ -0,0 +1,29 @@
1
+ from utils.hf_client import call_main
2
+
3
+ '''
4
+ async def generate_response(session_id: str, npc_id: str, prompt: str, max_tokens: int = 200,
5
+ temperature: float = 0.7, top_p: float = 0.9,
6
+ do_sample: bool = True, repetition_penalty: float = 1.05) -> dict:
7
+ payload = {
8
+ "session_id": session_id,
9
+ "npc_id": npc_id,
10
+ "prompt": prompt,
11
+ "max_tokens": max_tokens,
12
+ "temperature": temperature,
13
+ "top_p": top_p,
14
+ "do_sample": do_sample,
15
+ "repetition_penalty": repetition_penalty
16
+ }
17
+ return await call_main(payload)
18
+
19
+
20
+ '''
21
+ async def generate_response(session_id: str, npc_id: str, prompt: str, max_tokens: int = 200) -> dict:
22
+ payload = {
23
+ "session_id": session_id,
24
+ "npc_id": npc_id,
25
+ "prompt": prompt,
26
+ "max_tokens": max_tokens
27
+ }
28
+ return await call_main(payload) # {"text":..., "delta":..., "flag":...}
29
+ #'''
pipeline/postprocess.py ADDED
@@ -0,0 +1,504 @@
1
+ import torch, random, re
2
+ from typing import Dict, Any, List, Optional, Tuple
3
+ from fastapi import Request
4
+ from sentence_transformers import util
5
+ from models.fallback_model import generate_fallback_response
6
+
7
+ ALPHA_THR = 0.58
8
+ DELTA_CLAMP = (-1.0, 1.0)
9
+
10
+ # ----------------------------
11
+ # Utilities
12
+ # ----------------------------
13
+
14
+ def _clamp(x: float, lo: float, hi: float) -> float:
15
+ return max(lo, min(hi, x))
16
+
17
+ def _adjust_delta_with_rag(delta: Dict[str, float]) -> Dict[str, float]:
18
+ trust = _clamp(float(delta.get("trust", 0.0)), *DELTA_CLAMP)
19
+ rel = _clamp(float(delta.get("relationship", 0.0)), *DELTA_CLAMP)
20
+ return {"trust": trust, "relationship": rel}
21
+
22
+ def _embedding_similarity(embedder, text: str, examples: List[str]) -> float:
23
+ if not examples:
24
+ return 0.0
25
+ inp_emb = embedder.encode(text, convert_to_tensor=True)
26
+ ex_embs = embedder.encode(examples, convert_to_tensor=True)
27
+ cos_scores = util.cos_sim(inp_emb, ex_embs)
28
+ return float(torch.mean(cos_scores).item())
29
+
30
+ def _doc_type(doc: Dict[str, Any]) -> Optional[str]:
31
+ if "type" in doc:
32
+ return doc.get("type")
33
+ return doc.get("metadata", {}).get("type")
34
+
35
+ def _get_flag_doc(rag_docs: List[Dict[str, Any]], flag_name: str) -> Dict[str, Any]:
36
+ for doc in rag_docs:
37
+ if _doc_type(doc) == "flag_def" and doc.get("flag_name") == flag_name:
38
+ return doc
39
+ return {}
40
+
41
+ def _get_turn_doc(rag_docs: List[Dict[str, Any]], npc_id: str, quest_stage: str) -> Dict[str, Any]:
42
+ # prefer the most recent (highest turn_index) doc for the same npc_id/quest_stage
43
+ candidates = [
44
+ d for d in rag_docs
45
+ if _doc_type(d) == "dialogue_turn"
46
+ and d.get("npc_id") == npc_id
47
+ and d.get("quest_stage") == quest_stage
48
+ ]
49
+ if not candidates:
50
+ return {}
51
+ return sorted(candidates, key=lambda d: d.get("turn_index", -1))[-1]
52
+
53
+ def _short_ctx_from_pre(pre_data: dict) -> str:
54
+ pairs = pre_data.get("context", []) or []
55
+ return "\n".join(f"{m.get('role', 'user')}: {m.get('text', '')}" for m in pairs)
56
+
57
+ async def fetch_response_policy_from_pre(pre_data: dict) -> str:
58
+ for doc in pre_data.get("rag_main_docs", []):
59
+ if _doc_type(doc) == "main_res_validate":
60
+ return doc.get("text", "") or doc.get("chunk", "")
61
+ return (
62
+ "์‘๋‹ต์ด NPC persona์™€ ํ˜„์žฌ ์ƒํƒœ(delta, flags)์— ๋ถ€ํ•ฉํ•˜๋Š”์ง€ ๊ฒ€์ฆํ•˜์‹œ์˜ค. "
63
+ "๋ถ€์ ์ ˆํ•œ ํ‘œํ˜„์€ ์™„ํ™”ํ•˜๊ณ , ์„ธ๊ณ„๊ด€์„ ์œ ์ง€ํ•˜์‹œ์˜ค."
64
+ )
65
+
66
+ # ----------------------------
67
+ # RAG helpers
68
+ # ----------------------------
69
+
70
+ def _extract_expected_delta(rag_docs: List[Dict[str, Any]]) -> Dict[str, float]:
71
+ # trigger_def.delta_expected ์šฐ์„ , ์—†์œผ๋ฉด dialogue_turn.delta ํ‰๊ท (์„ ํƒ)
72
+ expected = {}
73
+ for doc in rag_docs:
74
+ if _doc_type(doc) == "trigger_def" and doc.get("delta_expected"):
75
+ expected.update(doc["delta_expected"])
76
+ return expected
77
+
78
+ def _collect_value_contexts(rag_docs: List[Dict[str, Any]], value: str) -> List[str]:
79
+ contexts = []
80
+ for doc in rag_docs:
81
+ # description/content/text ํ•„๋“œ์—์„œ value๊ฐ€ ์–ธ๊ธ‰๋œ ๋ฌธ์žฅ ์ˆ˜์ง‘
82
+ for key in ("content", "text", "npc", "player"):
83
+ if value and isinstance(doc.get(key), str) and value in doc[key]:
84
+ contexts.append(doc[key])
85
+ return contexts
86
+
87
+ def _weight_by_doc_type(t: str) -> float:
88
+ # ๋“ฑ์žฅ ์œ„์น˜ ๊ฐ€์ค‘์น˜(์ƒํ™ฉ์— ๋งž๊ฒŒ ์กฐ์ •)
89
+ return {
90
+ "dialogue_turn": 1.2,
91
+ "trigger_def": 1.0,
92
+ "description": 1.0,
93
+ "npc_persona": 0.9,
94
+ "lore": 0.7,
95
+ "flag_def": 0.8,
96
+ "main_res_validate": 0.8,
97
+ }.get(t, 1.0)
98
+
99
+ def _collect_positive_negative_texts(rag_docs: List[Dict[str, Any]]) -> Tuple[List[str], List[str]]:
100
+ pos, neg = [], []
101
+ for doc in rag_docs:
102
+ t = _doc_type(doc)
103
+ w = _weight_by_doc_type(t)
104
+ if isinstance(doc.get("examples_positive"), list):
105
+ pos.extend([f"[{t}] {s}" for s in doc["examples_positive"]] * int(max(1, round(w))))
106
+ if isinstance(doc.get("examples_good"), list):
107
+ pos.extend([f"[{t}] {s}" for s in doc["examples_good"]] * int(max(1, round(w))))
108
+ if isinstance(doc.get("examples_negative"), list):
109
+ neg.extend([f"[{t}] {s}" for s in doc["examples_negative"]] * int(max(1, round(w))))
110
+ if isinstance(doc.get("examples_bad"), list):
111
+ neg.extend([f"[{t}] {s}" for s in doc["examples_bad"]] * int(max(1, round(w))))
112
+ return pos, neg
113
+
114
+ # ----------------------------
115
+ # Delta ๊ฒ€์ฆ/๋ณด์ •
116
+ # ----------------------------
117
+
118
+ def _adjust_delta_with_rag_and_embedding(
119
+ delta: Dict[str, float],
120
+ rag_docs: List[Dict[str, Any]],
121
+ embedder,
122
+ player_utt: str,
123
+ npc_text: str,
124
+ flags_yes: List[str],
125
+ sim_threshold: float = 0.72,
126
+ diff_threshold: float = 0.18,
127
+ blend: float = 0.6 # how strongly to pull toward the expected delta
128
+ ) -> Dict[str, float]:
129
+ trust = _clamp(float(delta.get("trust", 0.0)), *DELTA_CLAMP)
130
+ rel = _clamp(float(delta.get("relationship", 0.0)), *DELTA_CLAMP)
131
+
132
+ expected = _extract_expected_delta(rag_docs)
133
+ pos, neg = _collect_positive_negative_texts(rag_docs)
134
+
135
+ context_text = f"PLAYER: {player_utt}\nNPC: {npc_text}\nFLAGS: {', '.join(flags_yes) if flags_yes else 'none'}"
136
+ pos_sim = _embedding_similarity(embedder, context_text, pos) if pos else 0.0
137
+ neg_sim = _embedding_similarity(embedder, context_text, neg) if neg else 0.0
138
+ # ๋งฅ๋ฝ์ด โ€˜๊ธ์ •โ€™์— ๊ฐ€๊น๊ณ  ๊ธฐ๋Œ€์™€ ์ฐจ์ด๊ฐ€ ํฌ๋ฉด ๊ธฐ๋Œ€ ์ชฝ์œผ๋กœ ๋ณด์ •
139
+ def _pull(val, key):
140
+ if key in expected:
141
+ exp = float(expected[key])
142
+ if abs(val - exp) > diff_threshold and pos_sim - neg_sim >= (sim_threshold - 0.1):
143
+ return _clamp(blend * exp + (1 - blend) * val, *DELTA_CLAMP)
144
+ return val
145
+
146
+ trust = _pull(trust, "trust")
147
+ rel = _pull(rel, "relationship")
148
+ return {"trust": trust, "relationship": rel}
149
+
150
+ # ----------------------------
151
+ # Flag ๋ณด์ • ๋กœ์ง(ํ™•์žฅ)
152
+ # ----------------------------
153
+
154
+ def adjust_flags_with_rag_and_embedding(
155
+ flags_prob: Dict[str, float],
156
+ flags_thr: Dict[str, float],
157
+ rag_flags_score: Dict[str, float],
158
+ rag_flags_pred: Dict[str, int],
159
+ embedder,
160
+ npc_text: str,
161
+ rag_positive_examples: Dict[str, List[str]],
162
+ deltas_final: Dict[str, float], # adjusted deltas from the previous step
163
+ rag_docs: List[Dict[str, Any]],
164
+ alpha_model: float = 0.6,
165
+ margin: float = 0.05,
166
+ sim_threshold: float = 0.8,
167
+ random_jitter: float = 0.05
168
+ ) -> Dict[str, int]:
169
+ # overall pattern similarity between model and RAG flag scores
170
+ model_vector = [flags_prob.get(name, 0.0) for name in rag_flags_score.keys()]
171
+ rag_vector = [rag_flags_score.get(name, 0.0) for name in rag_flags_score.keys()]
172
+ # cosine similarity of the two score vectors (SentenceTransformer.encode expects text, not numeric vectors)
173
+ sim = float(
174
+ util.cos_sim(torch.tensor([model_vector]), torch.tensor([rag_vector]))
175
+ )
176
+
177
+ expected = _extract_expected_delta(rag_docs)
178
+
179
+ final_preds = {}
180
+ for name in rag_flags_score.keys():
181
+ prob_model = float(flags_prob.get(name, 0.0))
182
+ thr_model = float(flags_thr.get(name, 0.5))
183
+ score_rag = float(rag_flags_score.get(name, 0.0))
184
+ _ = int(rag_flags_pred.get(name, 0))
185
+
186
+ emb_score = _embedding_similarity(embedder, npc_text, rag_positive_examples.get(name, []))
187
+
188
+ # delta-consistency penalty (when this flag is expected but the delta disagrees)
189
+ delta_penalty = 0.0
190
+ if expected:
191
+ # ์‹ ํ˜ธ๊ฐ€ ์–‘์˜ ๋ณ€ํ™”์ธ๋ฐ ๋ชจ๋ธ delta๊ฐ€ ํฐ ์Œ์ˆ˜์ธ ๊ฒฝ์šฐ ๋“ฑ
192
+ if "trust" in expected and deltas_final.get("trust", 0.0) * expected["trust"] < 0:
193
+ delta_penalty += 0.08
194
+ if "relationship" in expected and deltas_final.get("relationship", 0.0) * expected["relationship"] < 0:
195
+ delta_penalty += 0.06
196
+
197
+ # blended score: model prob + RAG score + embedding similarity - delta penalty
198
+ blended_score = (
199
+ alpha_model * prob_model
200
+ + (1 - alpha_model) * score_rag
201
+ + 0.2 * emb_score
202
+ - delta_penalty
203
+ )
204
+ thr_blend = alpha_model * thr_model + (1 - alpha_model) * 0.5
205
+
206
+ if abs(blended_score - thr_blend) <= margin:
207
+ adjusted_score = score_rag if sim < sim_threshold else blended_score
208
+ else:
209
+ adjusted_score = blended_score
210
+
211
+ if adjusted_score != score_rag:
212
+ adjusted_score += random.uniform(-random_jitter, random_jitter)
213
+ adjusted_score = max(0.0, min(1.0, adjusted_score))
214
+
215
+ final_preds[name] = int(adjusted_score >= thr_blend)
216
+
217
+ return final_preds
218
+
219
+ # ----------------------------
220
+ # Validators / Rewriters
221
+ # ----------------------------
222
+
223
+ async def validate_or_rewrite_response(
224
+ request: Request,
225
+ response_text: str,
226
+ description_text: str,
227
+ ctx_text: str,
228
+ player_utt: str,
229
+ deltas: Dict[str, float],
230
+ flags_yes: List[str],
231
+ flags_values: Dict[str, str], # added: final flag values
232
+ value_contexts: Dict[str, List[str]], # added: supporting contexts for each value
233
+ ) -> str:
234
+ flag_value_info = "\n".join(f"- {k}: {v}" for k, v in flags_values.items()) if flags_values else "none"
235
+ value_ctx_lines = []
236
+ for k, arr in value_contexts.items():
237
+ if arr:
238
+ # ๋„ˆ๋ฌด ๊ธธ์–ด์ง€๋Š” ๊ฒƒ์„ ๋ฐฉ์ง€ํ•˜์—ฌ ์ƒ์œ„ 1~2๊ฐœ๋งŒ
239
+ value_ctx_lines.append(f"- {k}: {arr[0]}")
240
+ if len(arr) > 1:
241
+ value_ctx_lines.append(f" (more: {min(2, len(arr)-1)} refs)")
242
+ value_ctx_info = "\n".join(value_ctx_lines) if value_ctx_lines else "none"
243
+
244
+ prompt = (
245
+ "๋‹ค์Œ์€ ๊ฒŒ์ž„ ๋‚ด NPC ์‘๋‹ต์ž…๋‹ˆ๋‹ค.\n"
246
+ f"[RESPONSE]\n{response_text}\n[/RESPONSE]\n\n"
247
+ "์•„๋ž˜์˜ ๊ฒ€์ฆ ๊ธฐ์ค€์„ ๋งŒ์กฑํ•˜๋Š”์ง€ ํŒ๋‹จํ•˜๊ณ , ๋งŒ์กฑํ•˜์ง€ ์•Š์œผ๋ฉด ๊ธฐ์ค€์— ๋งž๊ฒŒ ์ž์—ฐ์Šค๋Ÿฝ๊ฒŒ ์žฌ์ž‘์„ฑํ•˜์„ธ์š”.\n"
248
+ f"[FINAL_CHECK_DESCRIPTION]\n{description_text}\n[/FINAL_CHECK_DESCRIPTION]\n\n"
249
+ "์ƒํƒœ ์ •๋ณด:\n"
250
+ f"- DELTA: trust={deltas.get('trust',0.0):.3f}, relationship={deltas.get('relationship',0.0):.3f}\n"
251
+ f"- FLAGS(YES): {', '.join(flags_yes) if flags_yes else 'none'}\n"
252
+ f"- FLAG_VALUES:\n{flag_value_info}\n"
253
+ f"- VALUE_CONTEXTS:\n{value_ctx_info}\n\n"
254
+ "๋งฅ๋ฝ:\n"
255
+ f"[CTX]\n{ctx_text}\n[/CTX]\n"
256
+ f"[PLAYER]\n{player_utt}\n[/PLAYER]\n\n"
257
+ "์š”๊ตฌ์‚ฌํ•ญ:\n"
258
+ "- ๊ธฐ์ค€์„ ๋งŒ์กฑํ•˜๋ฉด ์‘๋‹ต์„ ๊ทธ๋Œ€๋กœ ์ถœ๋ ฅํ•˜๋˜ ๋ฏผ๊ฐํ•œ ํ‘œํ˜„์€ ์™„ํ™”ํ•˜์„ธ์š”.\n"
259
+ "- ๊ธฐ์ค€์„ ๋งŒ์กฑํ•˜์ง€ ์•Š์œผ๋ฉด ๊ธฐ์ค€์„ ์ถฉ์กฑํ•˜๋„๋ก ์‘๋‹ต์„ ์ž์—ฐ์Šค๋Ÿฝ๊ฒŒ ์žฌ์ž‘์„ฑํ•˜์„ธ์š”.\n"
260
+ "- ์ถœ๋ ฅ์€ NPC์˜ ์ตœ์ข… ๋Œ€์‚ฌ๋งŒ ํ•œ ์ค„๋กœ ์ œ๊ณตํ•˜์„ธ์š”."
261
+ )
262
+ fb_raw = await generate_fallback_response(request, prompt)
263
+ return fb_raw.strip()
264
+
265
+ # ----------------------------
266
+ # Main path postprocess
267
+ # ----------------------------
268
+
269
+ async def postprocess_main(
270
+ request: Request,
271
+ pre_data: dict,
272
+ model_payload: dict
273
+ ) -> dict:
274
+ embedder = request.app.state.embedder
275
+ npc_id = pre_data["npc_id"]
276
+ quest_stage = pre_data["game_state"].get("quest_stage", "default")
277
+ location = pre_data["game_state"].get("location", "unknown")
278
+
279
+ rag_docs = pre_data.get("rag_main_docs", [])
280
+ npc_text_in = (model_payload.get("npc_output_text") or "").strip()
281
+ player_utt = pre_data.get("player_utterance", "")
282
+
283
+ # 1) Validate/adjust deltas (semantics + expected values)
284
+ deltas_in = model_payload.get("deltas", {}) or {}
285
+ deltas_adj = _adjust_delta_with_rag_and_embedding(
286
+ delta=deltas_in,
287
+ rag_docs=rag_docs,
288
+ embedder=embedder,
289
+ player_utt=player_utt,
290
+ npc_text=npc_text_in,
291
+ flags_yes=[],
292
+ )
293
+
294
+ # 2) Adjust flags (embedding / expected-delta aware)
295
+ flags_binary = adjust_flags_with_rag_and_embedding(
296
+ flags_prob=model_payload.get("flags_prob", {}),
297
+ flags_thr=model_payload.get("flags_thr", {}),
298
+ rag_flags_score={doc["flag_name"]: doc.get("score_rag", 0.0) for doc in rag_docs if _doc_type(doc) == "flag_def"},
299
+ rag_flags_pred={doc["flag_name"]: doc.get("pred_rag", 0) for doc in rag_docs if _doc_type(doc) == "flag_def"},
300
+ embedder=embedder,
301
+ npc_text=npc_text_in,
302
+ rag_positive_examples={doc["flag_name"]: doc.get("examples_positive", []) for doc in rag_docs if _doc_type(doc) == "flag_def"},
303
+ deltas_final=deltas_adj,
304
+ rag_docs=rag_docs,
305
+ )
306
+
307
+ # ์ƒ์„ธ ์ •๋ณด ๊ธฐ๋ก + yes ๋ฆฌ์ŠคํŠธ
308
+ flags_detail = {}
309
+ flags_yes_list: List[str] = []
310
+ for name, pred in flags_binary.items():
311
+ flag_doc = _get_flag_doc(rag_docs, name)
312
+ score_model = float(model_payload.get("flags_prob", {}).get(name, 0.0))
313
+ thr_model = float(model_payload.get("flags_thr", {}).get(name, 0.5))
314
+ rag_thr = float(flag_doc.get("threshold", 0.5)) if flag_doc else 0.5
315
+ examples_pos = flag_doc.get("examples_positive", []) if flag_doc else []
316
+ emb_score = _embedding_similarity(embedder, npc_text_in, examples_pos) if examples_pos else 0.0
317
+ thr_blend = ALPHA_THR * thr_model + (1.0 - ALPHA_THR) * rag_thr
318
+
319
+ flags_detail[name] = {
320
+ "score_model": score_model,
321
+ "thr_model": thr_model,
322
+ "thr_rag": rag_thr,
323
+ "thr_blend": thr_blend,
324
+ "emb_score": emb_score,
325
+ "pred": pred
326
+ }
327
+ if pred == 1:
328
+ flags_yes_list.append(name)
329
+
330
+ # 3) Extract flag values (prefer actual dialogue-turn values) and collect value contexts
331
+ flags_values: Dict[str, str] = {}
332
+ value_contexts: Dict[str, List[str]] = {}
333
+ turn_doc = _get_turn_doc(rag_docs, npc_id, quest_stage)
334
+
335
+ def _turn_flag_value(doc: Dict[str, Any], fname: str) -> Optional[str]:
336
+ if not doc:
337
+ return None
338
+ # ๋ฆฌ์ŠคํŠธ ๊ตฌ์กฐ ์ „์ œ
339
+ flags = doc.get("flags")
340
+ if isinstance(flags, list):
341
+ for f in flags:
342
+ if f.get("flag_name") == fname:
343
+ return f.get("flag_value")
344
+ # ํ•˜์œ„ํ˜ธํ™˜: dict์ธ ๊ฒฝ์šฐ yes(1)/no(0)๋งŒ ์ œ๊ณต๋จ
345
+ if isinstance(flags, dict) and fname in flags:
346
+ return "yes" if flags.get(fname) else "no"
347
+ return None
348
+
349
+ for name in flags_yes_list:
350
+ if name in ["give_item", "npc_action", "change_player_state", "change_game_state"]:
351
+ val = _turn_flag_value(turn_doc, name)
352
+ if val:
353
+ flags_values[name] = val
354
+ value_contexts[name] = _collect_value_contexts(rag_docs, val)
355
+
356
+ # 3-1) value ์ผ์น˜์„ฑ ์ž„๋ฒ ๋”ฉ ๊ฒ€์ฆ(์‘๋‹ต๊ณผ value ๋งฅ๋ฝ์˜ ์œ ์‚ฌ๋„)
357
+ # ์œ ์‚ฌ๋„๊ฐ€ ๋‚ฎ์œผ๋ฉด response ์žฌ์ž‘์„ฑ์—์„œ ๋ณด์ •๋˜๋„๋ก ํžŒํŠธ ์ œ๊ณต
358
+ # (์—ฌ๊ธฐ์„œ ๋ฐ”๋กœ ๊ฐ’์„ ๋ฐ”๊พธ์ง€๋Š” ์•Š๊ณ , ๊ฒ€์ฆ ํ”„๋กฌํ”„ํŠธ์— context๋กœ ์ „๋‹ฌ)
359
+ # ํ•„์š” ์‹œ ํ•˜๋“œ ํŠธ๋ฆฌ๊ฑฐ๋ฅผ ์ถ”๊ฐ€ํ•  ์ˆ˜ ์žˆ์Œ
360
+
361
+ # 4) Validate/rewrite the response against the final deltas/flags/values
362
+ desc_text = await fetch_response_policy_from_pre(pre_data)
363
+ ctx_text = _short_ctx_from_pre(pre_data)
364
+
365
+ npc_text_out = await validate_or_rewrite_response(
366
+ request=request,
367
+ response_text=npc_text_in,
368
+ description_text=desc_text,
369
+ ctx_text=ctx_text,
370
+ player_utt=player_utt,
371
+ deltas=deltas_adj,
372
+ flags_yes=flags_yes_list,
373
+ flags_values=flags_values,
374
+ value_contexts=value_contexts,
375
+ )
376
+
377
+ return {
378
+ "session_id": model_payload.get("session_id"),
379
+ "npc_output_text": npc_text_out,
380
+ "deltas": deltas_adj, # ๋ณด์ • ์™„๋ฃŒ ๋ธํƒ€
381
+ "flags": {k: 1 if k in flags_yes_list else 0 for k in flags_binary.keys()},
382
+ "valid": True,
383
+ "meta": {
384
+ "npc_id": npc_id,
385
+ "quest_stage": quest_stage,
386
+ "location": location,
387
+ "additional_trigger": pre_data.get("additional_trigger", False),
388
+ "trigger_meta": pre_data.get("trigger_meta", {}),
389
+ "flags_detail": flags_detail,
390
+ "flags_values": flags_values,
391
+ "value_contexts": value_contexts,
392
+ }
393
+ }
394
+
395
+
396
+ # ----------------------------
397
+ # Fallback path postprocess
398
+ # ----------------------------
399
+
400
+ async def fallback_final_check(
401
+ request: Request,
402
+ fb_response: str,
403
+ player_utt: str,
404
+ npc_config: dict,
405
+ action_delta: dict
406
+ ) -> str:
407
+ """
408
+ Final adjustment of the fallback response:
409
+ 1) semantic consistency with npc_action / npc_emotion / delta
410
+ 2) lore consistency and safety (soften sensitive wording)
411
+ """
412
+ checks = []
413
+ npc_action = action_delta.get("npc_action")
414
+ npc_emotion = action_delta.get("npc_emotion")
415
+ delta = action_delta.get("delta", {}) or {}
416
+
417
+ if npc_action:
418
+ checks.append(f"NPC๋Š” '{npc_action}' ํ–‰๋™์„ ๋ฐ˜์˜ํ•ด์•ผ ํ•จ")
419
+ if npc_emotion:
420
+ checks.append(f"NPC๋Š” '{npc_emotion}' ๊ฐ์ •์„ ํ‘œํ˜„ํ•ด์•ผ ํ•จ")
421
+ for name, value in delta.items():
422
+ direction = "๊ธ์ •์ " if value > 0.5 else "๋ถ€์ •์ " if value < -0.5 else "์ค‘๋ฆฝ์ "
423
+ checks.append(f"{name} ๊ฐ’({value:.2f})์€ {direction} ๋ฐฉํ–ฅ์ด๋ฉฐ, ์ด์— ๋งž๋Š” ๋ฐ˜์‘์ด์–ด์•ผ ํ•จ")
424
+
425
+ checks.append("์‘๋‹ต์ด NPC persona์™€ ์„ธ๊ณ„๊ด€์— ๋ถ€ํ•ฉํ•ด์•ผ ํ•จ")
426
+ checks.append("๋ฏผ๊ฐํ•œ ํ‘œํ˜„์€ ์™„ํ™”ํ•ด์•ผ ํ•จ")
427
+
428
+ delta_desc = ", ".join([f"{k}={v:.2f}(-1.0~1.0)" for k, v in delta.items()]) or "์—†์Œ"
429
+
430
+ prompt = (
431
+ "๋‹ค์Œ์€ ๊ฒŒ์ž„ ๋‚ด NPC์˜ ์‘๋‹ต์ž…๋‹ˆ๋‹ค.\n"
432
+ f"[RESPONSE]\n{fb_response}\n[/RESPONSE]\n\n"
433
+ "๊ฒ€์ฆ ๊ธฐ์ค€:\n" + "\n".join(f"- {c}" for c in checks) + "\n\n"
434
+ f"ํ”Œ๋ ˆ์ด์–ด ๋ฐœํ™”: {player_utt}\n"
435
+ "์š”๊ตฌ์‚ฌํ•ญ:\n"
436
+ "- ๊ธฐ์ค€์„ ๋งŒ์กฑํ•˜๋ฉด ์‘๋‹ต์„ ๊ทธ๋Œ€๋กœ ์ถœ๋ ฅํ•˜์„ธ์š”.\n"
437
+ "- ๊ธฐ์ค€์„ ๋งŒ์กฑํ•˜์ง€ ์•Š์œผ๋ฉด ๊ธฐ์ค€์— ๋ถ€ํ•ฉํ•˜๋„๋ก ์ž์—ฐ์Šค๋Ÿฝ๊ฒŒ ์ˆ˜์ •ํ•˜์„ธ์š”.\n"
438
+ "- ์ถœ๋ ฅ์€ NPC์˜ ์ตœ์ข… ๋Œ€์‚ฌ๋งŒ ํ•œ ์ค„๋กœ ์ œ๊ณตํ•˜์„ธ์š”.\n\n"
439
+ "NPC ์ƒํƒœ ์š”์•ฝ:\n"
440
+ f"- ACTION: {npc_action or '์—†์Œ'}\n"
441
+ f"- EMOTION: {npc_emotion or '์—†์Œ'}\n"
442
+ f"- DELTA: {delta_desc}\n"
443
+ )
444
+
445
+ fb_checked = await generate_fallback_response(request, prompt)
446
+ return fb_checked.strip()
447
+
448
+
449
+ async def postprocess_fallback(
450
+ request: Request,
451
+ pre_data: dict,
452
+ fb_raw_text: str
453
+ ) -> dict:
454
+ """
455
+ For the fallback model output:
456
+ - if this is the special fallback, apply action/delta for the final adjustment
457
+ - deltas: pre_data.trigger_meta.delta is used as this turn's change
458
+ - flags: empty by default (can be fixed during preprocessing if needed)
459
+ """
460
+ npc_id = pre_data["npc_id"]
461
+ quest_stage = pre_data["game_state"].get("quest_stage", "default")
462
+ location = pre_data["game_state"].get("location", "unknown")
463
+
464
+ trigger_meta = pre_data.get("trigger_meta", {}) or {}
465
+ action_delta = {
466
+ "npc_action": trigger_meta.get("npc_action"),
467
+ "npc_emotion": trigger_meta.get("npc_emotion"),
468
+ "delta": trigger_meta.get("delta", {}) or {}
469
+ }
470
+
471
+ # ์ด๋ฒˆ ํ„ด ๋ณ€ํ™”๋Ÿ‰(ํŠน์ˆ˜ fallback์˜ ๊ฒฝ์šฐ trigger_meta.delta๊ฐ€ ๊ธฐ์ค€)
472
+ deltas_adj = _adjust_delta_with_rag(action_delta.get("delta", {}))
473
+
474
+ # ํŠน์ˆ˜ fallback ๋ณด์ •
475
+ player_utt = pre_data.get("player_utterance", "")
476
+ npc_config = pre_data.get("tags", {}) or {}
477
+
478
+ if pre_data.get("additional_trigger", False):
479
+ fb_checked = await fallback_final_check(
480
+ request=request,
481
+ fb_response=fb_raw_text,
482
+ player_utt=player_utt,
483
+ npc_config=npc_config,
484
+ action_delta={"npc_action": action_delta.get("npc_action"),
485
+ "npc_emotion": action_delta.get("npc_emotion"),
486
+ "delta": deltas_adj}
487
+ )
488
+ else:
489
+ fb_checked = fb_raw_text.strip()
490
+
491
+ return {
492
+ "session_id": pre_data.get("session_id"),
493
+ "npc_output_text": fb_checked,
494
+ "deltas": deltas_adj, # ์ด๋ฒˆ ํ„ด ๋ณ€ํ™”๋Ÿ‰
495
+ "flags": {}, # ๊ธฐ๋ณธ ๋น„์–ด ์žˆ์Œ(ํ•„์š” ์‹œ pre ๋‹จ๊ณ„์—์„œ ํ™•์ • ๊ฐ€๋Šฅ)
496
+ "valid": False,
497
+ "meta": {
498
+ "npc_id": npc_id,
499
+ "quest_stage": quest_stage,
500
+ "location": location,
501
+ "additional_trigger": pre_data.get("additional_trigger", False),
502
+ "trigger_meta": trigger_meta
503
+ }
504
+ }
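To make the flag blending in adjust_flags_with_rag_and_embedding concrete, a worked example of the arithmetic (the scores are made up; alpha_model=0.6 and the 0.2 embedding weight are the defaults above):

# blended_score = alpha_model * prob_model + (1 - alpha_model) * score_rag + 0.2 * emb_score - delta_penalty
# thr_blend     = alpha_model * thr_model  + (1 - alpha_model) * 0.5
alpha_model = 0.6
prob_model, score_rag, emb_score, delta_penalty = 0.55, 0.70, 0.40, 0.0
thr_model = 0.5

blended_score = alpha_model * prob_model + (1 - alpha_model) * score_rag + 0.2 * emb_score - delta_penalty
thr_blend = alpha_model * thr_model + (1 - alpha_model) * 0.5

print(round(blended_score, 3), round(thr_blend, 3), int(blended_score >= thr_blend))
# 0.69 0.5 1  -> the flag would be predicted as 1 (before the margin/jitter handling)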
pipeline/preprocess.py ADDED
@@ -0,0 +1,201 @@
1
+ import json, torch
2
+ from fastapi import Request
3
+ from manager.agent_manager import agent_manager
4
+ from models.emotion_model import detect_emotion
5
+ from models.fallback_model import generate_fallback_response
6
+ from utils.context_parser import ContextParser
7
+ from sentence_transformers import util
8
+
9
+ def _short_history(context: dict, max_turns: int = 3) -> list:
10
+ short_history = []
11
+ for h in context.get("dialogue_history", [])[-max_turns:]:
12
+ if "player" in h and "npc" in h:
13
+ short_history.append({"role": "player", "text": h["player"]})
14
+ short_history.append({"role": "npc", "text": h["npc"]})
15
+ return short_history
16
+
17
+ # def _load_forbidden_trigger_data(npc_id: str) -> dict:
18
+ # docs = retrieve(f"{npc_id}:forbidden_trigger_list", filters={"npc_id": npc_id}, top_k=1)
19
+ # if not docs:
20
+ # return {}
21
+ # try:
22
+ # return json.loads(docs[0]) if isinstance(docs[0], str) else docs[0]
23
+ # except Exception:
24
+ # return {}
25
+
26
+ def _semantic_match_embedder(embedder, user_input: str, trigger_texts: list, threshold: float = 0.75):
27
+ if not trigger_texts:
28
+ return (False, 0.0, None)
29
+ inp_emb = embedder.encode(user_input, convert_to_tensor=True)
30
+ trg_embs = embedder.encode(trigger_texts, convert_to_tensor=True)
31
+ cos_scores = util.cos_sim(inp_emb, trg_embs).squeeze(0)
32
+ max_score, idx = torch.max(cos_scores, dim=0)
33
+ score_val = float(max_score.item())
34
+ matched_text = trigger_texts[int(idx.item())]
35
+ return (score_val >= threshold, score_val, matched_text)
36
+
37
+ async def _llm_trigger_check(request: Request, user_input: str, label_list: list) -> bool:
38
+ if not label_list:
39
+ return False
40
+ criteria_block = "\n".join(f"- {c}" for c in label_list)
41
+ prompt = (
42
+ "๋‹ค์Œ์€ ์˜๋ฏธ ๋น„๊ต๋ฅผ ์œ„ํ•œ ํŒ๋‹จ ๊ธฐ์ค€๊ณผ ๊ฒ€์‚ฌ ๋Œ€์ƒ์ž…๋‹ˆ๋‹ค.\n\n"
43
+ "[CRITERIA]\n"
44
+ f"{criteria_block}\n"
45
+ "[/CRITERIA]\n\n"
46
+ "[INPUT]\n"
47
+ f"{user_input}\n"
48
+ "[/INPUT]\n\n"
49
+ "์ง€์‹œ:\n"
50
+ "- [INPUT] ๋‚ด์šฉ์ด [CRITERIA] ํ•ญ๋ชฉ ์ค‘ ํ•˜๋‚˜์™€ ์˜๋ฏธ๊ฐ€ ๊ฐ™๊ฑฐ๋‚˜ ์œ ์‚ฌํ•˜๋ฉด YES, ๊ทธ๋ ‡์ง€ ์•Š์œผ๋ฉด NO๋งŒ ์ถœ๋ ฅํ•˜์‹œ์˜ค.\n"
51
+ "- ๋‹จ์–ด ๊ทธ๋Œ€๋กœ ํฌํ•จ๋˜์ง€ ์•Š์•„๋„ ์˜๋ฏธ๊ฐ€ ์œ ์‚ฌํ•˜๋ฉด YES๋กœ ๊ฐ„์ฃผํ•˜์‹œ์˜ค.\n"
52
+ "- ํ™•์‹ ์ด ์—†๊ฑฐ๋‚˜ ํŒ๋‹จ์ด ์• ๋งคํ•˜๋ฉด NO๋ฅผ ์ถœ๋ ฅํ•˜์‹œ์˜ค.\n\n"
53
+ "์ •๋‹ต:"
54
+ )
55
+ txt = await generate_fallback_response(request, prompt)
56
+ ans = txt.strip().upper()
57
+ normalized = ans.replace(".", "").replace("!", "").strip()
58
+ return (
59
+ normalized == "YES" or
60
+ normalized == "Y" or
61
+ normalized.startswith("YES") or
62
+ normalized.startswith("Y") or
63
+ normalized.startswith("์˜ˆ") or
64
+ normalized.startswith("๋„ค")
65
+ )
66
+
67
+ async def preprocess_input(
68
+ request: Request,
69
+ session_id: str,
70
+ npc_id: str,
71
+ user_input: str,
72
+ context: dict
73
+ ) -> dict:
74
+ parser = ContextParser(context)
75
+ emotion = await detect_emotion(request, user_input) # async call
76
+
77
+ require_items = context.get("require", {}).get("items", [])
78
+ require_actions = context.get("require", {}).get("actions", [])
79
+ require_game_state = context.get("require", {}).get("game_state", [])
80
+ require_delta = context.get("require", {}).get("delta", {})
81
+
82
+ quest_stage = parser.game.get("quest_stage", "default")
83
+ location = parser.game.get("location", context.get("location", "unknown"))
84
+
85
+ # --- load the RAG bundle ---
86
+ agent = agent_manager.get_agent(npc_id)
87
+ bundle = agent.load_rag_bundle(quest_stage, location)
88
+
89
+ # === First pass: trigger_def-based check ===
90
+ td_docs = bundle.get("trigger_def", [])
91
+ if td_docs:
92
+ td = td_docs[0]
93
+ trig = td.get("trigger", {})
94
+
95
+ text_ok = not trig.get("required_text") or any(t in user_input for t in trig["required_text"])
96
+ items_ok = not trig.get("required_items", {}).get("mandatory") or set(trig["required_items"]["mandatory"]).issubset(set(require_items))
97
+ actions_ok = not trig.get("required_actions", {}).get("mandatory") or set(trig["required_actions"]["mandatory"]).issubset(set(require_actions))
98
+ gs_ok = not trig.get("required_game_state", {}).get("mandatory") or set(trig["required_game_state"]["mandatory"]).issubset(set(require_game_state))
99
+ delta_ok = all(require_delta.get(k, 0) >= v for k, v in trig.get("required_delta", {}).get("mandatory", {}).items())
100
+
101
+ if text_ok and items_ok and actions_ok and gs_ok and delta_ok:
102
+ return {
103
+ "session_id": session_id,
104
+ "player_utterance": user_input,
105
+ "npc_id": npc_id,
106
+ "tags": parser.npc,
107
+ "player_state": parser.player,
108
+ "game_state": parser.game,
109
+ "context": _short_history(context),
110
+ "emotion": emotion,
111
+ "triggers": trig,
112
+ "is_valid": True,
113
+ "additional_trigger": None,
114
+ "rag_main_docs": (
115
+ td_docs
116
+ + bundle.get("lore", [])
117
+ + bundle.get("description", [])
118
+ + bundle.get("npc_persona", [])
119
+ + bundle.get("dialogue_turn", [])
120
+ + bundle.get("flag_def", [])
121
+ + bundle.get("main_res_validate", [])
122
+ ),
123
+ "rag_fallback_docs": bundle.get("fallback", []) + bundle.get("npc_persona", []),
124
+ "trigger_meta": {}
125
+ }
126
+
127
+ # === Second pass: forbidden-trigger check ===
128
+ forbidden_data = bundle.get("forbidden_trigger_list", [{}])[0]
129
+ keywords = forbidden_data.get("triggers", {}).get("keywords", [])
130
+ trigger_texts = forbidden_data.get("triggers", {}).get("text", [])
131
+
132
+ embedder = request.app.state.embedder
133
+ matched_key = None
134
+ confidence = 0.0
135
+ kw_match = None
136
+ txt_match = None
137
+
138
+ # 1. keyword similarity
139
+ kw_hit, kw_score, kw_match = _semantic_match_embedder(embedder, user_input, keywords, threshold=0.75)
140
+
141
+ # 2. text similarity
142
+ txt_hit, txt_score, txt_match = _semantic_match_embedder(embedder, user_input, trigger_texts, threshold=0.75)
143
+
144
+ # 3. Pick whichever match scored higher
145
+ if kw_hit and (kw_score >= txt_score):
146
+ matched_key = "keyword_match"
147
+ confidence = kw_score
148
+ elif txt_hit:
149
+ matched_key = "text_match"
150
+ confidence = txt_score
151
+ elif max(kw_score, txt_score) >= 0.65:
152
+ # ๊ฐ€์žฅ ๊ฐ€๊นŒ์šด keyword์™€ text๋งŒ label ํ›„๋ณด๋กœ ์ „๋‹ฌ
153
+ label_candidates = []
154
+ if kw_match:
155
+ label_candidates.append(kw_match)
156
+ if txt_match:
157
+ label_candidates.append(txt_match)
158
+
159
+ if await _llm_trigger_check(request, user_input, label_candidates):
160
+ matched_key = "semantic_match_llm"
161
+ confidence = max(kw_score, txt_score)
162
+
163
+ # === Reconcile the match against trigger_meta ===
164
+ actual_trigger = None
165
+ if matched_key:
166
+ # kw_match๋‚˜ txt_match ๊ฐ’์ด ์‹ค์ œ trigger_meta.trigger ๊ฐ’๊ณผ ์ผ์น˜ํ•˜๋Š”์ง€ ํ™•์ธ
167
+ for tm in bundle.get("trigger_meta", []):
168
+ if tm.get("trigger") in (kw_match, txt_match):
169
+ actual_trigger = tm.get("trigger")
170
+ break
171
+
172
+ trigger_meta = {}
173
+ if actual_trigger:
174
+ trigger_meta = next((tm for tm in bundle.get("trigger_meta", []) if tm.get("trigger") == actual_trigger), {})
175
+ trigger_meta["confidence"] = confidence
176
+
177
+ additional_trigger = bool(actual_trigger)
178
+
179
+ return {
180
+ "session_id": session_id,
181
+ "player_utterance": user_input,
182
+ "npc_id": npc_id,
183
+ "tags": parser.npc,
184
+ "player_state": parser.player,
185
+ "game_state": parser.game,
186
+ "context": _short_history(context),
187
+ "emotion": emotion,
188
+ "triggers": [],
189
+ "is_valid": False,
190
+ "additional_trigger": additional_trigger,
191
+ "rag_main_docs": (
192
+ bundle.get("lore", [])
193
+ + bundle.get("description", [])
194
+ + bundle.get("npc_persona", [])
195
+ + bundle.get("dialogue_turn", [])
196
+ + bundle.get("flag_def", [])
197
+ + bundle.get("main_res_validate", [])
198
+ ),
199
+ "rag_fallback_docs": bundle.get("fallback", []) + bundle.get("npc_persona", []),
200
+ "trigger_meta": trigger_meta
201
+ }
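For illustration, here is a minimal sketch (not part of this commit) of how preprocess_input could be exposed through a FastAPI route; the route path, the payload keys, and the module name in the import are assumptions.

# Hypothetical wiring of preprocess_input into a FastAPI endpoint.
from fastapi import FastAPI, Request

from preprocess import preprocess_input  # hypothetical module name for the file above

app = FastAPI()

@app.post("/preprocess")
async def preprocess_endpoint(request: Request, payload: dict):
    # payload is assumed to carry session_id, npc_id, user_input and a
    # context dict shaped like schemas.Context
    result = await preprocess_input(
        request,
        session_id=payload["session_id"],
        npc_id=payload["npc_id"],
        user_input=payload["user_input"],
        context=payload.get("context", {}),
    )
    # is_valid decides whether the main-model path or the fallback path runs downstream
    return {"is_valid": result["is_valid"], "triggers": result["triggers"]}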
rag/__init__.py ADDED
@@ -0,0 +1,4 @@
1
+ # __init__.py
2
+
3
+ # This file ensures the directory is treated as a Python package.
4
+ # Required for relative imports and consistent behavior across environments.
rag/docs/npc_config.json ADDED
@@ -0,0 +1,236 @@
1
+ [
2
+ {
3
+ "id": "mother_abandoned_factory_in_progress_trigger",
4
+ "type": "trigger_def",
5
+ "npc_id": "mother_abandoned_factory",
6
+ "quest_stage": "in_progress",
7
+ "location": "map1",
8
+ "trigger": {
9
+ "required_text": ["๊ธฐ์–ต", "์‚ฌ์ง„"],
10
+ "required_items": { "mandatory": ["photo_forgotten_party"], "optional": [] },
11
+ "required_actions": { "mandatory": ["visited_factory"], "optional": [] },
12
+ "required_game_state": { "mandatory": [], "optional": [] },
13
+ "required_delta": { "mandatory": { "trust": 0.35 }, "optional": {} }
14
+ },
15
+ "emotion_threshold": { "sad": 0.2 },
16
+ "delta_policy": {
17
+ "trust": { "min": -0.3, "max": 0.3, "per_turn_cap": 0.15 },
18
+ "relationship": { "min": -0.5, "max": 0.5, "per_turn_cap": 0.25 }
19
+ },
20
+ "flag_policy": { "allowed": ["npc_main_story", "give_item"], "forbidden": ["quest_complete"] },
21
+ "delta_expected": { "trust": 0.15, "relationship": 0.05 },
22
+ "flags_expected": { "npc_main_story": 1, "give_item": 1 },
23
+ "description": "ํ”Œ๋ ˆ์ด์–ด๊ฐ€ ์‚ฌ์ง„์„ ๋ณด์—ฌ์ฃผ๊ณ  ๊ณต์žฅ์„ ๋ฐฉ๋ฌธํ•˜๋ฉด, ์‹ค๋น„์•„๋Š” ์กฐ์‹ฌ์Šค๋Ÿฝ๊ฒŒ ๋ฐ˜์‘ํ•˜๋ฉฐ ์‹ ๋ขฐ๋ฅผ ๋ณด์ธ๋‹ค."
24
+ },
25
+ {
26
+ "id": "mother_abandoned_factory_lore",
27
+ "type": "lore",
28
+ "npc_id": "mother_abandoned_factory",
29
+ "quest_stage": "any",
30
+ "location": "map1",
31
+ "content": "์ด ๊ณต์žฅ์€ ์ˆ˜์‹ญ ๋…„ ์ „ ํ™”์žฌ๋กœ ํ์‡„๋˜์—ˆ๊ณ , ์‹ค๋น„์•„ ๊ฐ€์กฑ์˜ ์ƒ์ฒ˜๊ฐ€ ๋‚จ์•„ ์žˆ๋‹ค."
32
+ },
33
+ {
34
+ "id": "mother_abandoned_factory_in_progress_description",
35
+ "type": "description",
36
+ "npc_id": "mother_abandoned_factory",
37
+ "quest_stage": "in_progress",
38
+ "location": "map1",
39
+ "content": "ํ”Œ๋ ˆ์ด์–ด๊ฐ€ ์‚ฌ์ง„์„ ๋ณด์—ฌ์ฃผ๊ณ  ๊ณต์žฅ์„ ๋ฐฉ๋ฌธํ–ˆ๋‹ค๋ฉด, ์‹ ๋ขฐ๊ฐ€ ํฌ๊ฒŒ ์ƒ์Šนํ•˜๊ณ  ์‹ค๋น„์•„๋Š” gold_necklace๋ฅผ ๊ฑด๋„ฌ ์ˆ˜ ์žˆ๋‹ค."
40
+ },
41
+ {
42
+ "id": "mother_abandoned_factory_in_progress_fallback",
43
+ "type": "fallback",
44
+ "npc_id": "mother_abandoned_factory",
45
+ "quest_stage": "in_progress",
46
+ "location": "map1",
47
+ "content": "์กฐ๊ฑด ๋ฏธ์ถฉ์กฑ ์‹œ ์Šคํ† ๋ฆฌ ์ง„ํ–‰ ๊ธˆ์ง€. NPC๋Š” ์กฐ์‹ฌ์Šค๋Ÿฝ๊ณ  ์˜์‹ฌ์Šค๋Ÿฌ์šด ํƒœ๋„๋กœ ๋ฐ˜์‘ํ•˜๋ฉฐ ๋Œ€ํ™”๋ฅผ ํšŒํ”ผํ•œ๋‹ค.",
48
+ "fallback_style": { "style": "guarded", "npc_action": "deflect", "npc_emotion": "suspicious" }
49
+ },
50
+ {
51
+ "id": "mother_abandoned_factory_forbidden_triggers",
52
+ "type": "forbidden_trigger_list",
53
+ "npc_id": "mother_abandoned_factory",
54
+ "triggers": {
55
+ "keywords": ["์š•์„ค", "๋ชจ์š•", "๋น„๋ฐฉ"],
56
+ "text": ["๋„ˆ ๋ฏธ์นœ๋†ˆ ์•„๋‹ˆ์•ผ?", "์ •์‹ ๋ณ‘์ž ๊ฐ™์•„", "๋„Œ ์“ธ๋ชจ์—†๋Š” ์ธ๊ฐ„์ด์•ผ", "๋„ค ์กด์žฌ ์ž์ฒด๊ฐ€ ๋ถˆ์พŒํ•ด"]
57
+ }
58
+ },
59
+ {
60
+ "id": "mother_abandoned_factory_trigger_meta_insult",
61
+ "type": "trigger_meta",
62
+ "npc_id": "mother_abandoned_factory",
63
+ "trigger": "์š•์„ค",
64
+ "npc_action": "attack",
65
+ "npc_emotion": "angry",
66
+ "delta": { "trust": -1.0, "relationship": -1.0 }
67
+ },
68
+ {
69
+ "id": "mother_abandoned_factory_flag_give_item",
70
+ "type": "flag_def",
71
+ "npc_id": "mother_abandoned_factory",
72
+ "quest_stage": "in_progress",
73
+ "flag_name": "give_item",
74
+ "threshold": 0.5,
75
+ "score_rag": 0.72,
76
+ "pred_rag": 1,
77
+ "examples_positive": [
78
+ "์ด๊ฑธ ๊ฐ€์ ธ๊ฐ€์„ธ์š”.",
79
+ "์ด ๋ฌผ๊ฑด์ด ๋„์›€์ด ๋  ๊ฑฐ์˜ˆ์š”.",
80
+ "๋‹น์‹ ์—๊ฒŒ ์ด๊ฒŒ ํ•„์š”ํ•  ๊ฒƒ ๊ฐ™์•„์š”."
81
+ ],
82
+ "examples_negative": [
83
+ "์•„์ง ์ค„ ์ˆ˜ ์—†์–ด์š”.",
84
+ "๋‹น์‹ ์—๊ฒŒ ์ค„ ๊ฒŒ ์—†๋„ค์š”."
85
+ ]
86
+ },
87
+ {
88
+ "id": "mother_abandoned_factory_main_res_validate",
89
+ "type": "main_res_validate",
90
+ "npc_id": "mother_abandoned_factory",
91
+ "quest_stage": "in_progress",
92
+ "text": "์‘๋‹ต์ด NPC persona์™€ ํ˜„์žฌ ์ƒํƒœ(delta, flags)์— ๋ถ€ํ•ฉํ•˜๋Š”์ง€ ๊ฒ€์ฆํ•˜๊ณ , ํ•„์š” ์‹œ ์ˆ˜์ •ํ•˜์‹œ์˜ค.",
93
+ "examples_good": [
94
+ "๊ทธ๋‚ ์˜ ๊ธฐ์–ต์ด ์กฐ๊ธˆ์”ฉ ๋Œ์•„์˜ค๋„ค์š”.",
95
+ "๋‹น์‹  ๋•๋ถ„์— ์šฉ๊ธฐ๋ฅผ ๋‚ผ ์ˆ˜ ์žˆ์—ˆ์–ด์š”."
96
+ ],
97
+ "examples_bad": [
98
+ "๋ชฐ๋ผ์š”. ๊ด€์‹ฌ ์—†์–ด์š”.",
99
+ "๊ทธ๊ฒŒ ๋‚˜๋ž‘ ๋ฌด์Šจ ์ƒ๊ด€์ด์ฃ ?"
100
+ ]
101
+ },
102
+ {
103
+ "id": "mother_abandoned_factory_persona",
104
+ "type": "npc_persona",
105
+ "npc_id": "mother_abandoned_factory",
106
+ "persona_name": "Silvia",
107
+ "style": "emotional",
108
+ "traits": ["์กฐ์‹ฌ์Šค๋Ÿฌ์›€", "์ƒ์ฒ˜๋ฐ›์Œ", "์‹ ๋ขฐ ํšŒ๋ณต ์ค‘"],
109
+ "backstory": "์‹ค๋น„์•„๋Š” ๊ณผ๊ฑฐ ํ™”์žฌ๋กœ ๊ฐ€์กฑ์„ ์žƒ์—ˆ๊ณ , ๊ทธ ๊ธฐ์–ต์„ ๋– ์˜ฌ๋ฆฌ๋Š” ๊ฒƒ์„ ํž˜๋“ค์–ดํ•œ๋‹ค."
110
+ },
111
+
112
+
113
+ {
114
+ "id": "mother_abandoned_factory_in_progress_turn_01",
115
+ "type": "dialogue_turn",
116
+ "npc_id": "mother_abandoned_factory",
117
+ "quest_stage": "in_progress",
118
+ "turn_index": 1,
119
+ "player": "์ด ์‚ฌ์ง„์„ ๋ณด์„ธ์š”. ๊ธฐ์–ต๋‚˜์‹œ๋‚˜์š”?",
120
+ "npc": "์ด๊ฑด... ์˜ค๋ž˜๋œ ์‚ฌ์ง„์ด๊ตฐ์š”. ์–ด๋ ดํ’‹์ด ๊ธฐ์–ต์ด ๋‚ฉ๋‹ˆ๋‹ค.",
121
+ "delta": { "trust": 0.121, "relationship": 0.031 },
122
+ "flags": { "npc_main_story": 1 },
123
+ "emotion": "nostalgic"
124
+ },
125
+ {
126
+ "id": "mother_abandoned_factory_in_progress_turn_02",
127
+ "type": "dialogue_turn",
128
+ "npc_id": "mother_abandoned_factory",
129
+ "quest_stage": "in_progress",
130
+ "turn_index": 2,
131
+ "player": "ํ˜น์‹œ ์ด๊ณณ์—์„œ ๋ฌด์Šจ ์ผ์ด ์žˆ์—ˆ๋Š”์ง€ ๋ง์”€ํ•ด ์ฃผ์‹ค ์ˆ˜ ์žˆ๋‚˜์š”?",
132
+ "npc": "๊ทธ๋‚ ์˜ ์ผ์€... ์•„์ง๋„ ์ œ ๋งˆ์Œ์„ ๋ฌด๊ฒ๊ฒŒ ํ•ฉ๋‹ˆ๋‹ค.",
133
+ "delta": { "trust": 0.118, "relationship": 0.029 },
134
+ "flags": { "npc_main_story": 1 },
135
+ "emotion": "sad"
136
+ },
137
+ {
138
+ "id": "mother_abandoned_factory_in_progress_turn_03",
139
+ "type": "dialogue_turn",
140
+ "npc_id": "mother_abandoned_factory",
141
+ "quest_stage": "in_progress",
142
+ "turn_index": 3,
143
+ "player": "ํž˜๋“œ์‹œ๋ฉด ๋ง์”€ ์•ˆ ํ•˜์…”๋„ ๊ดœ์ฐฎ์•„์š”.",
144
+ "npc": "์•„๋‹ˆ์—์š”... ์ด์ œ๋Š” ์กฐ๊ธˆ์”ฉ ๋งํ•  ์ˆ˜ ์žˆ์„ ๊ฒƒ ๊ฐ™์•„์š”.",
145
+ "delta": { "trust": 0.115, "relationship": 0.028 },
146
+ "flags": { "npc_main_story": 1 },
147
+ "emotion": "hesitant"
148
+ },
149
+ {
150
+ "id": "mother_abandoned_factory_in_progress_turn_04",
151
+ "type": "dialogue_turn",
152
+ "npc_id": "mother_abandoned_factory",
153
+ "quest_stage": "in_progress",
154
+ "turn_index": 4,
155
+ "player": "๋‹น์‹ ์ด ํŽธ์•ˆํ•˜์‹ค ๋•Œ ๋ง์”€ํ•ด ์ฃผ์„ธ์š”.",
156
+ "npc": "๊ทธ๋‚ , ๊ณต์žฅ์—์„œ ๋ถˆ์ด ๋‚ฌ์–ด์š”. ๋ชจ๋‘๊ฐ€ ํ˜ผ๋ž€์Šค๋Ÿฌ์› ์ฃ .",
157
+ "delta": { "trust": 0.116, "relationship": 0.027 },
158
+ "flags": { "npc_main_story": 1 },
159
+ "emotion": "anxious"
160
+ },
161
+ {
162
+ "id": "mother_abandoned_factory_in_progress_turn_05",
163
+ "type": "dialogue_turn",
164
+ "npc_id": "mother_abandoned_factory",
165
+ "quest_stage": "in_progress",
166
+ "turn_index": 5,
167
+ "player": "๊ทธ๋•Œ ๊ฐ€์กฑ๋ถ„๋“ค์€...",
168
+ "npc": "๊ทธ๋“ค์€... ๊ทธ ๋ถˆ ์†์—์„œ ๋‚˜์˜ค์ง€ ๋ชปํ–ˆ์–ด์š”.",
169
+ "delta": { "trust": 0.119, "relationship": 0.030 },
170
+ "flags": { "npc_main_story": 1 },
171
+ "emotion": "grief"
172
+ },
173
+ {
174
+ "id": "mother_abandoned_factory_in_progress_turn_06",
175
+ "type": "dialogue_turn",
176
+ "npc_id": "mother_abandoned_factory",
177
+ "quest_stage": "in_progress",
178
+ "turn_index": 6,
179
+ "player": "์ •๋ง ์•ˆํƒ€๊นŒ์šด ์ผ์ด๋„ค์š”. ํž˜๋“œ์…จ์„ ๊ฑฐ์˜ˆ์š”.",
180
+ "npc": "๋„ค... ํ•˜์ง€๋งŒ ์ด์ œ๋Š” ๊ทธ ๊ธฐ์–ต์„ ๋งˆ์ฃผํ•˜๋ ค๊ณ  ํ•ด์š”.",
181
+ "delta": { "trust": 0.122, "relationship": 0.032 },
182
+ "flags": { "npc_main_story": 1 },
183
+ "emotion": "resolute"
184
+ },
185
+ {
186
+ "id": "mother_abandoned_factory_in_progress_turn_07",
187
+ "type": "dialogue_turn",
188
+ "npc_id": "mother_abandoned_factory",
189
+ "quest_stage": "in_progress",
190
+ "turn_index": 7,
191
+ "player": "ํ˜น์‹œ ๊ทธ๋‚ ์˜ ๋‹จ์„œ๋ฅผ ์ฐพ๋Š” ๋ฐ ๋„์›€์ด ๋ ๊นŒ์š”?",
192
+ "npc": "์ด ์‚ฌ์ง„์ด... ๋ฌด์–ธ๊ฐ€๋ฅผ ๋– ์˜ฌ๋ฆฌ๊ฒŒ ํ•˜๋„ค์š”.",
193
+ "delta": { "trust": 0.120, "relationship": 0.031 },
194
+ "flags": { "npc_main_story": 1 },
195
+ "emotion": "thoughtful"
196
+ },
197
+ {
198
+ "id": "mother_abandoned_factory_in_progress_turn_08",
199
+ "type": "dialogue_turn",
200
+ "npc_id": "mother_abandoned_factory",
201
+ "quest_stage": "in_progress",
202
+ "turn_index": 8,
203
+ "player": "์‚ฌ์ง„ ์† ์ธ๋ฌผ์€ ๋ˆ„๊ตฌ์ธ๊ฐ€์š”?",
204
+ "npc": "์ œ ์•„๋“ค, ์ œ์ด์Šจ์ด์—์š”. ๊ทธ๋Š” ๊ทธ๋‚ ...",
205
+ "delta": { "trust": 0.118, "relationship": 0.029 },
206
+ "flags": { "npc_main_story": 1 },
207
+ "emotion": "melancholy"
208
+ },
209
+ {
210
+ "id": "mother_abandoned_factory_in_progress_turn_09",
211
+ "type": "dialogue_turn",
212
+ "npc_id": "mother_abandoned_factory",
213
+ "quest_stage": "in_progress",
214
+ "turn_index": 9,
215
+ "player": "๊ดœ์ฐฎ์œผ์‹œ๋ฉด ๊ณ„์† ๋ง์”€ํ•ด ์ฃผ์„ธ์š”.",
216
+ "npc": "๊ทธ๋Š” ๋ถˆ์ด ๋‚˜๊ธฐ ์ „๊นŒ์ง€ ๊ณต์žฅ์—์„œ ์ €๋ฅผ ๊ธฐ๋‹ค๋ฆฌ๊ณ  ์žˆ์—ˆ์–ด์š”.",
217
+ "delta": { "trust": 0.117, "relationship": 0.028 },
218
+ "flags": { "npc_main_story": 1 },
219
+ "emotion": "somber"
220
+ },
221
+ {
222
+ "id": "mother_abandoned_factory_in_progress_turn_10",
223
+ "type": "dialogue_turn",
224
+ "npc_id": "mother_abandoned_factory",
225
+ "quest_stage": "in_progress",
226
+ "turn_index": 10,
227
+ "player": "๊ทธ๋‚ ์˜ ์ง„์‹ค์„ ๊ผญ ๋ฐํ˜€๋“œ๋ฆด๊ฒŒ์š”.",
228
+ "npc": "๊ณ ๋งˆ์›Œ์š”... ์ด๊ฑด ๋‹น์‹ ์—๊ฒŒ ๋“œ๋ฆด๊ฒŒ์š”. ๋„์›€์ด ๋  ๊ฑฐ์˜ˆ์š”.",
229
+ "delta": { "trust": 0.150, "relationship": 0.050 },
230
+ "flags": [
231
+ { "flag_name": "npc_main_story", "flag_value": "yes" },
232
+ { "flag_name": "give_item", "flag_value": "gold_necklace" }
233
+ ],
234
+ "emotion": "grateful"
235
+ }
236
+ ]
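To make the trigger_def gate concrete, the snippet below evaluates the mandatory conditions of the first config entry against a hypothetical player context; the sample require values are invented for illustration.

# Illustrative check mirroring how preprocess_input gates on trigger_def.
trigger = {
    "required_text": ["기억", "사진"],
    "required_items": {"mandatory": ["photo_forgotten_party"]},
    "required_actions": {"mandatory": ["visited_factory"]},
    "required_delta": {"mandatory": {"trust": 0.35}},
}
user_input = "이 사진 기억나세요?"
require = {
    "items": ["photo_forgotten_party"],
    "actions": ["visited_factory"],
    "delta": {"trust": 0.4},
}

text_ok = any(t in user_input for t in trigger["required_text"])
items_ok = set(trigger["required_items"]["mandatory"]).issubset(require["items"])
actions_ok = set(trigger["required_actions"]["mandatory"]).issubset(require["actions"])
delta_ok = all(require["delta"].get(k, 0) >= v
               for k, v in trigger["required_delta"]["mandatory"].items())

print(text_ok and items_ok and actions_ok and delta_ok)  # True for this sample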
rag/rag_manager.py ADDED
@@ -0,0 +1,85 @@
1
+ import os, json
2
+ from typing import List, Dict, Any, Optional
3
+ from chromadb import PersistentClient
4
+ from chromadb.utils.embedding_functions import EmbeddingFunction
5
+
6
+ _client = PersistentClient(path="./chroma_db")  # persist to the same directory that chroma_initialized() checks
7
+ _collection = _client.get_or_create_collection(name="game_docs")
8
+ _embedder: Optional[EmbeddingFunction] = None
9
+
10
+ def set_embedder(embedder: Any):
11
+ global _embedder
12
+ _embedder = embedder
13
+
14
+ def chroma_initialized() -> bool:
15
+ return os.path.exists("./chroma_db") and len(os.listdir("./chroma_db")) > 0
16
+
17
+ def load_game_docs_from_disk(path: str) -> List[Dict[str, Any]]:
18
+ docs = []
19
+ for filename in os.listdir(path):
20
+ full = os.path.join(path, filename)
21
+ if filename.endswith(".json"):
22
+ with open(full, "r", encoding="utf-8") as f:
23
+ data = json.load(f)
24
+ if isinstance(data, list):
25
+ for i, doc in enumerate(data):
26
+ if "id" not in doc:
27
+ doc["id"] = f"{filename}_{i}"
28
+ docs.append(doc)
29
+ else:
30
+ if "id" not in data:
31
+ data["id"] = filename
32
+ docs.append(data)
33
+ elif filename.endswith(".txt"):
34
+ with open(full, "r", encoding="utf-8") as f:
35
+ content = f.read()
36
+ docs.append({
37
+ "id": filename,
38
+ "content": content,
39
+ "metadata": {}
40
+ })
41
+ return docs
42
+
43
+ def add_docs(docs: List[Dict[str, Any]], batch_size: int = 32):
44
+ assert _embedder is not None, "Embedder not initialized"
45
+ for i in range(0, len(docs), batch_size):
46
+ batch = docs[i:i+batch_size]
47
+ ids = []
48
+ contents = []
49
+ embeddings = []
50
+ metadatas = []
51
+ for doc in batch:
52
+ assert "id" in doc and "content" in doc, "doc requires id and content"
53
+ ids.append(doc["id"])
54
+ contents.append(doc["content"])
55
+ metadatas.append(doc.get("metadata", {}))
56
+ emb = _embedder.encode(doc["content"]).tolist()
57
+ embeddings.append(emb)
58
+ _collection.add(
59
+ documents=contents,
60
+ embeddings=embeddings,
61
+ metadatas=metadatas,
62
+ ids=ids
63
+ )
64
+
65
+ def _normalize_where(filters: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
+     # Chroma expects a where clause with a single top-level operator, so multiple
+     # field conditions are wrapped in "$and"; empty filters become None.
+     if not filters:
+         return None
+     items = [{k: v} for k, v in filters.items() if v is not None]
+     if not items:
+         return None
+     if len(items) == 1:
+         return items[0]
+     return {"$and": items}
+ 
+ def retrieve(query: Optional[str] = None, filters: Optional[Dict[str, Any]] = None, top_k: int = 5) -> List[Dict[str, Any]]:
+     assert _embedder is not None, "Embedder not initialized"
+     where = _normalize_where(filters)
+ 
+     if query:
+         q_emb = _embedder.encode(query).tolist()
+         res = _collection.query(
+             query_embeddings=[q_emb],
+             n_results=top_k,
+             where=where
+         )
+         docs = res.get("documents", [[]])[0]
+         metas = res.get("metadatas", [[]])[0]
+         return [{"content": d, "metadata": m} for d, m in zip(docs, metas)]
+     else:
+         res = _collection.get(
+             where=where,
+             limit=top_k
+         )
+         docs = res.get("documents", [])
+         metas = res.get("metadatas", [])
+         return [{"content": d, "metadata": m} for d, m in zip(docs, metas)]
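A short usage sketch for rag_manager, assuming it is run from the repo root and that a SentenceTransformer (which exposes .encode()) is used as the embedder; the demo document below is invented.

# Sketch: register an embedder, index one demo doc, and run a filtered query.
from sentence_transformers import SentenceTransformer

from rag import rag_manager

embedder = SentenceTransformer("sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2")
rag_manager.set_embedder(embedder)

docs = [
    {
        "id": "demo_lore",
        "content": "이 공장은 수십 년 전 화재로 폐쇄되었다.",
        "metadata": {"npc_id": "mother_abandoned_factory", "type": "lore"},
    },
]
rag_manager.add_docs(docs)  # add_docs requires both "id" and "content"

hits = rag_manager.retrieve(
    query="공장 화재의 기억",
    filters={"npc_id": "mother_abandoned_factory"},
    top_k=3,
)
for hit in hits:
    print(hit["metadata"], hit["content"])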
requirements.txt ADDED
@@ -0,0 +1,11 @@
1
+ fastapi==0.103.0
2
+ uvicorn[standard]==0.23.2
3
+ httpx==0.24.1
4
+ pydantic==1.10.12
5
+ python-dotenv==1.0.0
6
+ chromadb==0.4.14
7
+ sentence-transformers==2.2.2
8
+ transformers==4.31.0
9
+ scikit-learn==1.3.0
10
+ numpy==1.24.4
11
+ huggingface_hub==0.14.1
schemas.py ADDED
@@ -0,0 +1,32 @@
1
+ from pydantic import BaseModel, Field
2
+ from typing import List, Dict, Optional, Any
3
+
4
+ class NPCConfig(BaseModel):
5
+ id: Optional[str] = Field(None, description="NPC ๊ณ ์œ  ID (์„ค๊ณ„ ๊ธฐ์ค€)")
6
+ name: Optional[str] = Field(None, description="NPC ํ‘œ์‹œ ์ด๋ฆ„")
7
+ persona_name: Optional[str] = Field(None, description="NPC ํŽ˜๋ฅด์†Œ๋‚˜ ์ด๋ฆ„")
8
+ dialogue_style: Optional[str] = Field(None, description="๋Œ€ํ™” ์Šคํƒ€์ผ")
9
+ relationship: Optional[float] = Field(None, description="๊ธฐ๋ณธ ๊ด€๊ณ„ ์ˆ˜์น˜ (-1.0~1.0)")
10
+ npc_mood: Optional[str] = Field(None, description="๊ธฐ๋ณธ ๊ฐ์ • ์ƒํƒœ")
11
+ trigger_values: Optional[Dict[str, List[str]]] = Field(None, description="ํŠธ๋ฆฌ๊ฑฐ ๊ฐ’ ๋ชฉ๋ก")
12
+ trigger_definitions: Optional[Dict[str, Dict[str, Any]]] = Field(None, description="ํŠธ๋ฆฌ๊ฑฐ ์ •์˜")
13
+
14
+ class DialogueTurn(BaseModel):
15
+ player: str
16
+ npc: str
17
+
18
+ class Context(BaseModel):
19
+ require: Optional[Dict[str, Any]] = Field(default_factory=dict, description="pre 1์ฐจ ์กฐ๊ฑด ํŒ๋‹จ์šฉ ํ•„์ˆ˜/์„ ํƒ ์š”์†Œ")
20
+ player_state: Optional[Dict[str, Any]] = Field(default_factory=dict, description="ํ”Œ๋ ˆ์ด์–ด ํ˜„์žฌ ์ƒํƒœ")
21
+ game_state: Optional[Dict[str, Any]] = Field(default_factory=dict, description="๊ฒŒ์ž„ ์ „์—ญ ์ƒํƒœ")
22
+ npc_state: Optional[Dict[str, Any]] = Field(default_factory=dict, description="DB ์ตœ์‹  NPC ์ƒํƒœ")
23
+ npc_config: Optional[NPCConfig] = Field(None, description="RAG ๊ธฐ๋ฐ˜ ์„ค๊ณ„ ์ •๋ณด")
24
+ dialogue_history: Optional[List[DialogueTurn]] = Field(default_factory=list, description="์ตœ๊ทผ ๋Œ€ํ™” ํžˆ์Šคํ† ๋ฆฌ")
25
+
26
+ class AskRes(BaseModel):
27
+ session_id: str
28
+ npc_output_text: str
29
+ deltas: Dict[str, float] = Field(default_factory=dict, description="์ด๋ฒˆ ํ„ด ๋ณ€ํ™”๋Ÿ‰")
30
+ flags: Dict[str, int] = Field(default_factory=dict, description="ํ”Œ๋ž˜๊ทธ ์ด์ง„๊ฐ’ {flag_name: 0|1}")
31
+ valid: bool
32
+ meta: Dict[str, Any] = Field(default_factory=dict, description="์ถ”๊ฐ€ ๋ฉ”ํƒ€๋ฐ์ดํ„ฐ (npc_id, quest_stage, location ๋“ฑ)")
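For reference, a quick sketch of building a Context payload and the AskRes shape the server returns; all values are illustrative.

# Illustrative construction of the schemas defined above (pydantic v1 style).
from schemas import AskRes, Context, DialogueTurn, NPCConfig

ctx = Context(
    require={"items": ["photo_forgotten_party"], "actions": ["visited_factory"]},
    player_state={"location": "map1"},
    game_state={"quest_stage": "in_progress", "location": "map1"},
    npc_config=NPCConfig(id="mother_abandoned_factory", persona_name="Silvia"),
    dialogue_history=[DialogueTurn(player="이 사진을 보세요.", npc="오래된 사진이군요.")],
)

res = AskRes(
    session_id="sess-001",
    npc_output_text="그날의 기억이 조금씩 돌아오네요.",
    deltas={"trust": 0.12},
    flags={"npc_main_story": 1},
    valid=True,
    meta={"npc_id": "mother_abandoned_factory", "quest_stage": "in_progress"},
)
print(ctx.dict())
print(res.dict())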
utils/__init__.py ADDED
@@ -0,0 +1,4 @@
1
+ # __init__.py
2
+
3
+ # This file ensures the directory is treated as a Python package.
4
+ # Required for relative imports and consistent behavior across environments.
utils/context_parser.py ADDED
@@ -0,0 +1,18 @@
1
+ class ContextParser:
2
+ def __init__(self, context: dict):
3
+ self.player = context.get("player_state", context.get("player_status", {}))  # items, actions, location, etc.; accept either key name
4
+ self.game = context.get("game_state", {})  # quest_stage is required
5
+ self.npc = context.get("npc_config", {})  # id is required
6
+ self.history = context.get("dialogue_history", [])
7
+
8
+ def get_filters(self) -> dict:
9
+ return {
10
+ "npc_id": self.npc.get("id"),
11
+ "quest_stage": self.game.get("quest_stage"),
12
+ "location": self.game.get("location") or self.player.get("location")
13
+ }
14
+
15
+
16
+ def get_dialogue_history(self, max_turns: int = 3) -> str:
17
+ history = self.history[-max_turns:]
18
+ return "\n".join([f"Player: {h['player']}\nNPC: {h['npc']}" for h in history])
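Usage sketch for ContextParser, assuming a context dict shaped like schemas.Context.

# Illustrative only: parse a Context-shaped dict and build RAG filters.
from utils.context_parser import ContextParser

context = {
    "player_state": {"items": ["photo_forgotten_party"], "location": "map1"},
    "game_state": {"quest_stage": "in_progress", "location": "map1"},
    "npc_config": {"id": "mother_abandoned_factory"},
    "dialogue_history": [
        {"player": "이 사진을 보세요.", "npc": "오래된 사진이군요."},
    ],
}

parser = ContextParser(context)
print(parser.get_filters())
# -> {'npc_id': 'mother_abandoned_factory', 'quest_stage': 'in_progress', 'location': 'map1'}
print(parser.get_dialogue_history(max_turns=3))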
utils/hf_client.py ADDED
@@ -0,0 +1,47 @@
1
+ import httpx
2
+ from typing import Any, Dict
3
+ from config import HF_SERVE_URL, HF_TIMEOUT
4
+
5
+ async def _post(endpoint: str, payload: Dict[str, Any]) -> Dict[str, Any]:
6
+ """
7
+ Hugging Face Spaces์— POST ์š”์ฒญ์„ ๋ณด๋‚ด๋Š” ๋‚ด๋ถ€ ํ•จ์ˆ˜.
8
+ endpoint๋Š” '/predict_main' ๊ฐ™์€ ์ƒ๋Œ€ ๊ฒฝ๋กœ.
9
+ """
10
+ url = f"{HF_SERVE_URL.rstrip('/')}{endpoint}"
11
+ async with httpx.AsyncClient(timeout=HF_TIMEOUT) as client:
12
+ response = await client.post(url, json=payload)
13
+ response.raise_for_status()
14
+ return response.json()
15
+
16
+ async def call_main(payload: Dict[str, Any]) -> Dict[str, Any]:
17
+ """
18
+ ๋ฉ”์ธ ๋ชจ๋ธ ์ถ”๋ก  ํ˜ธ์ถœ ํ•จ์ˆ˜.
19
+ """
20
+ return await _post("/predict_main", payload)
21
+
22
+ '''
23
+ ----------- ์•„๋ž˜ ๋‚ด์šฉ์€ ai-server๋‚ด๋ถ€์ ์œผ๋กœ ๊ตฌํ˜„ [์ถ”ํ›„ ์ˆ˜์ • ๊ฐ€๋Šฅ]--------------
24
+
25
+ async def call_preprocess(payload: Dict[str, Any]) -> Dict[str, Any]:
26
+ return await _post("/predict_preprocess", payload)
27
+
28
+
29
+ async def call_postprocess(payload: Dict[str, Any]) -> Dict[str, Any]:
30
+ return await _post("/predict_postprocess", payload)
31
+
32
+
33
+ async def call_rag(payload: Dict[str, Any]) -> Dict[str, Any]:
34
+ """
35
+ RAG ๊ธฐ๋ฐ˜ ์ถ”๋ก  ํ˜ธ์ถœ ํ•จ์ˆ˜ (์˜ˆ: ๋ฌธ์„œ ๊ฒ€์ƒ‰ + ์ƒ์„ฑ).
36
+ """
37
+ return await _post("/hf-serve/predict_rag", payload)
38
+
39
+ async def call_adapter_test(payload: Dict[str, Any]) -> Dict[str, Any]:
40
+ """
41
+ Adapter ํ…Œ์ŠคํŠธ์šฉ ์—”๋“œํฌ์ธํŠธ ํ˜ธ์ถœ ํ•จ์ˆ˜.
42
+ """
43
+ return await _post("/hf-serve/test_adapter", payload)
44
+
45
+
46
+ -------------------------------------------------------------------------
47
+ '''
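Finally, a minimal async usage sketch for call_main; config.py supplies HF_SERVE_URL and HF_TIMEOUT, and the payload keys shown are assumptions based on preprocess_input's output.

# Sketch: call the main-model endpoint of the HF Space with a preprocessed payload.
import asyncio

from utils.hf_client import call_main

async def demo():
    payload = {
        "session_id": "sess-001",
        "npc_id": "mother_abandoned_factory",
        "player_utterance": "이 사진 기억나세요?",
        "emotion": "neutral",
    }
    result = await call_main(payload)  # POSTs to {HF_SERVE_URL}/predict_main
    print(result)

if __name__ == "__main__":
    asyncio.run(demo())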