Update backend/api.py

backend/api.py · CHANGED · +7 -6
@@ -13,7 +13,7 @@ app = FastAPI()
 # === CORS for the frontend (e.g. served from Vercel) ===
 app.add_middleware(
     CORSMiddleware,
-    allow_origins=["*"],  #
+    allow_origins=["*"],  # Can be restricted to your Vercel domain for security
     allow_credentials=True,
     allow_methods=["*"],
     allow_headers=["*"],
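The new inline comment suggests pinning `allow_origins` to the real frontend domain. A minimal sketch of what that could look like, assuming a hypothetical Vercel URL (the actual domain is not in this diff); note that browsers refuse a literal `*` Access-Control-Allow-Origin when credentials are sent, so an explicit origin also makes `allow_credentials=True` meaningful:

```python
# Minimal sketch, assuming a hypothetical frontend URL; swap in the real
# Vercel domain. Browsers reject a literal "*" origin when credentials are
# included, so pinning the origin is the safer default.
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware

app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    allow_origins=["https://your-app.vercel.app"],  # hypothetical domain
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
```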
@@ -34,6 +34,7 @@ try:
     tokenizer = AutoTokenizer.from_pretrained(str(TOKENIZER_PATH))
     session = ort.InferenceSession(str(MODEL_PATH), providers=["CPUExecutionProvider"])
     print("✅ Model and tokenizer loaded successfully!")
+    print("📥 Model expects inputs:", [i.name for i in session.get_inputs()])
 except Exception as e:
     print("❌ ERROR while loading model/tokenizer:", e)
     traceback.print_exc()
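The added debug print relies on onnxruntime's session introspection: each entry returned by `session.get_inputs()` is a `NodeArg` carrying the graph's declared input name, shape, and element type. For reference, a standalone sketch of the same inspection (the model path here is a placeholder, not the repo's `MODEL_PATH`):

```python
# Standalone sketch with a placeholder model path; NodeArg exposes the
# declared input name, (possibly symbolic) shape, and element type.
import onnxruntime as ort

session = ort.InferenceSession("model.onnx", providers=["CPUExecutionProvider"])
for node in session.get_inputs():
    print(node.name, node.shape, node.type)
# A BERT-style text classifier typically declares something like:
#   input_ids ['batch_size', 'sequence_length'] tensor(int64)
#   attention_mask ['batch_size', 'sequence_length'] tensor(int64)
```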
@@ -49,8 +50,6 @@ id2label = {
     6: "skills",
 }
 
-label2id = {v: k for k, v in id2label.items()}
-
 # === Response dictionary, the SAME as in training ===
 responses = {
     "about_me": "I am a passionate developer specializing in AI and web development.",
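Dropping `label2id` is reasonable: inference only ever maps class ids back to intent names, never the reverse. What remains is a two-step lookup, sketched below; the dict excerpts and fallback strings are illustrative, not code from this file:

```python
# Sketch of the remaining lookup chain: class id -> intent name -> reply.
id2label = {6: "skills"}       # excerpt of the real mapping
responses = {"skills": "..."}  # excerpt; the real replies live in this file

pred_id = 6                    # e.g. the argmax produced by the model
intent = id2label.get(pred_id, "unknown")
reply = responses.get(intent, "Sorry, I don't have an answer for that.")
print(intent, "->", reply)
```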
@@ -81,10 +80,11 @@ async def chatbot(req: ChatRequest):
         # === Tokenize the input ===
         inputs = tokenizer(req.text, return_tensors="np", padding=True, truncation=True, max_length=128)
 
-        # ===
-
+        # === Keep only the inputs the ONNX model expects ===
+        expected_inputs = [i.name for i in session.get_inputs()]
+        ort_inputs = {k: v.astype(np.int64) for k, v in inputs.items() if k in expected_inputs}
 
-        # === Inference ===
+        # === ONNX inference ===
         ort_outputs = session.run(None, ort_inputs)
         logits = ort_outputs[0]
         pred_id = np.argmax(logits, axis=1)[0]
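The filter guards against a common export mismatch: Hugging Face tokenizers for BERT-style checkpoints also emit `token_type_ids`, which many ONNX graphs never declare, and `session.run` fails on any feed the graph does not know. A sketch of the failure mode under that assumption, reusing the `tokenizer`, `session`, and `np` loaded above (the exact checkpoint is not visible in this diff):

```python
# Sketch of the mismatch the new filter avoids; whether token_type_ids shows
# up depends on the tokenizer/checkpoint, which this diff does not show.
inputs = tokenizer("hello", return_tensors="np", padding=True,
                   truncation=True, max_length=128)
print(list(inputs.keys()))  # e.g. ['input_ids', 'token_type_ids', 'attention_mask']

expected = {i.name for i in session.get_inputs()}
ort_inputs = {k: v.astype(np.int64) for k, v in inputs.items() if k in expected}

# Feeding dict(inputs) unfiltered would raise an invalid-argument error from
# onnxruntime if the graph never declared token_type_ids.
logits = session.run(None, ort_inputs)[0]
```

The explicit `astype(np.int64)` cast is also deliberate: exported graphs usually declare `tensor(int64)` inputs, so casting makes the dtype match regardless of the tokenizer's platform default.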
@@ -98,5 +98,6 @@ async def chatbot(req: ChatRequest):
         return {"reply": reply, "intent": intent}
 
     except Exception as e:
+        print("❌ Runtime error:", e)
         traceback.print_exc()
         return {"reply": "⚠️ Internal server error.", "intent": intent}
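A quick smoke test for the endpoint. The route path and port below are assumptions, since the route decorator sits outside the hunks shown; `ChatRequest` is known to carry a `text` field from the tokenizer call above. One caveat: if a failure happens before `intent` is assigned, the `except` branch's reference to `intent` will itself raise `UnboundLocalError`, so initializing `intent = None` at the top of the handler would make the fallback reply robust.

```python
# Hedged smoke test: "/chat" and port 8000 are assumptions; the route
# decorator is not part of this diff. ChatRequest is known to have `text`.
import requests

resp = requests.post(
    "http://localhost:8000/chat",
    json={"text": "What are your skills?"},
)
print(resp.json())  # expected shape: {"reply": "...", "intent": "..."}
```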