Update backend/api.py
Browse files- backend/api.py +43 -25
backend/api.py
CHANGED
|
@@ -8,38 +8,42 @@ from pathlib import Path
|
|
| 8 |
import traceback
|
| 9 |
|
| 10 |
# === Inisialisasi FastAPI ===
|
| 11 |
-
app = FastAPI()
|
| 12 |
|
| 13 |
-
# === CORS
|
| 14 |
app.add_middleware(
|
| 15 |
CORSMiddleware,
|
| 16 |
-
allow_origins=["*"],
|
| 17 |
allow_credentials=True,
|
| 18 |
allow_methods=["*"],
|
| 19 |
allow_headers=["*"],
|
| 20 |
)
|
| 21 |
|
| 22 |
-
# === Path ===
|
| 23 |
BASE_DIR = Path(__file__).resolve().parent
|
| 24 |
MODEL_PATH = BASE_DIR / "models" / "bert_chatbot.onnx"
|
| 25 |
TOKENIZER_PATH = BASE_DIR / "models" / "bert-base-multilingual-cased"
|
| 26 |
|
| 27 |
-
# === Global variable ===
|
| 28 |
tokenizer = None
|
| 29 |
session = None
|
| 30 |
|
| 31 |
# === Load model dan tokenizer ===
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 43 |
id2label = {
|
| 44 |
0: "about_me",
|
| 45 |
1: "career_goal",
|
|
@@ -50,7 +54,7 @@ id2label = {
|
|
| 50 |
6: "skills",
|
| 51 |
}
|
| 52 |
|
| 53 |
-
# === Kamus respon
|
| 54 |
responses = {
|
| 55 |
"about_me": "I am a passionate developer specializing in AI and web development.",
|
| 56 |
"skills": "My main skills are HTML5, CSS3, JavaScript, Laravel, Node.js, Database, TensorFlow, PyTorch, Firebase, and Jupyter Notebook.",
|
|
@@ -65,32 +69,46 @@ responses = {
|
|
| 65 |
class ChatRequest(BaseModel):
|
| 66 |
text: str
|
| 67 |
|
|
|
|
| 68 |
@app.get("/")
|
| 69 |
async def root():
|
| 70 |
-
return {"message": "π Chatbot API
|
| 71 |
|
|
|
|
| 72 |
@app.post("/chatbot")
|
| 73 |
async def chatbot(req: ChatRequest):
|
|
|
|
|
|
|
|
|
|
| 74 |
intent = "fallback"
|
| 75 |
|
| 76 |
-
|
|
|
|
| 77 |
return {"reply": responses["fallback"], "intent": "error_loading"}
|
| 78 |
|
| 79 |
try:
|
| 80 |
# === Tokenisasi input ===
|
| 81 |
-
inputs = tokenizer(
|
| 82 |
-
|
| 83 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 84 |
expected_inputs = [i.name for i in session.get_inputs()]
|
| 85 |
ort_inputs = {k: v.astype(np.int64) for k, v in inputs.items() if k in expected_inputs}
|
| 86 |
|
| 87 |
-
# ===
|
| 88 |
ort_outputs = session.run(None, ort_inputs)
|
| 89 |
logits = ort_outputs[0]
|
| 90 |
-
pred_id = np.argmax(logits, axis=1)[0]
|
| 91 |
|
| 92 |
-
# ===
|
|
|
|
| 93 |
intent = id2label.get(pred_id, "fallback")
|
|
|
|
|
|
|
| 94 |
reply = responses.get(intent, responses["fallback"])
|
| 95 |
|
| 96 |
print(f"π§ Input: {req.text} | Intent: {intent} | Reply: {reply}")
|
|
|
|
import traceback

# === FastAPI initialization ===
app = FastAPI(title="Portfolio Chatbot API", version="1.0")

# === CORS (may later be restricted to the Vercel frontend domain) ===
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # e.g. ["https://your-frontend.vercel.app"]
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# === Model and tokenizer paths (relative to this file) ===
BASE_DIR = Path(__file__).resolve().parent
MODEL_PATH = BASE_DIR / "models" / "bert_chatbot.onnx"
TOKENIZER_PATH = BASE_DIR / "models" / "bert-base-multilingual-cased"

# === Module-level holders for the model and tokenizer ===
# Populated by load_model(); endpoints fall back gracefully when these
# remain None (i.e. loading failed at startup).
tokenizer = None
session = None
# === Load model and tokenizer ===
def load_model():
    """Load the tokenizer and the ONNX inference session into module globals.

    Runs once at import time (called below). Any failure is logged with a
    traceback but deliberately NOT re-raised, so the API process still starts;
    the /chatbot endpoint checks `session`/`tokenizer` for None and returns a
    fallback reply instead of crashing.
    """
    global tokenizer, session
    try:
        print("π Loading tokenizer dan ONNX model...")
        tokenizer = AutoTokenizer.from_pretrained(str(TOKENIZER_PATH))
        # CPU provider only: deployment target has no GPU.
        session = ort.InferenceSession(str(MODEL_PATH), providers=["CPUExecutionProvider"])
        print("β Model dan tokenizer berhasil dimuat!")
        # Log the input names the ONNX graph expects, to help debug
        # tokenizer-output / model-input mismatches.
        print("π₯ Model expects inputs:", [i.name for i in session.get_inputs()])
    except Exception as e:
        print("β ERROR saat memuat model/tokenizer:", e)
        traceback.print_exc()

load_model()
|
| 45 |
+
|
| 46 |
+
# === Label mapping (HARUS sama seperti saat training) ===
|
| 47 |
id2label = {
|
| 48 |
0: "about_me",
|
| 49 |
1: "career_goal",
|
|
|
|
| 54 |
6: "skills",
|
| 55 |
}
|
| 56 |
|
| 57 |
+
# === Kamus respon sesuai training ===
|
| 58 |
responses = {
|
| 59 |
"about_me": "I am a passionate developer specializing in AI and web development.",
|
| 60 |
"skills": "My main skills are HTML5, CSS3, JavaScript, Laravel, Node.js, Database, TensorFlow, PyTorch, Firebase, and Jupyter Notebook.",
|
|
|
|
class ChatRequest(BaseModel):
    """Request payload for the /chatbot endpoint.

    Attributes:
        text: the raw user message to classify into an intent.
    """
    text: str
|
| 71 |
|
# === Root endpoint ===
@app.get("/")
async def root():
    """Health check: confirms the API process is up and responding."""
    return {"message": "π Portfolio Chatbot API is running successfully!"}
|
| 76 |
|
| 77 |
+
# === Chatbot endpoint ===
|
| 78 |
@app.post("/chatbot")
|
| 79 |
async def chatbot(req: ChatRequest):
|
| 80 |
+
"""
|
| 81 |
+
Endpoint utama untuk memproses input teks dan mengembalikan intent serta respon.
|
| 82 |
+
"""
|
| 83 |
intent = "fallback"
|
| 84 |
|
| 85 |
+
# Pastikan model sudah termuat
|
| 86 |
+
if session is None or tokenizer is None:
|
| 87 |
return {"reply": responses["fallback"], "intent": "error_loading"}
|
| 88 |
|
| 89 |
try:
|
| 90 |
# === Tokenisasi input ===
|
| 91 |
+
inputs = tokenizer(
|
| 92 |
+
req.text,
|
| 93 |
+
return_tensors="np", # output dalam format numpy
|
| 94 |
+
padding=True,
|
| 95 |
+
truncation=True,
|
| 96 |
+
max_length=128
|
| 97 |
+
)
|
| 98 |
+
|
| 99 |
+
# === Siapkan input sesuai nama yang diminta oleh model ===
|
| 100 |
expected_inputs = [i.name for i in session.get_inputs()]
|
| 101 |
ort_inputs = {k: v.astype(np.int64) for k, v in inputs.items() if k in expected_inputs}
|
| 102 |
|
| 103 |
+
# === Jalankan inferensi ONNX ===
|
| 104 |
ort_outputs = session.run(None, ort_inputs)
|
| 105 |
logits = ort_outputs[0]
|
|
|
|
| 106 |
|
| 107 |
+
# === Prediksi intent ===
|
| 108 |
+
pred_id = int(np.argmax(logits, axis=1)[0])
|
| 109 |
intent = id2label.get(pred_id, "fallback")
|
| 110 |
+
|
| 111 |
+
# === Ambil respon ===
|
| 112 |
reply = responses.get(intent, responses["fallback"])
|
| 113 |
|
| 114 |
print(f"π§ Input: {req.text} | Intent: {intent} | Reply: {reply}")
|