deepsodha committed
Commit 4094d81 · verified · 1 Parent(s): 451cb7d

Update retailgpt_evaluator/app.py

Files changed (1)
  1. retailgpt_evaluator/app.py +1 -18
retailgpt_evaluator/app.py CHANGED
@@ -1,7 +1,6 @@
 import streamlit as st
 from shared.hf_helpers import build_pipeline
 from retailgpt_evaluator.leaderboard import build_leaderboard
-from retailgpt_evaluator.evaluate import evaluate_all  # ✅ NEW import
 import yaml, pandas as pd, os
 from pathlib import Path
 
@@ -19,21 +18,7 @@ def main():
     with open(config_path) as f:
         cfg = yaml.safe_load(f)
 
-    # -------------------------------
-    # Evaluation Trigger Button
-    # -------------------------------
-    st.markdown("### 🛠️ Run Evaluation")
-    if st.button("⚙️ Run evaluate.py now"):
-        with st.spinner("Running model evaluations..."):
-            try:
-                evaluate_all()
-                st.success("✅ Evaluation complete! Leaderboard updated.")
-            except Exception as e:
-                st.error(f"❌ Evaluation failed: {e}")
-
-    # -------------------------------
     # Show leaderboard if exists
-    # -------------------------------
     if os.path.exists("models/retail_eval_results.json"):
         df = build_leaderboard()
         st.subheader("📊 Model Leaderboard")
@@ -41,14 +26,12 @@ def main():
     else:
         st.warning("Run `evaluate.py` first to generate metrics.")
 
-    # -------------------------------
     # Model chat interface
-    # -------------------------------
     st.markdown("---")
     model_name = st.selectbox("Choose a model to chat with:", cfg["models"])
     pipe = build_pipeline(model_name)
 
-    query = st.text_area("Customer query:", "I want to return a damaged product.")
+    query = st.text_area("Customer query:", "Which is the best country for retail?.")
     if st.button("Ask Model"):
         result = pipe(query, max_new_tokens=cfg["demo"]["max_new_tokens"])
         st.markdown("### 🧠 Model Response")