Update main.py
main.py (CHANGED)
@@ -100,26 +100,37 @@ async def analyze(data: ReviewInput, x_api_key: str = Header(None)):
     if len(data.text.split()) < 20:
         raise HTTPException(status_code=400, detail="⚠️ Review too short for analysis (min. 20 words).")
 
-    global log_store
+    global log_store
 
     try:
+        # === Generate Summary ===
         summary = (
             summarize_review(data.text, max_len=40, min_len=8)
             if data.verbosity.lower() == "brief"
             else smart_summarize(data.text, n_clusters=2 if data.intelligence else 1)
         )
 
+        # === Sentiment + Emotion ===
         sentiment_pipeline = pipeline("sentiment-analysis", model=data.model)
         sentiment = sentiment_pipeline(data.text)[0]
+
         emotion_raw = detect_emotion(data.text)
-        emotion =
+        emotion = (
+            emotion_raw[0]["label"]
+            if isinstance(emotion_raw, list) and isinstance(emotion_raw[0], dict)
+            else str(emotion_raw)
+        )
+
         churn_risk = assess_churn_risk(sentiment["label"], emotion)
+
+        # === Auto-detect metadata ===
         industry = detect_industry(data.text) if not data.industry or "auto" in data.industry.lower() else data.industry
         product_category = detect_product_category(data.text) if not data.product_category or "auto" in data.product_category.lower() else data.product_category
 
+        # === Optional: Pain Points ===
         pain_points = extract_pain_points(data.text) if data.aspects else []
 
-        #
+        # === Log entry ===
         log_store.append({
             "timestamp": datetime.now(),
             "product": product_category,
@@ -129,6 +140,7 @@ async def analyze(data: ReviewInput, x_api_key: str = Header(None)):
         if len(log_store) > 1000:
             log_store = log_store[-1000:]
 
+        # === Final API Response ===
         response = {
             "summary": summary,
             "sentiment": sentiment,
@@ -172,7 +184,7 @@ async def bulk_analyze(data: BulkReviewInput, token: str = Query(None)):
     if token != VALID_API_KEY:
         raise HTTPException(status_code=401, detail="❌ Unauthorized: Invalid API token")
 
-    global log_store
+    global log_store
 
     try:
         results = []
@@ -188,7 +200,15 @@ async def bulk_analyze(data: BulkReviewInput, token: str = Query(None)):
 
             summary = smart_summarize(review_text, n_clusters=2 if data.intelligence else 1)
             sentiment = sentiment_pipeline(review_text)[0]
-
+
+            # ✅ Fix emotion return shape
+            emotion_raw = detect_emotion(review_text)
+            emotion = (
+                emotion_raw[0]["label"]
+                if isinstance(emotion_raw, list) and isinstance(emotion_raw[0], dict)
+                else str(emotion_raw)
+            )
+
             churn = assess_churn_risk(sentiment["label"], emotion)
             pain = extract_pain_points(review_text) if data.aspects else []
 
@@ -209,11 +229,12 @@ async def bulk_analyze(data: BulkReviewInput, token: str = Query(None)):
                 "pain_points": pain
             }
 
+            # ✅ Optional follow-up
             if data.follow_up and i < len(data.follow_up):
                 follow_q = data.follow_up[i]
                 result["follow_up"] = answer_followup(review_text, follow_q)
 
-            # ✅ Log churn
+            # ✅ Log churn entry
             log_store.append({
                 "timestamp": datetime.now(),
                 "product": prod,
@@ -223,6 +244,7 @@ async def bulk_analyze(data: BulkReviewInput, token: str = Query(None)):
 
             results.append(result)
 
+        # ✅ Cap log size
         if len(log_store) > 1000:
             log_store = log_store[-1000:]
 
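A few notes on the patch. The emotion guard added to both endpoints exists because text-classification pipelines in transformers typically return a list of dicts such as [{"label": "joy", "score": 0.98}], while other wrappers may hand back a plain string. Since the same four-line guard now appears in analyze and bulk_analyze, it could be factored into one helper. A minimal sketch, assuming detect_emotion keeps those two return shapes (normalize_emotion is a hypothetical name, not part of the patch):

def normalize_emotion(emotion_raw):
    # Typical HF pipeline output: [{"label": "joy", "score": 0.98}]
    if isinstance(emotion_raw, list) and emotion_raw and isinstance(emotion_raw[0], dict):
        return emotion_raw[0]["label"]
    # Fallback: coerce whatever detect_emotion returned to a string
    return str(emotion_raw)

The extra truthiness check on emotion_raw also avoids the IndexError the inline version would raise if the classifier ever returned an empty list.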
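Separately, analyze constructs pipeline("sentiment-analysis", model=data.model) inside the request handler, so model weights are reloaded on every call. If repeated requests tend to reuse the same model name, caching the pipeline is a cheap win; a sketch using only the standard library plus transformers (get_sentiment_pipeline is hypothetical):

from functools import lru_cache

from transformers import pipeline

@lru_cache(maxsize=4)
def get_sentiment_pipeline(model_name: str):
    # Loading a transformers pipeline is expensive; keep one per model name.
    return pipeline("sentiment-analysis", model=model_name)

# In the handler, replacing the per-request construction:
# sentiment = get_sentiment_pipeline(data.model)(data.text)[0]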
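Both endpoints declare global log_store only because the trim log_store = log_store[-1000:] rebinds the module-level name; log_store.append(...) alone would not need the declaration. A collections.deque with maxlen enforces the same 1000-entry cap automatically. A sketch, assuming nothing else relies on log_store being a plain list:

from collections import deque
from datetime import datetime

# Oldest entries are evicted automatically once maxlen is reached,
# so both the manual slice and the `global` declarations can go.
log_store = deque(maxlen=1000)

log_store.append({"timestamp": datetime.now(), "product": "example"})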
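Finally, for anyone exercising the endpoint by hand, a minimal client call. The request fields mirror the ReviewInput attributes used in this diff (text, verbosity, intelligence, aspects, industry, product_category, model), but the route path and port are assumptions, since the @app.post decorators sit outside these hunks:

import requests

# Hypothetical route and port (7860 is the usual Spaces default).
resp = requests.post(
    "http://localhost:7860/analyze",
    json={
        # Must be at least 20 words or the endpoint returns HTTP 400.
        "text": "The checkout flow kept timing out on mobile, support took "
                "three days to reply, and the refund never arrived, which "
                "honestly soured an otherwise decent product experience.",
        "verbosity": "brief",
        "intelligence": True,
        "aspects": True,
        "industry": "auto",
        "product_category": "auto",
        "model": "distilbert-base-uncased-finetuned-sst-2-english",
    },
    headers={"x-api-key": "YOUR_KEY"},  # FastAPI maps x_api_key -> x-api-key
)
print(resp.json())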