import streamlit as st
import pandas as pd
import json
import joblib
import huggingface_hub
from huggingface_hub import hf_hub_download

st.write(f"Hugging Face Hub version: {huggingface_hub.__version__}")

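
# Helpers for fetching the pre-trained model files from the Hugging Face Hub
# and deserializing them with joblib.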
def download_model_from_huggingface(repo_id, filename):
    """Downloads a model file from a Hugging Face repository."""
    try:
        model_file = hf_hub_download(repo_id=repo_id, filename=filename)
        return model_file
    except Exception as e:
        st.error(f"Error downloading model {filename}: {e}")
        return None


def load_model(model_file):
    """Loads a model from a file."""
    if model_file is None:
        # The download already failed and reported an error; nothing to load.
        return None
    try:
        return joblib.load(model_file)
    except Exception as e:
        st.error(f"Error loading model: {e}")
        return None

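
# Download and load both models once at startup so every uploaded feedback
# file can reuse them.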
REPO_ID = "totoro74/Intelligent_Customer_Analyzer"

# Defined before the try block so the checks further down cannot raise a
# NameError if loading fails.
bert_topic_model = None
recommendation_model = None

try:
    bert_topic_model_file = download_model_from_huggingface(REPO_ID, "models/bertopic_model.joblib")
    bert_topic_model = load_model(bert_topic_model_file)

    recommendation_model_file = download_model_from_huggingface(REPO_ID, "models/recommendation_model.joblib")
    recommendation_model = load_model(recommendation_model_file)
except Exception as e:
    st.error(f"⚠️ Error loading models: {e}")

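
# Page layout and the file upload widget.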
st.title("Intelligent Customer Feedback Analyzer")
st.write("Analyze customer feedback for sentiment, topics, and get personalized recommendations.")

uploaded_file = st.file_uploader("Upload a Feedback File (CSV, JSON, TXT)", type=["csv", "json", "txt"])

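
# Streamlit exposes the upload's MIME type, which selects the parser below:
# the first CSV column, the 'feedback' key of each JSON object, or one entry
# per line of plain text.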
def extract_feedback(file):
    """Extracts text data from CSV, JSON, or TXT files."""
    try:
        if file.type == "text/csv":
            df = pd.read_csv(file)
            feedback_text = df.iloc[:, 0].dropna().astype(str).tolist()
            return feedback_text
        elif file.type == "application/json":
            json_data = json.load(file)
            feedback_text = [item.get('feedback', '') for item in json_data if isinstance(item, dict)]
            return feedback_text
        elif file.type == "text/plain":
            # One feedback entry per non-empty line.
            return [line for line in file.getvalue().decode("utf-8").splitlines() if line.strip()]
        else:
            return ["Unsupported file type"]
    except Exception as e:
        st.error(f"Error processing file: {e}")
        return []

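
# Run each feedback entry through both models and show the results in a
# separate expander per entry.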
if uploaded_file:
    feedback_text_list = extract_feedback(uploaded_file)

    if feedback_text_list and bert_topic_model and recommendation_model:
        for feedback_text in feedback_text_list:
            with st.expander(f'Analyze Feedback: "{feedback_text[:30]}..."'):
                try:
                    topics = bert_topic_model.predict([feedback_text])
                    st.write(f"**Predicted Topic(s):** {topics}")

                    recommendations = recommendation_model.predict([feedback_text])
                    st.write(f"**Recommended Actions:** {recommendations}")
                except Exception as e:
                    st.error(f"Error analyzing feedback: {e}")
    else:
        st.error("⚠️ Unable to analyze feedback. Please check if models are correctly loaded.")
else:
    st.info("Please upload a feedback file to analyze.")