# app.py
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, roc_auc_score
import warnings
import gradio as gr
import os
import git

warnings.filterwarnings('ignore')
plt.style.use('default')
sns.set_palette("husl")


# --- Main Class ---
class EnhancedAIvsRealGazeAnalyzer:
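    """Loads the eye-tracking metrics and questionnaire responses, then serves
    the RQ1 (statistical) and RQ2 (predictive) analyses behind the dashboard."""
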
    def __init__(self):
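        """Set up the answer key and empty slots for the data, model, and metadata."""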
        self.questions = ['Q1', 'Q2', 'Q3', 'Q4', 'Q5', 'Q6']
        self.correct_answers = {'Pair1': 'B', 'Pair2': 'B', 'Pair3': 'B', 'Pair4': 'B', 'Pair5': 'B', 'Pair6': 'B'}
        self.combined_data = None
        self.model = None
        self.scaler = None
        self.feature_names = []
        self.time_metrics = []
        self.numeric_cols = []

    def _find_and_standardize_participant_col(self, df, filename):
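        """Rename whichever column mentions 'participant' to 'participant_id' (as str)."""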
        participant_col = next((c for c in df.columns if 'participant' in str(c).lower()), None)
        if not participant_col:
            raise ValueError(f"Could not find a 'participant' column in the file: {filename}")
        df = df.rename(columns={participant_col: 'participant_id'})
        df['participant_id'] = df['participant_id'].astype(str)
        return df

    def load_and_process_data(self, base_path, response_file_path):
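        """Read the response sheet and the per-question metric files, then merge
        them into one long-format DataFrame with an 'Answer_Correctness' label."""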
        print("--- Starting Robust Data Loading ---")
        response_df = pd.read_excel(response_file_path)
        response_df = self._find_and_standardize_participant_col(response_df, "GenAI Response.xlsx")
        # Score each pair: a response is correct when it matches the answer key.
        for pair, ans in self.correct_answers.items():
            if pair in response_df.columns:
                response_df[f'{pair}_Correct'] = (response_df[pair].astype(str).str.strip().str.upper() == ans)
        # Reshape to long format (one row per participant x pair), then attach correctness.
        response_long = response_df.melt(id_vars=['participant_id'], value_vars=list(self.correct_answers.keys()), var_name='Pair')
        correctness_long = response_df.melt(id_vars=['participant_id'], value_vars=[f'{p}_Correct' for p in self.correct_answers.keys()], var_name='Pair_Correct_Col', value_name='Correct')
        correctness_long['Pair'] = correctness_long['Pair_Correct_Col'].str.replace('_Correct', '')
        response_long = response_long.merge(correctness_long[['participant_id', 'Pair', 'Correct']], on=['participant_id', 'Pair'])
        all_metrics_dfs = []
        for q in self.questions:
            file_path = f"{base_path}/Filtered_GenAI_Metrics_cleaned_{q}.xlsx"
            if os.path.exists(file_path):
                print(f"Processing {file_path}...")
                metrics_df = pd.read_excel(file_path, sheet_name=0)
                metrics_df = self._find_and_standardize_participant_col(metrics_df, f"{q} Metrics")
                metrics_df['Question'] = q
                all_metrics_dfs.append(metrics_df)
        if not all_metrics_dfs:
            raise ValueError("No aggregated metrics files were found.")
        self.combined_data = pd.concat(all_metrics_dfs, ignore_index=True)
        q_to_pair = {f'Q{i+1}': f'Pair{i+1}' for i in range(6)}
        self.combined_data['Pair'] = self.combined_data['Question'].map(q_to_pair)
        self.combined_data = self.combined_data.merge(response_long, on=['participant_id', 'Pair'], how='left')
        self.combined_data['Answer_Correctness'] = self.combined_data['Correct'].map({True: 'Correct', False: 'Incorrect'})
        self.numeric_cols = self.combined_data.select_dtypes(include=np.number).columns.tolist()
        self.time_metrics = [c for c in self.numeric_cols if any(k in c.lower() for k in ['time', 'duration', 'fixation'])]
        print("--- Data Loading Successful ---")
        return self

    def run_prediction_model(self, test_size, n_estimators):
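        """Train a random forest to predict answer correctness from gaze features;
        return a Markdown summary, the classification report, a feature-importance
        plot, and a status message."""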
        # Exclude identifiers and answer-derived columns so the label cannot leak into the features.
        leaky_features = ['participant_id', 'Correct', 'Total_Correct', 'Overall_Accuracy']
        self.feature_names = [col for col in self.numeric_cols if col not in leaky_features]
        features = self.combined_data[self.feature_names].copy()
        target = self.combined_data['Answer_Correctness'].map({'Correct': 1, 'Incorrect': 0})
        valid_indices = target.notna()
        features, target = features[valid_indices], target[valid_indices]
        features = features.fillna(features.median()).fillna(0)
        if len(target.unique()) < 2:
            return "Not enough data to train.", None, None, gr.Markdown("Model not trained yet.")
        X_train, X_test, y_train, y_test = train_test_split(features, target, test_size=test_size, random_state=42, stratify=target)
        self.scaler = StandardScaler().fit(X_train)
        self.model = RandomForestClassifier(n_estimators=int(n_estimators), random_state=42, class_weight='balanced').fit(self.scaler.transform(X_train), y_train)
        X_test_scaled = self.scaler.transform(X_test)
        report = classification_report(y_test, self.model.predict(X_test_scaled), target_names=['Incorrect', 'Correct'], output_dict=True)
        auc_score = roc_auc_score(y_test, self.model.predict_proba(X_test_scaled)[:, 1])
        summary_md = f"### Model Performance\n- **AUC Score:** **{auc_score:.4f}**\n- **Overall Accuracy:** {report['accuracy']:.3f}"
        report_df = pd.DataFrame(report).transpose().round(3)
        feature_importance = pd.DataFrame({'Feature': self.feature_names, 'Importance': self.model.feature_importances_}).sort_values('Importance', ascending=False).head(15)
        fig, ax = plt.subplots(figsize=(10, 8))
        sns.barplot(data=feature_importance, x='Importance', y='Feature', ax=ax, palette='viridis')
        ax.set_title(f'Top 15 Predictive Features (n_estimators={int(n_estimators)})', fontsize=14)
        plt.tight_layout()
        return summary_md, report_df, fig, gr.Markdown("✅ **Model trained successfully.**")

    def analyze_rq1_metric(self, metric):
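        """Compare one time-based metric between correct and incorrect answers
        using a Welch t-test, and show the distributions as a box plot."""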
        if not metric or metric not in self.combined_data.columns:
            return None, "Metric not found."
        correct = self.combined_data.loc[self.combined_data['Answer_Correctness'] == 'Correct', metric].dropna()
        incorrect = self.combined_data.loc[self.combined_data['Answer_Correctness'] == 'Incorrect', metric].dropna()
        if len(correct) < 2 or len(incorrect) < 2:
            return None, "Not enough data for both groups to compare."
        # Welch's t-test: does not assume equal variances between the groups.
        t_stat, p_val = stats.ttest_ind(incorrect, correct, equal_var=False, nan_policy='omit')
        fig, ax = plt.subplots(figsize=(8, 6))
        sns.boxplot(data=self.combined_data, x='Answer_Correctness', y=metric, ax=ax, palette=['#66b3ff', '#ff9999'])
        ax.set_title(f'Comparison of "{metric}" by Answer Correctness', fontsize=14)
        ax.set_xlabel("Answer Correctness")
        ax.set_ylabel(metric)
        plt.tight_layout()
        summary = f"""### Analysis for: **{metric}**\n- **Mean (Correct Answers):** {correct.mean():.4f}\n- **Mean (Incorrect Answers):** {incorrect.mean():.4f}\n- **T-test p-value:** {p_val:.4f}\n\n**Conclusion:**\n- {'There is a **statistically significant** difference (p < 0.05).' if p_val < 0.05 else 'There is **no statistically significant** difference (p >= 0.05).'}"""
        return fig, summary


# --- DATA SETUP & GRADIO APP ---
def setup_and_load_data():
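    """Clone the dataset repository if it is not already present, then return a loaded analyzer."""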
    repo_url = "https://github.com/RextonRZ/GenAIEyeTrackingCleanedDataset"
    repo_dir = "GenAIEyeTrackingCleanedDataset"
    if not os.path.exists(repo_dir):
        print(f"Cloning repository {repo_url}...")
        git.Repo.clone_from(repo_url, repo_dir)
    else:
        print("Data repository already exists.")
    base_path = repo_dir
    response_file_path = os.path.join(repo_dir, "GenAI Response.xlsx")
    analyzer = EnhancedAIvsRealGazeAnalyzer().load_and_process_data(base_path, response_file_path)
    return analyzer
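
# Build the analyzer once at startup so every Gradio callback shares the same data.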
analyzer = setup_and_load_data()
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# Interactive Dashboard: AI vs. Real Gaze Analysis")
    with gr.Tabs():
        with gr.TabItem("📊 RQ1: Viewing Time vs. Correctness"):
            with gr.Row():
                with gr.Column(scale=1):
                    rq1_metric_dropdown = gr.Dropdown(choices=analyzer.time_metrics, label="Select a Time-Based Metric", value=analyzer.time_metrics[0] if analyzer.time_metrics else None)
                    rq1_summary_output = gr.Markdown(label="Statistical Summary")
                with gr.Column(scale=2):
                    rq1_plot_output = gr.Plot(label="Metric Comparison")
        with gr.TabItem("🤖 RQ2: Predicting Correctness from Gaze"):
            with gr.Row():
                with gr.Column(scale=1):
                    gr.Markdown("#### Tune Model Hyperparameters")
                    rq2_test_size_slider = gr.Slider(minimum=0.1, maximum=0.5, step=0.05, value=0.3, label="Test Set Size")
                    rq2_estimators_slider = gr.Slider(minimum=10, maximum=200, step=10, value=100, label="Number of Trees")
                    rq2_status = gr.Markdown("Train a model to see performance metrics.")
                with gr.Column(scale=2):
                    rq2_summary_output = gr.Markdown(label="Model Performance Summary")
                    rq2_table_output = gr.Dataframe(label="Classification Report", interactive=False)
                    rq2_plot_output = gr.Plot(label="Feature Importance")

    # --- WIRING FOR ALL TABS ---
    outputs_rq2 = [rq2_summary_output, rq2_table_output, rq2_plot_output, rq2_status]
    rq1_metric_dropdown.change(fn=analyzer.analyze_rq1_metric, inputs=rq1_metric_dropdown, outputs=[rq1_plot_output, rq1_summary_output])
    rq2_test_size_slider.release(fn=analyzer.run_prediction_model, inputs=[rq2_test_size_slider, rq2_estimators_slider], outputs=outputs_rq2)
    rq2_estimators_slider.release(fn=analyzer.run_prediction_model, inputs=[rq2_test_size_slider, rq2_estimators_slider], outputs=outputs_rq2)

    # Pre-load the initial state of the dashboard
    def initial_load():
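        """Compute the default RQ1 plot and train a baseline RQ2 model on page load."""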
        rq1_fig, rq1_summary = analyzer.analyze_rq1_metric(analyzer.time_metrics[0] if analyzer.time_metrics else None)
        model_summary, report_df, feature_fig, status_md = analyzer.run_prediction_model(0.3, 100)
        return {
            rq1_plot_output: rq1_fig,
            rq1_summary_output: rq1_summary,
            rq2_summary_output: model_summary,
            rq2_table_output: report_df,
            rq2_plot_output: feature_fig,
            rq2_status: status_md,
        }

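    # Returning a dict keyed by output components lets one callback update both tabs at once.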
    demo.load(
        fn=initial_load,
        outputs=[
            rq1_plot_output, rq1_summary_output,
            rq2_summary_output, rq2_table_output, rq2_plot_output, rq2_status,
        ],
    )

if __name__ == "__main__":
    demo.launch()