# app.py
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import seaborn as sns
from scipy import stats
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, roc_auc_score
import warnings
import gradio as gr
import os
import git
warnings.filterwarnings('ignore')
plt.style.use('default')
sns.set_palette("husl")

# --- Main Class ---
class EnhancedAIvsRealGazeAnalyzer:
def __init__(self):
self.questions = ['Q1', 'Q2', 'Q3', 'Q4', 'Q5', 'Q6']
self.correct_answers = {'Pair1': 'B', 'Pair2': 'B', 'Pair3': 'B', 'Pair4': 'B', 'Pair5': 'B', 'Pair6': 'B'}
self.combined_data = None
self.fixation_data = {}
self.participant_list = []
self.model = None
self.scaler = None
self.feature_names = []
def _find_and_standardize_participant_col(self, df, filename):
"""Finds, renames, and type-converts the participant ID column."""
participant_col = next((c for c in df.columns if 'participant' in str(c).lower()), None)
if not participant_col:
raise ValueError(f"Could not find a 'participant' column in the file: {filename}")
df.rename(columns={participant_col: 'participant_id'}, inplace=True)
df['participant_id'] = df['participant_id'].astype(str)
return df
def load_and_process_data(self, base_path, response_file_path):
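        """Load the response sheet and the per-question metrics/fixation workbooks, then merge them into one table."""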
print("--- Starting Robust Data Loading ---")
# 1. Load and Standardize Response Data
print("Loading response sheet...")
response_df = pd.read_excel(response_file_path)
response_df = self._find_and_standardize_participant_col(response_df, "GenAI Response.xlsx")
for pair, ans in self.correct_answers.items():
if pair in response_df.columns:
response_df[f'{pair}_Correct'] = (response_df[pair].astype(str).str.strip().str.upper() == ans)
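        # Reshape responses to long format: one row per (participant, pair) with the chosen answer and its correctness.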
        available_pairs = [p for p in self.correct_answers if p in response_df.columns]
        response_long = response_df.melt(id_vars=['participant_id'], value_vars=available_pairs, var_name='Pair')
        correctness_long = response_df.melt(id_vars=['participant_id'], value_vars=[f'{p}_Correct' for p in available_pairs], var_name='Pair_Correct_Col', value_name='Correct')
correctness_long['Pair'] = correctness_long['Pair_Correct_Col'].str.replace('_Correct', '')
response_long = response_long.merge(correctness_long[['participant_id', 'Pair', 'Correct']], on=['participant_id', 'Pair'])
# 2. Load and Standardize Metrics & Fixation Data
all_metrics_dfs = []
for q in self.questions:
file_path = f"{base_path}/Filtered_GenAI_Metrics_cleaned_{q}.xlsx"
print(f"Processing {file_path}...")
if os.path.exists(file_path):
xls = pd.ExcelFile(file_path)
# Metrics Data
metrics_df = pd.read_excel(xls, sheet_name=0)
metrics_df = self._find_and_standardize_participant_col(metrics_df, f"{q} Metrics")
metrics_df['Question'] = q
all_metrics_dfs.append(metrics_df)
# Fixation Data
if 'Fixation-based AOI' in xls.sheet_names:
fix_df = pd.read_excel(xls, sheet_name='Fixation-based AOI')
fix_df = self._find_and_standardize_participant_col(fix_df, f"{q} Fixations")
fix_df.dropna(subset=['Fixation point X', 'Fixation point Y', 'Gaze event duration (ms)'], inplace=True)
fix_df['Question'] = q
for participant, group in fix_df.groupby('participant_id'):
self.fixation_data[(participant, q)] = group.reset_index(drop=True)
if not all_metrics_dfs: raise ValueError("No aggregated metrics files were found.")
self.combined_data = pd.concat(all_metrics_dfs, ignore_index=True)
        # 3. Merge Metrics with Response Correctness
print("Merging all data sources...")
q_to_pair = {f'Q{i+1}': f'Pair{i+1}' for i in range(6)}
self.combined_data['Pair'] = self.combined_data['Question'].map(q_to_pair)
self.combined_data = self.combined_data.merge(response_long, on=['participant_id', 'Pair'], how='left')
self.combined_data['Answer_Correctness'] = self.combined_data['Correct'].map({True: 'Correct', False: 'Incorrect'})
# 4. Finalize class attributes
self.numeric_cols = self.combined_data.select_dtypes(include=np.number).columns.tolist()
self.time_metrics = [c for c in self.numeric_cols if any(k in c.lower() for k in ['time', 'duration', 'fixation'])]
self.participant_list = sorted(self.combined_data['participant_id'].unique().tolist())
print("--- Data Loading Successful ---")
return self
def run_prediction_model(self, test_size, n_estimators):
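        """Train a Random Forest to predict answer correctness from gaze metrics, excluding leak-prone columns."""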
leaky_features = ['Total_Correct', 'Overall_Accuracy', 'Correct', 'participant_id']
self.feature_names = [col for col in self.numeric_cols if col not in leaky_features and col in self.combined_data.columns]
features = self.combined_data[self.feature_names].copy()
target = self.combined_data['Answer_Correctness'].map({'Correct': 1, 'Incorrect': 0})
valid_indices = target.notna()
features, target = features[valid_indices], target[valid_indices]
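        # Impute missing feature values with the column median; columns that are entirely NaN fall back to 0.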
features = features.fillna(features.median()).fillna(0)
if len(target.unique()) < 2: return "Not enough data to train model.", None, None
X_train, X_test, y_train, y_test = train_test_split(features, target, test_size=test_size, random_state=42, stratify=target)
self.scaler = StandardScaler().fit(X_train)
X_train_scaled = self.scaler.transform(X_train)
self.model = RandomForestClassifier(n_estimators=n_estimators, random_state=42, class_weight='balanced').fit(X_train_scaled, y_train)
        X_test_scaled = self.scaler.transform(X_test)
        report = classification_report(y_test, self.model.predict(X_test_scaled), target_names=['Incorrect', 'Correct'], output_dict=True)
        auc_score = roc_auc_score(y_test, self.model.predict_proba(X_test_scaled)[:, 1])
summary_md = f"### Model Performance\n- **AUC Score:** **{auc_score:.4f}**\n- **Overall Accuracy:** {report['accuracy']:.3f}"
report_df = pd.DataFrame(report).transpose().round(3)
feature_importance = pd.DataFrame({'Feature': self.feature_names, 'Importance': self.model.feature_importances_}).sort_values('Importance', ascending=False).head(15)
        fig, ax = plt.subplots(figsize=(10, 8))
        sns.barplot(data=feature_importance, x='Importance', y='Feature', ax=ax, palette='viridis')
        ax.set_title(f'Top 15 Predictive Features (n_estimators={n_estimators})', fontsize=14)
        plt.tight_layout()
return summary_md, report_df, fig
def _recalculate_features_from_fixations(self, fixations_df):
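        """Rebuild the aggregate feature vector used by the model from a partial sequence of fixations."""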
feature_vector = pd.Series(0.0, index=self.feature_names)
if fixations_df.empty: return feature_vector.fillna(0).values.reshape(1, -1)
if 'AOI name' in fixations_df.columns:
for aoi_name, group in fixations_df.groupby('AOI name'):
col_name = f'Total fixation duration on {aoi_name}'
if col_name in feature_vector.index:
feature_vector[col_name] = group['Gaze event duration (ms)'].sum()
        # Only update features the model was trained on; adding a new key would change the vector length.
        if 'Total Recording Duration' in feature_vector.index:
            feature_vector['Total Recording Duration'] = fixations_df['Gaze event duration (ms)'].sum()
return feature_vector.fillna(0).values.reshape(1, -1)
def generate_gaze_playback(self, participant, question, fixation_num):
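        """Plot the scanpath up to the selected fixation and the model's live probability that the answer is correct."""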
trial_key = (str(participant), question)
if not participant or not question or trial_key not in self.fixation_data:
return "Please select a valid trial with fixation data.", None, gr.Slider(interactive=False)
all_fixations = self.fixation_data[trial_key]
fixation_num = int(fixation_num)
slider_max = len(all_fixations)
if fixation_num > slider_max: fixation_num = slider_max
current_fixations = all_fixations.iloc[:fixation_num]
partial_features = self._recalculate_features_from_fixations(current_fixations)
        if self.model is None or self.scaler is None:
            return "The prediction model has not been trained yet; run the RQ2 model first.", None, gr.Slider(interactive=False)
        prediction_prob = self.model.predict_proba(self.scaler.transform(partial_features))[0]
prob_correct = prediction_prob[1]
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10, 8), gridspec_kw={'height_ratios': [4, 1]})
fig.suptitle(f"Gaze Playback for {participant} - {question}", fontsize=16, weight='bold')
ax1.set_title(f"Displaying Fixations 1 through {fixation_num}/{slider_max}")
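        # 1920x1080 screen canvas with an inverted y-axis (screen coordinates); the left half is Image A, the right half Image B.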
ax1.set_xlim(0, 1920); ax1.set_ylim(1080, 0)
ax1.set_aspect('equal'); ax1.tick_params(left=False, right=False, bottom=False, top=False, labelleft=False, labelbottom=False)
ax1.add_patch(patches.Rectangle((0, 0), 1920/2, 1080, facecolor='black', alpha=0.05))
ax1.add_patch(patches.Rectangle((1920/2, 0), 1920/2, 1080, facecolor='blue', alpha=0.05))
ax1.text(1920*0.25, 50, "Image A", ha='center', fontsize=14, alpha=0.5)
ax1.text(1920*0.75, 50, "Image B", ha='center', fontsize=14, alpha=0.5)
if not current_fixations.empty:
points = current_fixations[['Fixation point X', 'Fixation point Y']]
ax1.plot(points['Fixation point X'], points['Fixation point Y'], marker='o', color='grey', alpha=0.5, linestyle='-')
ax1.scatter(points.iloc[-1]['Fixation point X'], points.iloc[-1]['Fixation point Y'], s=150, c='red', zorder=10, edgecolors='black')
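        # Bottom panel: the model's current probability that this trial is answered correctly.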
ax2.set_xlim(0, 1); ax2.set_yticks([])
ax2.set_title("Live Prediction Confidence (Answer is 'Correct')")
bar_color = 'green' if prob_correct > 0.5 else 'red'
ax2.barh([0], [prob_correct], color=bar_color, height=0.5)
ax2.axvline(0.5, color='black', linestyle='--', linewidth=1)
ax2.text(prob_correct, 0, f" {prob_correct:.1%} ", va='center', ha='left' if prob_correct < 0.9 else 'right', color='white', weight='bold')
plt.tight_layout(rect=[0, 0, 1, 0.95])
trial_info = self.combined_data[(self.combined_data['participant_id'] == str(participant)) & (self.combined_data['Question'] == question)].iloc[0]
summary_text = f"**Actual Answer:** `{trial_info['Answer_Correctness']}`"
return summary_text, fig, gr.Slider(maximum=slider_max, value=fixation_num, interactive=True)
    def analyze_rq1_metric(self, metric):
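        """Compare a single time-based metric between correct and incorrect answers (Welch's t-test and a boxplot)."""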
if not metric or metric not in self.combined_data.columns: return None, "Metric not found."
correct = self.combined_data.loc[self.combined_data['Answer_Correctness'] == 'Correct', metric].dropna()
incorrect = self.combined_data.loc[self.combined_data['Answer_Correctness'] == 'Incorrect', metric].dropna()
if len(correct) < 2 or len(incorrect) < 2: return None, "Not enough data for both groups to compare."
t_stat, p_val = stats.ttest_ind(incorrect, correct, equal_var=False, nan_policy='omit')
        fig, ax = plt.subplots(figsize=(8, 6))
        sns.boxplot(data=self.combined_data, x='Answer_Correctness', y=metric, ax=ax, palette=['#66b3ff', '#ff9999'])
        ax.set_title(f'Comparison of "{metric}" by Answer Correctness', fontsize=14)
        ax.set_xlabel("Answer Correctness")
        ax.set_ylabel(metric)
        plt.tight_layout()
summary = f"""### Analysis for: **{metric}**\n- **Mean (Correct Answers):** {correct.mean():.4f}\n- **Mean (Incorrect Answers):** {incorrect.mean():.4f}\n- **T-test p-value:** {p_val:.4f}\n\n**Conclusion:**\n- {'There is a **statistically significant** difference (p < 0.05).' if p_val < 0.05 else 'There is **no statistically significant** difference (p >= 0.05).'}"""
return fig, summary
# --- DATA SETUP & GRADIO APP ---
def setup_and_load_data():
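    """Clone the public eye-tracking dataset repository if it is not already present and build the analyzer."""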
repo_url = "https://github.com/RextonRZ/GenAIEyeTrackingCleanedDataset"
repo_dir = "GenAIEyeTrackingCleanedDataset"
    if not os.path.exists(repo_dir):
        git.Repo.clone_from(repo_url, repo_dir)
    else:
        print("Data repository already exists.")
base_path = repo_dir
response_file_path = os.path.join(repo_dir, "GenAI Response.xlsx")
analyzer = EnhancedAIvsRealGazeAnalyzer().load_and_process_data(base_path, response_file_path)
return analyzer
analyzer = setup_and_load_data()
with gr.Blocks(theme=gr.themes.Soft()) as demo:
gr.Markdown("# Interactive Dashboard: AI vs. Real Gaze Analysis")
with gr.Tabs():
with gr.TabItem("πŸ“Š RQ1: Viewing Time vs. Correctness"):
with gr.Row():
with gr.Column(scale=1):
rq1_metric_dropdown=gr.Dropdown(choices=analyzer.time_metrics, label="Select a Time-Based Metric", value=analyzer.time_metrics[0] if analyzer.time_metrics else None)
rq1_summary_output=gr.Markdown(label="Statistical Summary")
with gr.Column(scale=2):
rq1_plot_output=gr.Plot(label="Metric Comparison")
with gr.TabItem("πŸ€– RQ2: Predicting Correctness from Gaze"):
with gr.Row():
with gr.Column(scale=1):
gr.Markdown("#### Tune Model Hyperparameters")
rq2_test_size_slider=gr.Slider(minimum=0.1, maximum=0.5, step=0.05, value=0.3, label="Test Set Size")
rq2_estimators_slider=gr.Slider(minimum=10, maximum=200, step=10, value=100, label="Number of Trees")
with gr.Column(scale=2):
rq2_summary_output=gr.Markdown(label="Model Performance Summary")
rq2_table_output=gr.Dataframe(label="Classification Report", interactive=False)
rq2_plot_output=gr.Plot(label="Feature Importance")
with gr.TabItem("πŸ‘οΈ Gaze Playback & Real-Time Prediction"):
gr.Markdown("### See the Prediction Evolve with Every Glance!")
with gr.Row():
with gr.Column(scale=1):
playback_participant=gr.Dropdown(choices=analyzer.participant_list, label="Select Participant")
playback_question=gr.Dropdown(choices=analyzer.questions, label="Select Question")
gr.Markdown("Use the slider to play back fixations one by one.")
playback_slider=gr.Slider(minimum=0, maximum=1, step=1, value=0, label="Fixation Number", interactive=False)
playback_summary=gr.Markdown(label="Trial Info")
with gr.Column(scale=2):
playback_plot=gr.Plot(label="Gaze Playback & Live Prediction")
outputs_rq2 = [rq2_summary_output, rq2_table_output, rq2_plot_output]
outputs_playback = [playback_summary, playback_plot, playback_slider]
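    # Event wiring: the RQ2 sliders use .release() so the model retrains only when the user lets go.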
rq1_metric_dropdown.change(fn=analyzer.analyze_rq1_metric, inputs=rq1_metric_dropdown, outputs=[rq1_plot_output, rq1_summary_output])
rq2_test_size_slider.release(fn=analyzer.run_prediction_model, inputs=[rq2_test_size_slider, rq2_estimators_slider], outputs=outputs_rq2)
rq2_estimators_slider.release(fn=analyzer.run_prediction_model, inputs=[rq2_test_size_slider, rq2_estimators_slider], outputs=outputs_rq2)
playback_inputs = [playback_participant, playback_question, playback_slider]
playback_participant.change(lambda: 0, None, playback_slider).then(fn=analyzer.generate_gaze_playback, inputs=playback_inputs, outputs=outputs_playback)
playback_question.change(lambda: 0, None, playback_slider).then(fn=analyzer.generate_gaze_playback, inputs=playback_inputs, outputs=outputs_playback)
playback_slider.release(fn=analyzer.generate_gaze_playback, inputs=playback_inputs, outputs=outputs_playback)
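    # Populate both analysis tabs with the default settings when the page first loads.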
demo.load(fn=analyzer.analyze_rq1_metric, inputs=rq1_metric_dropdown, outputs=[rq1_plot_output, rq1_summary_output])
demo.load(fn=analyzer.run_prediction_model, inputs=[rq2_test_size_slider, rq2_estimators_slider], outputs=outputs_rq2)
if __name__ == "__main__":
demo.launch()