import logging
import os
import re
from functools import lru_cache

import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import requests
from apscheduler.schedulers.background import BackgroundScheduler
from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
from huggingface_hub import snapshot_download

from src.about import (CITATION_BUTTON_LABEL, CITATION_BUTTON_TEXT, EVALUATION_QUEUE_TEXT,
                       INTRODUCTION_TEXT, LLM_BENCHMARKS_TEXT, TITLE)
from src.tasks import TASK_DESCRIPTIONS, MEASURE_DESCRIPTION
from src.display.css_html_js import custom_css
from src.display.utils import (BENCHMARK_COLS, COLS, EVAL_COLS, EVAL_TYPES, AutoEvalColumn,
                               ModelType, WeightType, Precision, fields)
from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
from src.populate import get_evaluation_queue_df, get_leaderboard_df
from src.submission.submit import add_new_eval

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

BASELINES = {
    "TE": 71.00, "SA": 66.38, "HS": 80.88, "AT": 82.40, "WIC": 85.00,
    "LS": 38.82, "SU": 38.91, "NER": 88.00, "REL": 62.99
}

REFERENCES = {
    "NER": 79.11, "REL": 63.32, "LS": 59.25, "SU": 33.04
}
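# Note on the two dictionaries above: BASELINES holds the per-task scores of the supervised
# EVALITA systems (drawn as dashed black lines in the task boxplot), while REFERENCES holds
# the GPT-4o reference results for the generative tasks (drawn as dashed red lines).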

TASK_METADATA_MULTIPLECHOICE = {
    "TE": {"icon": "📊", "name": "Textual Entailment", "tooltip": ""},
    "SA": {"icon": "😃", "name": "Sentiment Analysis", "tooltip": ""},
    "HS": {"icon": "⚠️", "name": "Hate Speech", "tooltip": ""},
    "AT": {"icon": "🏥", "name": "Admission Test", "tooltip": ""},
    "WIC": {"icon": "🔤", "name": "Word in Context", "tooltip": ""},
    "FAQ": {"icon": "❓", "name": "Frequently Asked Questions", "tooltip": ""}
}

TASK_METADATA_GENERATIVE = {
    "LS": {"icon": "🔄", "name": "Lexical Substitution", "tooltip": ""},
    "SU": {"icon": "📝", "name": "Summarization", "tooltip": ""},
    "NER": {"icon": "🏷️", "name": "Named Entity Recognition", "tooltip": ""},
    "REL": {"icon": "🔗", "name": "Relation Extraction", "tooltip": ""},
}

def send_slack_notification(model_name, user_name, user_affiliation):
    """Post a submission notice to the Slack channel configured via the WEBHOOK_URL env variable."""
    webhook_url = os.getenv("WEBHOOK_URL")
    if not webhook_url:
        logger.error("WEBHOOK_URL is not set; cannot send Slack notification")
        return "❌ **Failed to send notification**: webhook URL is not configured."

    message = {
        "text": f"New model submission for EVALITA-LLM leaderboard:\n\n"
                f"**Model Name**: {model_name}\n"
                f"**User**: {user_name}\n"
                f"**Affiliation**: {user_affiliation}\n"
                f"Check out the model on HuggingFace: https://huggingface.co/{model_name}"
    }

    try:
        response = requests.post(webhook_url, json=message, timeout=10)
    except requests.RequestException as e:
        logger.error(f"Error sending Slack notification: {e}")
        return f"❌ **Failed to send notification**: {e}"

    if response.status_code == 200:
        return "✅ **Notification sent successfully!**"
    else:
        return f"❌ **Failed to send notification**: {response.text}"

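# Usage sketch (assumes a Slack incoming-webhook URL exported in the environment,
# e.g. WEBHOOK_URL=https://hooks.slack.com/services/...; the values below are illustrative):
#
#   status = send_slack_notification("org/model-name", "user@example.com", "Example University")
#   logger.info(status)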
def validate_and_submit_request(model_name, user_email, user_affiliation):
    """Validate the submission form fields and, if they are well formed, notify the team via Slack."""
    if not model_name or not model_name.strip():
        return "❌ **Error:** Model name is required."

    if not user_email or not user_email.strip():
        return "❌ **Error:** Email address is required."

    email_regex = r'^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$'
    if not re.match(email_regex, user_email.strip()):
        return "❌ **Error:** Invalid email format. Please enter a valid email address."

    if not user_affiliation or not user_affiliation.strip():
        return "❌ **Error:** Affiliation is required."

    if "/" not in model_name:
        return "❌ **Error:** Model name must be in format 'organization/model-name' (e.g., 'microsoft/DialoGPT-medium')."

    if not re.match(r'^[a-zA-Z0-9._/-]+$', model_name):
        return "❌ **Error:** Model name contains invalid characters."

    slack_response = send_slack_notification(model_name.strip(), user_email.strip(), user_affiliation.strip())
    return slack_response

def calculate_prompt_sensitivity(dataframe, tasks, prompt_ids):
    """Compute a Prompt Sensitivity Index (PSI).

    For each task, the function computes the share of models whose best prompt is each applicable
    prompt id, then takes the coefficient of variation (CV = std / mean) of those shares. The PSI
    is the mean CV across tasks rescaled by 0.5, saturating at 1.0.
    """
    generative_tasks = ["LS", "SU", "NER", "REL"]

    cv_per_task = []

    for task in tasks:
        prompt_col = f"{task} Best Prompt Id"
        task_accuracies = []

        for pid in prompt_ids:
            pid_int = int(pid)

            # Skip prompt ids that do not apply to this task:
            # 1-6 are multiple-choice prompts, 7-8 belong to SU, 9-10 to LS/NER/REL.
            if pid_int <= 6 and task in generative_tasks:
                continue
            elif pid_int in [7, 8] and task != "SU":
                continue
            elif pid_int in [9, 10] and task not in ["LS", "NER", "REL"]:
                continue

            total = len(dataframe[prompt_col].dropna())
            count = (dataframe[prompt_col] == pid).sum()
            accuracy = (count / total * 100) if total > 0 else 0
            task_accuracies.append(accuracy)

        if task_accuracies:
            mean_acc = np.mean(task_accuracies)
            std_acc = np.std(task_accuracies)

            if mean_acc > 0:
                cv = std_acc / mean_acc
                cv_per_task.append(cv)
            else:
                cv_per_task.append(0)
        else:
            cv_per_task.append(0)

    mean_cv = np.mean(cv_per_task) if cv_per_task else 0

    if mean_cv >= 0.5:
        psi = 1.0
    else:
        psi = mean_cv / 0.5

    return psi, mean_cv, cv_per_task

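# Worked example of the PSI scaling above (illustrative numbers only): with per-task CVs of
# [0.10, 0.30, 0.80], mean_cv = 0.40 and, since 0.40 < 0.5, psi = 0.40 / 0.5 = 0.8;
# any mean_cv >= 0.5 saturates to psi = 1.0.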
def map_prompt_ids_for_generation(dataframe):
    """
    Map original prompt IDs (1 or 2) to their corresponding generative prompt IDs.

    - For task 'SU': 1 -> 7, 2 -> 8
    - For tasks 'NER', 'REL', 'LS': 1 -> 9, 2 -> 10
    """
    task = "SU"
    best_prompt_col = f"{task} Best Prompt Id"
    if best_prompt_col in dataframe.columns:
        dataframe[best_prompt_col] = dataframe[best_prompt_col].apply(
            lambda x: 7 if x == 1 else (8 if x == 2 else x)
        )

    for task in ["NER", "REL", "LS"]:
        best_prompt_col = f"{task} Best Prompt Id"
        if best_prompt_col in dataframe.columns:
            dataframe[best_prompt_col] = dataframe[best_prompt_col].apply(
                lambda x: 9 if x == 1 else (10 if x == 2 else x)
            )

    return dataframe

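# The remapping is applied once at startup, right after the leaderboard dataframe is loaded
# (see the call to map_prompt_ids_for_generation below), so that the prompt ids shown for the
# generative tasks match the 7-10 numbering used by the prompt heatmap.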
def create_best_model_comparison_table(dataframe):
    """
    Interactive table with the details of the best models for each task.
    """
    tasks = ["TE", "SA", "HS", "AT", "WIC", "FAQ", "LS", "SU", "NER", "REL"]

    table_data = {
        'Task': [],
        'Best Overall Model': [],
        'CPS': [],
        'Best Prompt Model': [],
        'Acc.': []
    }

    for task in tasks:
        if task in dataframe.columns:

            # Model with the best combined performance (CPS) on this task.
            max_idx = dataframe[task].idxmax()
            model_raw = dataframe.loc[max_idx, 'Model']

            if isinstance(model_raw, str) and '<' in model_raw:
                match = re.search(r'>([^<]+)<', model_raw)
                model_name = match.group(1) if match else model_raw
            else:
                model_name = str(model_raw)

            comb_perf_value = dataframe.loc[max_idx, task]

            best_prompt_column = f"{task} Best Prompt"
            best_prompt_value = dataframe.loc[max_idx, best_prompt_column]

            # Model with the highest best-prompt accuracy on this task.
            best_prompt_idx = dataframe[best_prompt_column].idxmax()
            best_prompt_model_raw = dataframe.loc[best_prompt_idx, 'Model']
            if isinstance(best_prompt_model_raw, str) and '<' in best_prompt_model_raw:
                match = re.search(r'>([^<]+)<', best_prompt_model_raw)
                best_prompt_model = match.group(1) if match else best_prompt_model_raw
            else:
                best_prompt_model = str(best_prompt_model_raw)

            best_prompt_accuracy = dataframe.loc[best_prompt_idx, best_prompt_column]

            table_data['Task'].append(task)
            table_data['Best Overall Model'].append(model_name)
            table_data['CPS'].append(f"{comb_perf_value:.2f}")
            table_data['Best Prompt Model'].append(best_prompt_model)
            table_data['Acc.'].append(f"{best_prompt_accuracy:.2f}")

    fig = go.Figure(data=[go.Table(
        columnwidth=[40, 200, 40, 200, 40],
        header=dict(
            values=[f'<b>{col}</b>' for col in table_data.keys()],
            fill_color=['#2171b5', '#2171b5', '#2171b5', '#4292c6', '#4292c6'],
            font=dict(color='white', size=12, family='Arial'),
            align='center',
            height=30
        ),
        cells=dict(
            values=list(table_data.values()),
            fill_color=[['#f0f0f0' if i % 2 == 0 else 'white' for i in range(len(table_data['Task']))]],
            font=dict(color='#2c3e50', size=11, family='Arial'),
            align=['center', 'left', 'center', 'left', 'center'],
            height=30
        )
    )])

    fig.update_layout(
        title={'text': "Top Model per Task: CPS & Best Prompt",
               'font': {'family': 'Arial', 'size': 14, 'color': '#2c3e50'}},
        font=dict(family="Arial", size=11),
        height=500,
        margin=dict(l=20, r=20, t=60, b=100)
    )

    fig.add_annotation(
        text="Best Overall Model: scored with the primary metric, CPS, across all prompts. <br>"
             "Best Prompt Model: scored with the highest accuracy (unofficial) based on its best-performing prompt. <br>"
             "No single model achieves the highest performance across all tasks.",
        xref="paper", yref="paper",
        x=0.5, y=-0.20,
        showarrow=False,
        font=dict(size=11, color="gray", family="Arial"),
        align="center",
        xanchor="center"
    )

    return fig

def create_prompt_heatmap(dataframe):
    """
    Heatmap with the percentage of models that achieved their best performance with each prompt
    for every task, showing only the relevant cells:
    - Prompts 1-6: multiple-choice tasks only
    - Prompts 7-8: SU only
    - Prompts 9-10: LS, NER, REL only
    """
    tasks = ["TE", "SA", "HS", "AT", "WIC", "FAQ", "LS", "SU", "NER", "REL"]
    generative_tasks = ["LS", "SU", "NER", "REL"]
    mc_tasks = [t for t in tasks if t not in generative_tasks]

    all_prompt_ids = set()
    for task in tasks:
        prompt_col = f"{task} Best Prompt Id"
        if prompt_col in dataframe.columns:
            all_prompt_ids.update(dataframe[prompt_col].dropna().unique())

    prompt_ids = sorted(all_prompt_ids, key=int)

    matrix = []
    hover_texts = []

    psi, mean_cv, cv_per_task = calculate_prompt_sensitivity(dataframe, tasks, prompt_ids)
    logger.info(f"Prompt Sensitivity Index (PSI): {psi:.3f}")
    logger.info(f"Mean CV: {mean_cv:.3f}")
    logger.info(f"CV per task: {cv_per_task}")

    for pid in prompt_ids:
        row = []
        hover_row = []
        for task in tasks:
            prompt_col = f"{task} Best Prompt Id"
            pid_int = int(pid)

            if pid_int <= 6 and task in generative_tasks:
                row.append(None)
                hover_row.append("")
            elif pid_int in [7, 8] and task != "SU":
                row.append(None)
                hover_row.append("")
            elif pid_int in [9, 10] and task not in ["LS", "NER", "REL"]:
                row.append(None)
                hover_row.append("")
            elif prompt_col in dataframe.columns:
                total = len(dataframe[prompt_col].dropna())
                count = (dataframe[prompt_col] == pid).sum()
                percentage = (count / total * 100) if total > 0 else 0
                row.append(percentage)
                hover_row.append(
                    f"<b>Prompt {pid} - {task}</b><br>"
                    f"Models: {count}/{total}<br>"
                    f"Percentage: {percentage:.1f}%"
                )
            else:
                row.append(0)
                hover_row.append(f"<b>Prompt {pid} - {task}</b><br>No data")
        matrix.append(row)
        hover_texts.append(hover_row)

    ticktext = []
    for pid in prompt_ids:
        ticktext.append(f'<span style="color:#1f77b4;">P{pid} </span>')

    fig = go.Figure(data=go.Heatmap(
        z=matrix,
        x=tasks,
        y=prompt_ids,
        colorscale=[
            [0, '#f7fbff'],
            [0.2, '#deebf7'],
            [0.4, '#9ecae1'],
            [0.6, '#4292c6'],
            [0.8, '#2171b5'],
            [1, '#08519c']
        ],
        text=[[f"{val:.0f}%" if val is not None else "" for val in row] for row in matrix],
        texttemplate="%{text}",
        textfont={"size": 11, "family": "Arial"},
        hovertemplate='%{customdata}<extra></extra>',
        customdata=hover_texts,
        colorbar=dict(title="% Models", ticksuffix="%"),
        zmin=0,
        zmax=100
    ))

    fig.update_yaxes(
        tickmode='array',
        tickvals=prompt_ids,
        ticktext=ticktext,
        tickfont={"size": 11, "family": "Arial"}
    )

    fig.update_layout(
        title={'text': "Most Effective Prompts per Task Across Models",
               'font': {'family': 'Arial', 'size': 14, 'color': '#2c3e50'}},
        xaxis_title="Task",
        yaxis_title="Prompt Variant",
        font=dict(family="Arial", size=11),
        margin=dict(b=150),
        template="plotly_white",
        dragmode=False,
        height=500
    )

    fig.add_annotation(
        text=f"<b style='font-size:14px; color:#2c3e50;'>Mean CV: {mean_cv:.2f}</b>",
        xref="paper", yref="paper",
        x=0.3, y=0.85,
        showarrow=False,
        font=dict(size=14, color="#2c3e50", family="Verdana"),
        align="center",
        xanchor="center",
        bgcolor="#f7f7f7",
        borderpad=5,
        bordercolor="#ccc",
        borderwidth=1
    )

    fig.add_annotation(
        text=(
            "Prompts 1–6 are for multiple-choice tasks, 7–10 for generative tasks. Darker cells indicate the percentage of <br>"
            "model configurations for which a prompt achieved the top performance. A Mean CV (Coefficient of Variation averaged across tasks) <br>"
            "above 0.3 indicates high variability between prompts, suggesting the use of multiple prompts for a more stable evaluation."
        ),
        xref="paper", yref="paper",
        x=0.5, y=-0.35,
        showarrow=False,
        font=dict(size=11, color="gray", family="Arial"),
        align="center",
        xanchor="center"
    )

    fig.update_xaxes(fixedrange=True)
    fig.update_yaxes(fixedrange=True)

    return fig

def highlight_best_per_task(df):
    """Add a 🔺 symbol next to the maximum value in each task column."""
    task_columns = ["TE", "SA", "HS", "AT", "WIC", "FAQ", "LS", "SU", "NER", "REL"]

    df = df.copy()
    for col in task_columns:
        if col in df.columns:
            max_val = df[col].max()
            df[col] = df[col].apply(
                lambda x: f"{x:.1f}🔺" if x == max_val else f"{x:.1f}"
            )
    return df

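# Usage sketch (hypothetical, the helper is not wired into the UI below):
#   display_df = highlight_best_per_task(LEADERBOARD_DF)
# would render each task column as formatted strings with the top score marked by 🔺.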
def theoretical_performance(df_hash):
    """
    Theoretical performance of a model that scores the highest on every individual task.

    Note: this currently returns a fixed value; the df_hash argument is unused.
    """
    return 75.0

def scale_sizes(values, min_size=8, max_size=30):
    """Normalize sizes for scatter plot markers."""
    if not values:
        return []
    vmin, vmax = min(values), max(values)
    if vmax == vmin:
        return [(min_size + max_size) / 2] * len(values)
    return [
        min_size + (val - vmin) / (vmax - vmin) * (max_size - min_size)
        for val in values
    ]

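# Example: scale_sizes([1, 5, 9]) -> [8.0, 19.0, 30.0], i.e. a linear rescaling into [8, 30].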
def extract_model_name(model_string):
    """Extract model name from HTML string."""
    match = re.search(r'>([^<]+)<', model_string)
    return match.group(1) if match else model_string

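# Example (hypothetical markup): extract_model_name('<a href="https://huggingface.co/org/model">org/model</a>')
# returns 'org/model'; plain strings without markup are returned unchanged.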
def create_line_chart(dataframe):
    """Scatter plot of average combined performance (CPS) versus model size, split by 0-shot and 5-shot runs."""
    fig = go.Figure()

    for shot, color in [(True, "blue"), (False, "red")]:
        df = dataframe[dataframe["IS_FS"] == shot]

        x = df["#Params (B)"].tolist()
        y = df["Avg. Comb. Perf. ⬆️"].tolist()
        labels = [extract_model_name(str(m)) for m in df["Model"].tolist()]

        fig.add_trace(go.Scatter(
            x=x,
            y=y,
            mode="markers",
            name="5-Shot" if shot else "0-Shot",
            marker=dict(color=color, size=scale_sizes(x)),
            hovertemplate="<b>%{customdata}</b><br>#Params: %{x}<br>Performance: %{y}<extra></extra>",
            customdata=labels,
        ))

    # Annotate the best-performing configuration overall.
    all_y = dataframe["Avg. Comb. Perf. ⬆️"].tolist()
    if all_y:
        max_idx = all_y.index(max(all_y))
        max_x = dataframe["#Params (B)"].iloc[max_idx]
        max_y = all_y[max_idx]
        max_label = extract_model_name(str(dataframe["Model"].iloc[max_idx]))

        fig.add_annotation(
            x=max_x,
            y=max_y,
            text=max_label,
            showarrow=True,
            arrowhead=2,
            arrowsize=1,
            arrowwidth=2,
            arrowcolor="black",
            font=dict(size=11, color="black"),
            xshift=10, yshift=10,
            ax=-30, ay=-20,
            xanchor="right"
        )

    fig.update_layout(
        title="Model Accuracy vs #Params",
        xaxis_title="#Params (B)", yaxis_title="Average CPS",
        template="plotly_white", hovermode="closest",
        font=dict(family="Arial", size=10), dragmode=False,
        xaxis=dict(tickvals=[0, 25, 50, 75, 100, 125], ticktext=["0", "25", "50", "75", "100", "125"]),
        yaxis=dict(tickvals=[0, 20, 40, 60, 80, 100], range=[0, 100])
    )

    fig.add_annotation(
        text="Accuracy generally rises with #Params, but smaller models <br>"
             "with 5-shot can outperform larger zero-shot models.",
        xref="paper", yref="paper", x=0.5, y=-0.3,
        showarrow=False, font=dict(size=11, color="gray"),
        align="center", xanchor="center"
    )

    fig.update_xaxes(fixedrange=True, rangeslider_visible=False)
    fig.update_yaxes(fixedrange=True)

    return fig

def create_boxplot_task(dataframe=None, baselines=None, references=None):
    """Box plot of the per-task score distribution across models, with baseline and reference lines."""
    tasks = ["TE", "SA", "HS", "AT", "WIC", "FAQ", "LS", "SU", "NER", "REL"]

    # Fall back to synthetic data when no dataframe is provided (useful for local previews).
    if dataframe is None:
        np.random.seed(42)
        dataframe = pd.DataFrame({task: np.random.uniform(0.4, 0.9, 20) * 100 for task in tasks})

    if baselines is None:
        baselines = {task: np.random.randint(50, 70) for task in tasks}

    if references is None:
        references = {}

    colors = ["#1f77b4", "#ff7f0e", "#2ca02c", "#d62728", "#9467bd",
              "#8c564b", "#e377c2", "#7f7f7f", "#bcbd22", "#17becf"]

    fig = go.Figure()

    for i, task in enumerate(tasks):
        if task not in dataframe.columns:
            continue

        y_data = dataframe[task].dropna().tolist()

        fig.add_trace(go.Box(
            y=y_data,
            name=task,
            marker=dict(color=colors[i]),
            line=dict(color="black", width=2),
            fillcolor=colors[i],
            opacity=0.7,
            hovertemplate="<b>" + task + "</b><br>Accuracy: %{y:.2f}%<extra></extra>",
            hoverlabel=dict(bgcolor=colors[i], font_color="white"),
            width=0.6,
            whiskerwidth=0.2,
            quartilemethod="linear"
        ))

        # Dashed black line: supervised EVALITA baseline for this task.
        baseline_value = baselines.get(task)
        if baseline_value is not None:
            fig.add_shape(
                type="line",
                x0=i - 0.3, x1=i + 0.3,
                y0=baseline_value, y1=baseline_value,
                line=dict(color="black", width=2, dash="dot"),
                xref="x", yref="y"
            )

        # Dashed red line: GPT-4o reference for the generative tasks.
        reference_value = references.get(task)
        if reference_value is not None:
            fig.add_shape(
                type="line",
                x0=i - 0.3, x1=i + 0.3,
                y0=reference_value, y1=reference_value,
                line=dict(color="red", width=2, dash="dashdot"),
                xref="x", yref="y"
            )

    fig.update_layout(
        title="Distribution of Model Accuracy by Task",
        xaxis_title="Task",
        yaxis_title="Average CPS",
        template="plotly_white",
        boxmode="group",
        dragmode=False,
        font=dict(family="Arial", size=10),
        margin=dict(b=80),
    )

    fig.add_annotation(
        text=(
            "In tasks like TE and SA, models approach the accuracy of supervised models at EVALITA (dashed black line).<br>"
            "In NER and REL they remain lower. Dashed red lines show GPT-4o reference results for generative tasks."
        ),
        xref="paper", yref="paper",
        x=0.5, y=-0.30,
        showarrow=False,
        font=dict(size=11, color="gray"),
        align="center"
    )

    fig.update_yaxes(range=[0, 100], fixedrange=True)
    fig.update_xaxes(fixedrange=True)

    return fig

def create_medal_assignments(sorted_df):
    """Append a trophy (few-shot) or medal (0-shot) marker to the first model of each size class.

    Size classes are encoded in the 'Size' column as 🔵 (small), 🔵🔵 (medium) and 🔵🔵🔵 (large);
    since the dataframe is already sorted by performance, the first match per class is the best one.
    """
    medals = {
        'large_fs': False, 'medium_fs': False, 'small_fs': False,
        'large_0shot': False, 'medium_0shot': False, 'small_0shot': False
    }

    new_model_column = []

    for _, row in sorted_df.iterrows():
        model_name = row['Model']
        size = row["Size"]
        is_fs = row['IS_FS']

        if is_fs:
            if size == "🔵🔵🔵" and not medals['large_fs']:
                model_name = f"{model_name} 🔵🔵🔵🏆"
                medals['large_fs'] = True
            elif size == "🔵🔵" and not medals['medium_fs']:
                model_name = f"{model_name} 🔵🔵🏆"
                medals['medium_fs'] = True
            elif size == "🔵" and not medals['small_fs']:
                model_name = f"{model_name} 🔵🏆"
                medals['small_fs'] = True
        else:
            if size == "🔵🔵🔵" and not medals['large_0shot']:
                model_name = f"{model_name} 🔵🔵🔵🎖️"
                medals['large_0shot'] = True
            elif size == "🔵🔵" and not medals['medium_0shot']:
                model_name = f"{model_name} 🔵🔵🎖️"
                medals['medium_0shot'] = True
            elif size == "🔵" and not medals['small_0shot']:
                model_name = f"{model_name} 🔵🎖️"
                medals['small_0shot'] = True

        new_model_column.append(model_name)

    return new_model_column

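# Both init_leaderboard and update_task_leaderboard below rely on this helper being called
# after sorting, so the markers always land on the top-ranked model of each (size, mode) pair.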
def create_leaderboard_base(sorted_dataframe, field_list, hidden_columns):
    """Base leaderboard creation with common parameters."""
    return Leaderboard(
        value=sorted_dataframe,
        datatype=[c.type for c in field_list],
        search_columns=[AutoEvalColumn.model.name],
        hide_columns=hidden_columns,
        filter_columns=[
            ColumnFilter(AutoEvalColumn.fewshot_symbol.name, type="checkboxgroup", label="N-Shot Learning (FS)"),
            ColumnFilter(AutoEvalColumn.params.name, type="slider", min=0, max=100, default=[0, 100],
                         label="Select the number of parameters (B)"),
        ],
        bool_checkboxgroup_label="Evaluation Mode",
        interactive=False,
    )

def init_leaderboard(dataframe, default_selection=None, hidden_columns=None):
    """Initialize the main leaderboard, ranked by average combined performance.

    Note: `default_selection` is currently unused; visible columns are controlled via `hidden_columns`.
    """
    if dataframe is None or dataframe.empty:
        raise ValueError("Leaderboard DataFrame is empty or None.")

    sorted_dataframe = dataframe.sort_values(by="Avg. Comb. Perf. ⬆️", ascending=False).reset_index(drop=True)
    sorted_dataframe["Rank"] = sorted_dataframe.index + 1

    sorted_dataframe["Model"] = create_medal_assignments(sorted_dataframe)

    field_list = fields(AutoEvalColumn)

    return create_leaderboard_base(sorted_dataframe, field_list, hidden_columns)

def update_task_leaderboard(dataframe, default_selection=None, hidden_columns=None):
    """Build a task-specific leaderboard, ranked by the task's combined performance.

    Note: `default_selection` is currently unused; visible columns are controlled via `hidden_columns`.
    """
    if dataframe is None or dataframe.empty:
        raise ValueError("Leaderboard DataFrame is empty or None.")

    sorted_dataframe = dataframe.sort_values(by="Comb. Perf. ⬆️", ascending=False).reset_index(drop=True)
    sorted_dataframe["Rank"] = sorted_dataframe.index + 1

    sorted_dataframe["Model"] = create_medal_assignments(sorted_dataframe)

    field_list = fields(AutoEvalColumn)

    return Leaderboard(
        value=sorted_dataframe,
        datatype=[c.type for c in field_list] + [int],
        search_columns=[AutoEvalColumn.model.name],
        hide_columns=hidden_columns,
        filter_columns=[
            ColumnFilter(AutoEvalColumn.fewshot_symbol.name, type="checkboxgroup", label="N-Shot Learning (FS)"),
            ColumnFilter(AutoEvalColumn.params.name, type="slider", min=0, max=100, default=[0, 100],
                         label="Select the number of parameters (B)"),
        ],
        bool_checkboxgroup_label="Evaluation Mode",
        interactive=False
    )

def download_snapshot(repo, local_dir, max_retries=3):
    """Snapshot download with retry logic."""
    for attempt in range(max_retries):
        try:
            logger.info(f"Downloading from {repo} to {local_dir} (attempt {attempt + 1}/{max_retries})")
            snapshot_download(
                repo_id=repo,
                local_dir=local_dir,
                repo_type="dataset",
                tqdm_class=None,
                etag_timeout=30,
                token=TOKEN
            )
            return True
        except Exception as e:
            logger.error(f"Error downloading {repo} (attempt {attempt + 1}): {e}")
            if attempt == max_retries - 1:
                logger.error(f"Failed to download {repo} after {max_retries} attempts")
                return False
    return False

def restart_space():
    """Restart the Hugging Face space."""
    try:
        logger.info("Restarting space...")
        API.restart_space(repo_id=REPO_ID)
    except Exception as e:
        logger.error(f"Error restarting space: {e}")

def create_title_html():
    """Return the HTML for the page title and the link to the Open Italian LLM Leaderboard."""
    return """
    <div class="title-header">
        <h1 class="title-text">
            EVALITA-LLM Leaderboard
        </h1>
        <a href="https://huggingface.co/spaces/mii-llm/open_ita_llm_leaderboard" target="_blank" class="title-link">
            <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24">
                <path d="M3.9 12a5 5 0 0 1 7.07-7.07l1.41 1.41-1.41 1.41-1.42-1.42a3 3 0 1 0 4.24 4.24l3.54-3.54a5 5 0 0 1-7.07 7.07l-1.41-1.41 1.41-1.41 1.42 1.42z"/>
                <path d="M20.1 12a5 5 0 0 1-7.07 7.07l-1.41-1.41 1.41-1.41 1.42 1.42a3 3 0 1 0-4.24-4.24l-3.54 3.54a5 5 0 0 1 7.07-7.07l1.41 1.41-1.41 1.41-1.42-1.42z"/>
            </svg>
            Open Italian LLM Leaderboard
        </a>
    </div>
    """

def create_credits_markdown():
    """Credits section."""
    return """
    **This project has benefited from the following support:**

    - 🧠 **Codebase**: Based on and extended from the Open Italian LLM Leaderboard, developed by **Alessandro Ercolani** and **Samuele Colombo**. We warmly thank them for their invaluable support and guidance in implementing this leaderboard.

    - 💶 **Funding**: Partially supported by the PNRR project **FAIR - Future AI Research (PE00000013)**, under the NRRP MUR program funded by **NextGenerationEU**.

    - 🖥️ **Computation**: We gratefully acknowledge **CINECA** for granting access to the **LEONARDO** supercomputer.
    """

def initialize_app():
    """Download the evaluation queue and results, then build the leaderboard dataframes."""
    try:
        queue_success = download_snapshot(QUEUE_REPO, EVAL_REQUESTS_PATH)
        results_success = download_snapshot(RESULTS_REPO, EVAL_RESULTS_PATH)

        if not (queue_success and results_success):
            logger.error("Failed to download required data")
            return None, None, None, None, None

        leaderboard_df = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
        finished_eval_queue_df, running_eval_queue_df, pending_eval_queue_df = get_evaluation_queue_df(
            EVAL_REQUESTS_PATH, EVAL_COLS)

        theoretical_max = theoretical_performance(hash(str(leaderboard_df.values.tobytes())))

        return leaderboard_df, finished_eval_queue_df, running_eval_queue_df, pending_eval_queue_df, theoretical_max

    except Exception as e:
        logger.error(f"Error initializing app: {e}")
        return None, None, None, None, None

LEADERBOARD_DF, finished_eval_queue_df, running_eval_queue_df, pending_eval_queue_df, theoretical_max_combined_perf = initialize_app()

if LEADERBOARD_DF is not None:
    # Remap the generative-task prompt ids (1/2 -> 7-10) before any chart or table is built.
    LEADERBOARD_DF = map_prompt_ids_for_generation(LEADERBOARD_DF)
else:
    logger.error("Failed to initialize app data")
    theoretical_max_combined_perf = 0.0

def create_gradio_interface():
    """Build the main Gradio interface."""
    demo = gr.Blocks(css=custom_css)

    with demo:
        gr.HTML(create_title_html())
        gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

        with gr.Tabs(elem_classes="tab-buttons") as tabs:

            with gr.TabItem("🏅 Benchmark"):
                if LEADERBOARD_DF is not None:

                    # Summary metrics shown above the charts.
                    with gr.Row():
                        gr.HTML(f"""
                        <div class="performance-metrics">
                            <div class="metric-label" title="Total number of configurations (zero-shot and 5-shot) of the models evaluated in the leaderboard." style="color: #333333;">
                                Models tested: {len(LEADERBOARD_DF)}
                            </div>
                            <div class="metric-label" title="Average accuracy of the evaluated models." style="color: #333333;">
                                Avg combined perf.: {LEADERBOARD_DF['Avg. Comb. Perf. ⬆️'].mean():.2f}
                            </div>
                            <div class="metric-label" title="Standard deviation of the evaluated models' performance." style="color: #333333;">
                                Std. Dev. {LEADERBOARD_DF['Avg. Comb. Perf. ⬆️'].std():.2f}
                            </div>
                            <div class="metric-label" title="Best evaluated model." style="color: #333333;">
                                Best model: {LEADERBOARD_DF.loc[LEADERBOARD_DF['Avg. Comb. Perf. ⬆️'].idxmax(), 'Model']}
                            </div>
                            <div class="metric-label" title="Accuracy of the best evaluated model." style="color: #333333;">
                                Best model accuracy: {LEADERBOARD_DF.loc[LEADERBOARD_DF['Avg. Comb. Perf. ⬆️'].idxmax(), 'Avg. Comb. Perf. ⬆️']:.2f}
                            </div>
                            <div class="metric-label" title="Maximum achievable accuracy based on the highest performance for each task by any model in the leaderboard." style="color: #333333;">
                                Ideal model: {theoretical_max_combined_perf:.2f}
                            </div>
                        </div>
                        """)
                    with gr.Row():
                        gr.Plot(value=create_line_chart(LEADERBOARD_DF), elem_id="line-chart")
                        gr.Plot(value=create_boxplot_task(LEADERBOARD_DF, BASELINES, REFERENCES), elem_id="line-chart")

                    with gr.Row():
                        gr.Plot(value=create_prompt_heatmap(LEADERBOARD_DF), elem_id="line-chart")
                        gr.Plot(value=create_best_model_comparison_table(LEADERBOARD_DF), elem_id="line-chart")

                    leaderboard = init_leaderboard(
                        LEADERBOARD_DF,
                        default_selection=['Rank', 'Size', 'FS', 'Model', "Avg. Comb. Perf. ⬆️",
                                           "TE", "SA", "HS", "AT", "WIC", "FAQ", "LS", "SU", "NER", "REL"],
                        hidden_columns=[col for col in LEADERBOARD_DF.columns if
                                        col not in ['Rank', 'Size', 'FS', 'Model', "Avg. Comb. Perf. ⬆️",
                                                    "TE", "SA", "HS", "AT", "WIC", "FAQ", "LS", "SU", "NER", "REL"]]
                    )
with gr.TabItem("📝 About"):
|
|
|
gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
|
|
|
|
|
|
|
|
|
|
|
|
with gr.TabItem("🚀 Submit"):
|
|
|
gr.Markdown("# 📝 Model Evaluation Request", elem_classes="markdown-text")
|
|
|
gr.Markdown("""
|
|
|
**Fill out the form below to request evaluation of your model on EVALITA-LLM.**
|
|
|
|
|
|
Once submitted, our team will automatically receive a notification. We will evaluate the
|
|
|
submission’s relevance for both research and commercial purposes, as well as assess its feasibility.
|
|
|
""", elem_classes="markdown-text")
|
|
|
|
|
|
                with gr.Row():
                    with gr.Column():
                        model_name_input = gr.Textbox(
                            label="HuggingFace Model Name",
                            placeholder="e.g., microsoft/DialoGPT-medium",
                            info="Enter the complete model name as it appears on HuggingFace Hub (organization/model-name)",
                            elem_id="model-name-input"
                        )

                        user_name_input = gr.Textbox(
                            label="Your email address",
                            placeholder="e.g., mario.rossi@example.com",
                            info="Enter your email address for communication",
                            elem_id="user-email-input"
                        )

                        user_affiliation_input = gr.Textbox(
                            label="Affiliation",
                            placeholder="e.g., University of Milan, Google Research, Freelancer",
                            info="Enter your affiliation (university, company, organization)",
                            elem_id="user-affiliation-input"
                        )

                        submit_request_button = gr.Button(
                            "📤 Submit Request",
                            variant="primary",
                            elem_id="submit-request-button"
                        )

                        submission_status = gr.Markdown(elem_id="submission-status")

                        submit_request_button.click(
                            validate_and_submit_request,
                            inputs=[model_name_input, user_name_input, user_affiliation_input],
                            outputs=submission_status
                        )
with gr.Accordion("ℹ️ Additional Information", open=False):
|
|
|
gr.Markdown("""
|
|
|
**What happens after submission:**
|
|
|
1. Your request is automatically sent to the EVALITA-LLM team
|
|
|
2. We verify that the model is accessible on HuggingFace
|
|
|
3. We contact you to confirm inclusion in the evaluation
|
|
|
4. The model is added to the evaluation queue
|
|
|
|
|
|
**Model requirements:**
|
|
|
- Model must be publicly accessible on HuggingFace Hub
|
|
|
- Must be compatible with the EleutherAI/lm-evaluation-harness framework
|
|
|
- Must have a license that allows evaluation
|
|
|
|
|
|
**Evaluation tasks:**
|
|
|
Your model will be evaluated on all tasks: TE, SA, HS, AT, WIC, FAQ, LS, SU, NER, REL.
|
|
|
""", elem_classes="markdown-text")
|
|
|
|
|
|
|
|
|
|
|
|
with gr.TabItem("║", interactive=False):
|
|
|
gr.Markdown("", elem_classes="markdown-text")
|
|
|
|
|
|
|
|
|
if LEADERBOARD_DF is not None:
|
|
|
for task, metadata in TASK_METADATA_MULTIPLECHOICE.items():
|
|
|
with gr.TabItem(f"{metadata['icon']}{task}"):
|
|
|
task_description = TASK_DESCRIPTIONS.get(task, "Description not available.")
|
|
|
gr.Markdown(task_description, elem_classes="markdown-text")
|
|
|
|
|
|
leaderboard_task = update_task_leaderboard(
|
|
|
LEADERBOARD_DF.rename(columns={
|
|
|
f"{task} Prompt Average": "Prompt Average",
|
|
|
f"{task} Prompt Std": "Prompt Std",
|
|
|
f"{task} Best Prompt": "Best Prompt",
|
|
|
f"{task} Best Prompt Id": "Best Prompt Id",
|
|
|
task: "Comb. Perf. ⬆️"
|
|
|
}),
|
|
|
default_selection=['Rank', 'Size', 'FS', 'Model', 'Comb. Perf. ⬆️',
|
|
|
'Prompt Average', 'Prompt Std', 'Best Prompt', 'Best Prompt Id'],
|
|
|
hidden_columns=[col for col in LEADERBOARD_DF.columns if
|
|
|
col not in ['Rank', 'Size', 'FS', 'Model', 'Comb. Perf. ⬆️',
|
|
|
'Prompt Average', 'Prompt Std', 'Best Prompt',
|
|
|
'Best Prompt Id']]
|
|
|
)
|
|
|
|
|
|
|
|
|
with gr.TabItem("│", interactive=False):
|
|
|
gr.Markdown("", elem_classes="markdown-text")
|
|
|
|
|
|
|
|
|
if LEADERBOARD_DF is not None:
|
|
|
for task, metadata in TASK_METADATA_GENERATIVE.items():
|
|
|
with gr.TabItem(f"{metadata['icon']}{task}"):
|
|
|
task_description = TASK_DESCRIPTIONS.get(task, "Description not available.")
|
|
|
gr.Markdown(task_description, elem_classes="markdown-text")
|
|
|
|
|
|
leaderboard_task = update_task_leaderboard(
|
|
|
LEADERBOARD_DF.rename(columns={
|
|
|
f"{task} Prompt Average": "Prompt Average",
|
|
|
f"{task} Prompt Std": "Prompt Std",
|
|
|
f"{task} Best Prompt": "Best Prompt",
|
|
|
f"{task} Best Prompt Id": "Best Prompt Id",
|
|
|
task: "Comb. Perf. ⬆️"
|
|
|
}),
|
|
|
default_selection=['Rank', 'Size', 'FS', 'Model', 'Comb. Perf. ⬆️',
|
|
|
'Prompt Average', 'Prompt Std', 'Best Prompt', 'Best Prompt Id'],
|
|
|
hidden_columns=[col for col in LEADERBOARD_DF.columns if
|
|
|
col not in ['Rank', 'Size', 'FS', 'Model', 'Comb. Perf. ⬆️',
|
|
|
'Prompt Average', 'Prompt Std', 'Best Prompt',
|
|
|
'Best Prompt Id']]
|
|
|
)
|
|
|
|
|
|
|
|
|
with gr.Accordion("📙 Citation", open=False):
|
|
|
gr.Textbox(
|
|
|
value=CITATION_BUTTON_TEXT,
|
|
|
label=CITATION_BUTTON_LABEL,
|
|
|
lines=20,
|
|
|
elem_id="citation-button",
|
|
|
show_copy_button=True
|
|
|
)
|
|
|
|
|
|
with gr.Accordion("📙 Credits", open=False):
|
|
|
gr.Markdown(create_credits_markdown())
|
|
|
|
|
|
return demo
|
|
|
|
|
|
|
|
|
|
|
|
demo = create_gradio_interface()

# Restart the Space every 30 minutes (1800 s).
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=1800)
scheduler.start()

if __name__ == "__main__":
    demo.queue(default_concurrency_limit=40).launch(
        debug=True,
        show_error=True
    )