Commit f5220e7 (1 parent: 06fd8bd): updates

app.py CHANGED
@@ -23,6 +23,7 @@ def restart_space():
 print("Pulling evaluation results")
 repo = snapshot_download(
     local_dir=repo_dir_herm,
+    ignore_patterns=["pref-sets-scores/*", "eval-set-scores/*"],
     repo_id=evals_repo,
     use_auth_token=COLLAB_TOKEN,
     tqdm_class=None,
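Note on this hunk: ignore_patterns tells huggingface_hub.snapshot_download to skip any files matching those glob patterns, so the Space stops downloading the per-example score folders it never displays. A minimal sketch of the same call pattern, with a placeholder repo id and local directory rather than the Space's real values:

    from huggingface_hub import snapshot_download

    # Placeholder repo id and directory; only ignore_patterns mirrors the change above.
    repo_path = snapshot_download(
        repo_id="my-org/reward-eval-results",
        local_dir="eval-results/",
        ignore_patterns=["pref-sets-scores/*", "eval-set-scores/*"],  # skip per-example scores
    )
    print(repo_path)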
@@ -31,7 +32,7 @@ repo = snapshot_download(
 )
 
 
-def avg_over_herm(dataframe):
+def avg_over_herm(dataframe_core, dataframe_prefs):
     """
     Averages over the subsets alpacaeval, mt-bench, llmbar, refusals, hep and returns dataframe with only these columns.
 
@@ -42,7 +43,8 @@ def avg_over_herm(dataframe):
     4. Code: Includes the code subsets (hep-cpp, hep-go, hep-java, hep-js, hep-python, hep-rust)
 
     """
-    new_df =
+    new_df = dataframe_core.copy()
+    dataframe_prefs = dataframe_prefs.copy()
 
     # for main subsets, keys in subset_mapping, take the weighted avg by example_counts and store for the models
     for subset, sub_subsets in subset_mapping.items():
@@ -52,10 +54,39 @@ def avg_over_herm(dataframe):
         new_df[subset] = np.round(np.average(sub_data, axis=1, weights=sub_counts), 2)  # take the weighted average
         # new_df[subset] = np.round(np.nanmean(new_df[subset_cols].values, axis=1), 2)
 
-
+    data_cols = list(subset_mapping.keys())
+    keep_columns = ["model",] + ["model_type"] + data_cols
     # keep_columns = ["model", "average"] + subsets
     new_df = new_df[keep_columns]
 
+    # selected average from pref_sets
+    pref_columns = ["anthropic_helpful", "mtbench_gpt4", "shp", "summarize"]
+    pref_data = dataframe_prefs[pref_columns].values
+
+    # add column test sets knowing the rows are not identical, take superset
+    dataframe_prefs["Test Sets"] = np.round(np.nanmean(pref_data, axis=1), 2)
+
+    # add column Test Sets empty to new_df
+    new_df["Test Sets"] = np.nan
+    # per row in new_df if model is in dataframe_prefs, add the value to new_df["Test Sets"]
+    values = []
+    for i, row in new_df.iterrows():
+        model = row["model"]
+        if model in dataframe_prefs["model"].values:
+            values.append(dataframe_prefs[dataframe_prefs["model"] == model]["Test Sets"].values[0])
+            # new_df.at[i, "Test Sets"] = dataframe_prefs[dataframe_prefs["model"] == model]["Test Sets"].values[0]
+        else:
+            values.append(np.nan)
+
+    new_df["Test Sets"] = values
+
+    # add total average
+    data_cols += ["Test Sets"]
+    new_df["average"] = np.round(np.nanmean(new_df[data_cols].values, axis=1), 2)
+
+    # make average third column
+    keep_columns = ["model", "model_type", "average"] + data_cols
+    new_df = new_df[keep_columns]
     return new_df
 
 def expand_subsets(dataframe):
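Note on the avg_over_herm changes: the new body builds a "Test Sets" column as the nan-aware mean of the four pref-set columns, aligns it to the core table by model name (models missing from the pref sets stay NaN), and folds it into an overall nan-aware average. A self-contained sketch of that logic on made-up frames, using .map as a compact stand-in for the per-row loop in the diff (all model names and scores below are illustrative):

    import numpy as np
    import pandas as pd

    # Toy stand-ins for dataframe_core and dataframe_prefs; numbers are made up.
    core = pd.DataFrame({"model": ["a", "b"], "model_type": ["seq", "dpo"],
                         "Chat": [80.0, 70.0], "Code": [60.0, np.nan]})
    prefs = pd.DataFrame({"model": ["a"], "anthropic_helpful": [65.0],
                          "mtbench_gpt4": [75.0], "shp": [np.nan], "summarize": [55.0]})

    # nan-aware mean over the pref-set columns, as in the diff
    pref_cols = ["anthropic_helpful", "mtbench_gpt4", "shp", "summarize"]
    prefs["Test Sets"] = np.round(np.nanmean(prefs[pref_cols].values, axis=1), 2)

    # align by model name; model "b" has no pref-set scores, so it stays NaN
    core["Test Sets"] = core["model"].map(prefs.set_index("model")["Test Sets"])

    # the overall average ignores NaNs, so partially evaluated models still get a score
    data_cols = ["Chat", "Code", "Test Sets"]
    core["average"] = np.round(np.nanmean(core[data_cols].values, axis=1), 2)
    print(core[["model", "model_type", "average"] + data_cols])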
@@ -100,11 +131,12 @@ def length_bias_check(dataframe):
 
 
 herm_data = load_all_data(repo_dir_herm, subdir="eval-set").sort_values(by='average', ascending=False)
-herm_data_avg = avg_over_herm(herm_data).sort_values(by='Chat', ascending=False)
 herm_data_length = length_bias_check(herm_data).sort_values(by='Terse Bias', ascending=False)
 prefs_data = load_all_data(repo_dir_herm, subdir="pref-sets").sort_values(by='average', ascending=False)
 # prefs_data_sub = expand_subsets(prefs_data).sort_values(by='average', ascending=False)
 
+herm_data_avg = avg_over_herm(herm_data, prefs_data).sort_values(by='average', ascending=False)
+
 col_types_herm = ["markdown"] + ["str"] + ["number"] * (len(herm_data.columns) - 1)
 col_types_herm_avg = ["markdown"]+ ["str"] + ["number"] * (len(herm_data_avg.columns) - 1)
 cols_herm_data_length = ["markdown"] + ["number"] * (len(herm_data_length.columns) - 1)