| """A gradio app that renders a static leaderboard. This is used for Hugging Face Space.""" | |
| import ast | |
| import argparse | |
| import glob | |
| import pickle | |
| import gradio as gr | |
| import numpy as np | |
| import pandas as pd | |
| notebook_url = "https://colab.research.google.com/drive/1KdwokPjirkTmpO_P1WByFNFiqxWQquwH#scrollTo=o_CpbkGEbhrK" | |
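
# Placeholder values for the basic-stats tab (6 components) and the leaderboard
# tab (5 components). load_demo() returns them as the current component values.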
basic_component_values = [None] * 6
leader_component_values = [None] * 5


def make_default_md(arena_df, elo_results):
    leaderboard_md = f"""
# 🏆
| [GitHub](https://) |
"""
    return leaderboard_md


def make_arena_leaderboard_md(arena_df):
    total_votes = sum(arena_df["num_battles"]) // 2
    total_models = len(arena_df)

    leaderboard_md = f"""
Total #models: **{total_models}**. Total #votes: **{total_votes}**. Last updated: April 9, 2024.

Find more analysis in the [notebook]({notebook_url}).
"""
    return leaderboard_md


def make_full_leaderboard_md(elo_results):
    leaderboard_md = f"""
Benchmarks are displayed:
"""
    return leaderboard_md


def make_leaderboard_md_live(elo_results):
    leaderboard_md = f"""
# Leaderboard
Last updated: {elo_results["last_updated_datetime"]}
{elo_results["leaderboard_table"]}
"""
    return leaderboard_md


def update_elo_components(max_num_files, elo_results_file):
    # Live-update path. The helpers below (get_log_files, clean_battle_data,
    # report_elo_analysis_results, report_basic_stats) come from the full
    # monitoring codebase this file was extracted from; they are not bundled
    # with this static Space, which loads a precomputed elo_results pickle.
    log_files = get_log_files(max_num_files)

    # Leaderboard
    if elo_results_file is None:  # Do live update
        battles = clean_battle_data(log_files)
        elo_results = report_elo_analysis_results(battles)

        leader_component_values[0] = make_leaderboard_md_live(elo_results)
        leader_component_values[1] = elo_results["win_fraction_heatmap"]

    # Basic stats
    basic_stats = report_basic_stats(log_files)
    md0 = f"Last updated: {basic_stats['last_updated_datetime']}"

    md1 = "### Action Histogram\n"
    md1 += basic_stats["action_hist_md"] + "\n"

    basic_component_values[0] = md0
    basic_component_values[1] = basic_stats["chat_dates_bar"]
    basic_component_values[2] = md1


def update_worker(max_num_files, interval, elo_results_file):
    while True:
        tic = time.time()
        update_elo_components(max_num_files, elo_results_file)
        duration = time.time() - tic
        print(f"update duration: {duration:.2f} s")
        time.sleep(max(interval - duration, 0))
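

# In live mode (elo_results_file=None) the update loop would typically run in a
# background thread so the Gradio app stays responsive. A minimal sketch, with
# illustrative values for max_num_files and the refresh interval in seconds:
#
#   import threading
#   threading.Thread(
#       target=update_worker, args=(1000, 1800, None), daemon=True
#   ).start()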


def load_demo(url_params, request: gr.Request):
    logger.info(f"load_demo. ip: {request.client.host}. params: {url_params}")
    return basic_component_values + leader_component_values


def model_hyperlink(model_name, link):
    return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
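

# The leaderboard CSV is assumed to provide at least the columns referenced
# below and in the table builders: "key", "Model", "Link", "Organization",
# "License", "Knowledge cutoff date", and (for the full leaderboard) "MMLU".
# Numeric columns use "-" to mark missing values. This is inferred from how
# the columns are accessed, not from a schema.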
def load_leaderboard_table_csv(filename, add_hyperlink=True):
    lines = open(filename).readlines()
    heads = [v.strip() for v in lines[0].split(",")]
    rows = []
    for i in range(1, len(lines)):
        row = [v.strip() for v in lines[i].split(",")]
        item = {}
        for h, v in zip(heads, row):
            # Assumed numeric columns; adjust to the CSV's actual headers.
            if h in ("Arena Elo rating", "MMLU"):
                if v != "-":
                    v = ast.literal_eval(v)
                else:
                    v = np.nan
            item[h] = v
        if add_hyperlink:
            item["Model"] = model_hyperlink(item["Model"], item["Link"])
        rows.append(item)
    return rows
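

# build_basic_stats_tab() creates six placeholder components; their live values
# are kept in basic_component_values and returned to the UI by load_demo().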
def build_basic_stats_tab():
    empty = "Loading ..."
    basic_component_values[:] = [empty, None, empty, empty, empty, empty]

    md0 = gr.Markdown(empty)
    gr.Markdown("#### Figure 1:")
    plot_1 = gr.Plot(show_label=False)
    with gr.Row():
        with gr.Column():
            md1 = gr.Markdown(empty)
        with gr.Column():
            md2 = gr.Markdown(empty)
    with gr.Row():
        with gr.Column():
            md3 = gr.Markdown(empty)
        with gr.Column():
            md4 = gr.Markdown(empty)
    return [md0, plot_1, md1, md2, md3, md4]


def get_full_table(arena_df, model_table_df):
    values = []
    for i in range(len(model_table_df)):
        row = []
        model_key = model_table_df.iloc[i]["key"]
        model_name = model_table_df.iloc[i]["Model"]
        # model display name
        row.append(model_name)
        # arena elo rating, if the model has battle data
        if model_key in arena_df.index:
            idx = arena_df.index.get_loc(model_key)
            row.append(round(arena_df.iloc[idx]["rating"]))
        else:
            row.append(np.nan)
        # MMLU score from the leaderboard CSV (assumed column; NaN if absent)
        row.append(model_table_df.iloc[i].get("MMLU", np.nan))
        # Organization
        row.append(model_table_df.iloc[i]["Organization"])
        # license
        row.append(model_table_df.iloc[i]["License"])
        values.append(row)
    # sort by rating, putting models without a rating last
    values.sort(key=lambda x: -x[1] if not np.isnan(x[1]) else 1e9)
    return values


def get_arena_table(arena_df, model_table_df):
    # sort by rating
    arena_df = arena_df.sort_values(
        by=["final_ranking", "rating"], ascending=[True, False]
    )
    values = []
    for i in range(len(arena_df)):
        row = []
        model_key = arena_df.index[i]
        model_name = model_table_df[model_table_df["key"] == model_key]["Model"].values[0]
        # rank
        ranking = arena_df.iloc[i].get("final_ranking") or i + 1
        row.append(ranking)
        # model display name
        row.append(model_name)
        # elo rating
        row.append(round(arena_df.iloc[i]["rating"]))
        upper_diff = round(arena_df.iloc[i]["rating_q975"] - arena_df.iloc[i]["rating"])
        lower_diff = round(arena_df.iloc[i]["rating"] - arena_df.iloc[i]["rating_q025"])
        row.append(f"+{upper_diff}/-{lower_diff}")
        # num battles
        row.append(round(arena_df.iloc[i]["num_battles"]))
        # Organization
        row.append(
            model_table_df[model_table_df["key"] == model_key]["Organization"].values[0]
        )
        # license
        row.append(
            model_table_df[model_table_df["key"] == model_key]["License"].values[0]
        )
        # knowledge cutoff date
        cutoff_date = model_table_df[model_table_df["key"] == model_key][
            "Knowledge cutoff date"
        ].values[0]
        if cutoff_date == "-":
            row.append("Unknown")
        else:
            row.append(cutoff_date)
        values.append(row)
    return values


def build_leaderboard_tab(elo_results_file, leaderboard_table_file, show_plot=False):
    if elo_results_file is None:  # Do live update
        default_md = "Loading ..."
        p1 = p2 = p3 = p4 = None
    else:
        with open(elo_results_file, "rb") as fin:
            elo_results = pickle.load(fin)
        if "full" in elo_results:
            elo_results = elo_results["full"]

        p1 = elo_results["win_fraction_heatmap"]
        p2 = elo_results["battle_count_heatmap"]
        p3 = elo_results["bootstrap_elo_rating"]
        p4 = elo_results["average_win_rate_bar"]
        # Assumes the pickle stores the arena ranking table under this key.
        arena_df = elo_results["leaderboard_table_df"]
        default_md = make_default_md(arena_df, elo_results)

    md_1 = gr.Markdown(default_md, elem_id="leaderboard_markdown")

    if leaderboard_table_file:
        data = load_leaderboard_table_csv(leaderboard_table_file)
        model_table_df = pd.DataFrame(data)

        with gr.Tabs() as tabs:
            # arena table
            arena_table_vals = get_arena_table(arena_df, model_table_df)
            with gr.Tab("Arena Elo", id=0):
                md = make_arena_leaderboard_md(arena_df)
                gr.Markdown(md, elem_id="leaderboard_markdown")
                # Headers correspond to the eight columns built in get_arena_table().
                gr.Dataframe(
                    headers=[
                        "Rank",
                        "🤖 Model",
                        "Arena Elo",
                        "95% CI",
                        "Votes",
                        "Organization",
                        "License",
                        "Knowledge Cutoff",
                    ],
                    datatype=[
                        "str",
                        "markdown",
                        "number",
                        "str",
                        "number",
                        "str",
                        "str",
                        "str",
                    ],
                    value=arena_table_vals,
                    elem_id="arena_leaderboard_dataframe",
                    height=700,
                    column_widths=[50, 200, 120, 100, 100, 150, 150, 100],
                    wrap=True,
                )
| with gr.Tab("Full Leaderboard", id=1): | |
| md = make_full_leaderboard_md(elo_results) | |
| gr.Markdown(md, elem_id="leaderboard_markdown") | |
| full_table_vals = get_full_table(arena_df, model_table_df) | |
| gr.Dataframe( | |
| headers=[ | |
| "π€ Model", | |
| "π MMLU", | |
| "Organization", | |
| "License", | |
| ], | |
| datatype=["markdown", "number", "str", "str"], | |
| value=full_table_vals, | |
| elem_id="full_leaderboard_dataframe", | |
| column_widths=[200, 100, 100, 100, 150, 150], | |
| height=700, | |
| wrap=True, | |
| ) | |

        if not show_plot:
            gr.Markdown(
                """ ## Visit our [HF space](https://huggingface.co/spaces/) for more analysis!
""",
                elem_id="leaderboard_markdown",
            )
        else:
            pass

    gr.Markdown(
        f"""
""",
        elem_id="leaderboard_markdown",
    )

    leader_component_values[:] = [default_md, p1, p2, p3, p4]

    if show_plot:
        gr.Markdown(
            f"""## More Statistics\n
Below are figures for more statistics. The code for generating them is also included in this [notebook]({notebook_url}).
""",
            elem_id="leaderboard_markdown",
        )
        # Attach the four precomputed figures so the components returned below exist.
        with gr.Row():
            with gr.Column():
                gr.Markdown("#### Figure 1: ")
                plot_1 = gr.Plot(p1, show_label=False)
            with gr.Column():
                gr.Markdown("#### Figure 2: ")
                plot_2 = gr.Plot(p2, show_label=False)
        with gr.Row():
            with gr.Column():
                plot_3 = gr.Plot(p3, show_label=False)
            with gr.Column():
                plot_4 = gr.Plot(p4, show_label=False)

    with gr.Accordion(
        "📑 Citation",
        open=True,
    ):
        citation_md = """
### Citation
"""
        gr.Markdown(citation_md, elem_id="leaderboard_markdown")
    gr.Markdown(acknowledgment_md)

    if show_plot:
        return [md_1, plot_1, plot_2, plot_3, plot_4]
    return [md_1]
| block_css = """ | |
| #notice_markdown { | |
| font-size: 104% | |
| } | |
| #notice_markdown th { | |
| display: none; | |
| } | |
| #notice_markdown td { | |
| padding-top: 6px; | |
| padding-bottom: 6px; | |
| } | |
| #leaderboard_markdown { | |
| font-size: 104% | |
| } | |
| #leaderboard_markdown td { | |
| padding-top: 6px; | |
| padding-bottom: 6px; | |
| } | |
| #leaderboard_dataframe td { | |
| line-height: 0.1em; | |
| } | |
| footer { | |
| display:none !important | |
| } | |
| .sponsor-image-about img { | |
| margin: 0 20px; | |
| margin-top: 20px; | |
| height: 40px; | |
| max-height: 100%; | |
| width: auto; | |
| float: left; | |
| } | |
| """ | |
| acknowledgment_md = """ | |
| ### Acknowledgment | |
| """ | |


def build_demo(results_file, leaderboard_table_file):
    text_size = gr.themes.sizes.text_lg

    with gr.Blocks(
        title="Leaderboard",
        theme=gr.themes.Base(text_size=text_size),
        css=block_css,
    ) as demo:
        leader_components = build_leaderboard_tab(
            results_file, leaderboard_table_file, show_plot=True
        )
    return demo


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--share", action="store_true")
    args = parser.parse_args()
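
    # The sorts below assume file names like elo_results_<digits>.pkl and
    # leaderboard_table_<digits>.csv (e.g. a YYYYMMDD stamp), so the slice after
    # the fixed prefix parses as an integer and the newest file sorts last.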
    result_files = glob.glob("elo_results_*.pkl")
    result_files.sort(key=lambda x: int(x[12:-4]))
    result_file = result_files[-1]
    # result_file = None

    leaderboard_table_files = glob.glob("leaderboard_table_*.csv")
    leaderboard_table_files.sort(key=lambda x: int(x[18:-4]))
    leaderboard_table_file = leaderboard_table_files[-1]

    demo = build_demo(result_file, leaderboard_table_file)
    demo.launch(share=args.share)