import json
import glob

from huggingface_hub import snapshot_download
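
# The only third-party dependency used below (assumed to be installed in the
# Space environment):
#
#   pip install huggingface_hub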

EVAL_REQUESTS_PATH = "eval-queue"
QUEUE_REPO = "cot-leaderboard/cot-leaderboard-requests"
READMES = ["README.md", "../open_cot_dashboard/README.md"]

# Known precision / model-type / weight-type options (not referenced below).
precisions = ("float16", "bfloat16", "8bit (LLM.int8)", "4bit (QLoRA / FP4)", "GPTQ")
model_types = ("pretrained", "fine-tuned", "RL-tuned", "instruction-tuned")
weight_types = ("Original", "Delta", "Adapter")
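
# For reference: the metadata block that update_readme() rewrites is assumed
# to look roughly like this (keys other than "models:" may differ):
#
#   ---
#   title: Open CoT Leaderboard
#   models:
#    - org/model-a
#    - org/model-b
#   ---
#
# "models:" has to be the last key in the block, since every line after it is
# expected to be a list item.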

def update_readme(readme_path: str, models: list[str]):
    # Read the readme and strip trailing whitespace from each line.
    with open(readme_path, "r") as f:
        lines = [line.rstrip() for line in f]
    if not lines:
        raise ValueError(f"Readme file {readme_path} is empty")
    if not lines[0].startswith("---"):
        raise ValueError(f"Readme file {readme_path} does not start with a metadata block")
    # Keep only the YAML metadata block between the opening and closing "---".
    lines = lines[1:]
    if "---" not in lines:
        raise ValueError(f"Readme file {readme_path} does not close the metadata block")
    lines = lines[:lines.index("---")]
    # "models:" must be the last key in the block: every line after it has to
    # be a list item. (index() raises ValueError if the key is missing.)
    models_idx = lines.index("models:")
    if any(not line.startswith(" - ") for line in lines[models_idx + 1:]):
        raise ValueError(f"Readme file {readme_path} does not have a valid list of models")
    # Replace the existing model list with the given one.
    lines = lines[:models_idx + 1]
    for model in models:
        lines.append(f" - {model}")
    lines = ["---"] + lines + ["---"]
    # Write the rebuilt metadata block back. NB: only the metadata block is
    # written; any content after the closing "---" is dropped.
    with open(readme_path, "w") as f:
        f.write("\n".join(lines) + "\n")
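
# A minimal usage sketch on its own (paths and model ids are hypothetical;
# the real script drives this from main() below):
#
#   update_readme("README.md", ["org/model-a", "org/model-b"])
#
# after which the metadata block ends in:
#
#   models:
#    - org/model-a
#    - org/model-b
#   ---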

def main():
    # Pull the current evaluation queue from the Hub.
    snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH, repo_type="dataset")
    # Collect every eval request record in the queue.
    eval_requests = []
    for file in glob.glob(f"{EVAL_REQUESTS_PATH}/**/*.json", recursive=True):
        with open(file, "r") as f:
            eval_requests.append(json.load(f))
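
    # Each request file is assumed to hold one JSON object with at least a
    # "model" and a "status" field; any other keys (e.g. a "precision" field)
    # are ignored here. For instance:
    #
    #   {"model": "org/model-a", "status": "FINISHED", "precision": "float16"}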

    # Keep only models whose evaluation has finished.
    models_evaluated = [
        eval_request["model"] for eval_request in eval_requests
        if eval_request["status"] == "FINISHED"
    ]
    models_evaluated.sort()
    print(models_evaluated)
    for readme in READMES:
        update_readme(readme, models_evaluated)

if __name__ == "__main__":
    main()
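
# Assuming the file lives in the Space checkout (the filename below is
# hypothetical), it runs as a plain script:
#
#   python update_readmes.py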