from huggingface_hub import HfApi, Repository
import gradio as gr
import json


def change_tab(query_param):
    # the URL query params arrive as a single-quoted, JSON-like string,
    # e.g. "{'tab': 'plot'}", so normalize the quotes before parsing
    query_param = query_param.replace("'", '"')
    query_param = json.loads(query_param)
    if (
        isinstance(query_param, dict)
        and "tab" in query_param
        and query_param["tab"] == "plot"
    ):
        # ?tab=plot selects the plot tab (index 1)
        return gr.Tabs.update(selected=1)
    else:
        # otherwise default to the first tab
        return gr.Tabs.update(selected=0)
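
# --- Illustrative wiring (not part of the original file) ---
# A minimal sketch, assuming a Gradio 3.x Blocks layout, of how change_tab is
# typically hooked up: a client-side JS snippet forwards the page's URL query
# params to change_tab, so visiting ?tab=plot opens the plot tab. The JS
# snippet, tab labels, and component names below are assumptions made for
# illustration only.
EXAMPLE_URL_PARAMS_JS = """
    function(url_params) {
        const params = new URLSearchParams(window.location.search);
        return Object.fromEntries(params);
    }
"""


def build_example_demo():
    with gr.Blocks() as demo:
        with gr.Tabs() as tabs:
            with gr.TabItem("Leaderboard", id=0):
                gr.Markdown("leaderboard table goes here")
            with gr.TabItem("Plot", id=1):
                gr.Markdown("plot goes here")
        # hidden textbox that receives the URL params returned by the JS hook
        url_params = gr.Textbox(visible=False)
        demo.load(change_tab, inputs=[url_params], outputs=tabs, _js=EXAMPLE_URL_PARAMS_JS)
    return demo
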
LLM_MODEL_ARCHS = {
    # branded ?
    "gpt_bigcode": "GPT-BigCode 🌸",
    "RefinedWebModel": "Falcon 🦅",
    "RefinedWeb": "Falcon 🦅",
| "baichuan": "Baichuan π", | |
| "bloom": "Bloom πΈ", | |
| "llama": "LLaMA π¦", | |
    # unbranded ? suggest something
    "stablelm_alpha": "StableLM-Alpha",
    "gpt_neox": "GPT-NeoX",
    "gpt_neo": "GPT-Neo",
    "codegen": "CodeGen",
    "chatglm": "ChatGLM",
    "gpt2": "GPT-2",
    "gptj": "GPT-J",
    "xglm": "XGLM",
    "rwkv": "RWKV",
    "bart": "BART",
    "opt": "OPT",
    "mpt": "MPT",
}

def model_hyperlink(link, model_name):
    # render a model name as a dotted-underline link that opens in a new tab
    return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline; text-decoration-style: dotted;">{model_name}</a>'


def process_model_name(model_name):
    # turn a model id into a clickable link to its page on the Hugging Face Hub
    link = f"https://huggingface.co/{model_name}"
    return model_hyperlink(link, model_name)


def process_model_arch(model_arch):
    # map a raw architecture id to its display name, falling back to the raw id
    if model_arch in LLM_MODEL_ARCHS:
        return LLM_MODEL_ARCHS[model_arch]
    else:
        return model_arch
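
# --- Illustrative usage (not part of the original file) ---
# A minimal sketch, assuming a pandas dataframe of benchmark results, of how
# these helpers are typically applied before rendering the leaderboard table:
# model ids become clickable Hub links and architecture ids get their display
# names. The column names "Model" and "Arch" are assumptions.
import pandas as pd

example_df = pd.DataFrame(
    {
        "Model": ["meta-llama/Llama-2-7b-hf", "tiiuae/falcon-7b"],
        "Arch": ["llama", "RefinedWeb"],
    }
)
example_df["Model"] = example_df["Model"].apply(process_model_name)
example_df["Arch"] = example_df["Arch"].apply(process_model_arch)
print(example_df.to_string(index=False))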