# TuRTLe-Leaderboard/config/model_metadata.py
from dataclasses import dataclass
from typing import Literal, Optional


@dataclass
class ModelMetadata:
    url: str  # link to the model card on Hugging Face
    params: Optional[float]  # parameter count in billions; None when not publicly known
    model_type: Literal["General", "Coding", "RTL-Specific"]
    release: Literal["V1", "V2", "V3"]  # leaderboard release in which the model was included
    model_arch: Literal["Dense", "Reasoning"]  # distinguishes reasoners from non-reasoners
# fmt: off
MODELS = {
"DeepSeek R1-0528": ModelMetadata(
"https://huggingface.co/deepseek-ai/DeepSeek-R1-0528", 685, "General", "V2", "Reasoning"
),
"DeepSeek R1": ModelMetadata(
"https://huggingface.co/deepseek-ai/DeepSeek-R1", 685, "General", "V1", "Reasoning"
),
"Llama 3.1 405B": ModelMetadata(
"https://huggingface.co/RedHatAI/Meta-Llama-3.1-405B-FP8", 406, "General", "V1", "Dense"
),
"Qwen3 236B A22B": ModelMetadata(
"https://huggingface.co/Qwen/Qwen3-235B-A22B", 235, "General", "V2", "Reasoning"
),
"Llama 3.(1-3) 70B": ModelMetadata(
"https://huggingface.co/meta-llama/Llama-3.3-70B-Instruct", 70.6, "General", "V1", "Dense"
),
"Qwen2.5 72B": ModelMetadata(
"https://huggingface.co/Qwen/Qwen2.5-72B-Instruct", 72.7, "General", "V1", "Dense"
),
"QwQ 32B": ModelMetadata(
"https://huggingface.co/Qwen/QwQ-32B", 32.8, "General", "V2", "Reasoning"
),
"Qwen2.5 32B": ModelMetadata(
"https://huggingface.co/Qwen/Qwen2.5-32B", 32.5, "General", "V1", "Dense"
),
"StarChat2 15B v0.1": ModelMetadata(
"https://huggingface.co/HuggingFaceH4/starchat2-15b-v0.1", 16, "General", "V1", "Dense"
),
"DeepSeek R1 Distill Qwen 14B": ModelMetadata(
"https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-14B", 14.8, "General", "V1", "Reasoning"
),
"CodeLlama 70B": ModelMetadata(
"https://huggingface.co/codellama/CodeLlama-70b-hf", 69, "Coding", "V1", "Dense"
),
"QwenCoder 2.5 32B": ModelMetadata(
"https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct", 32.5, "Coding", "V1", "Dense"
),
"DeepSeek Coder 33B": ModelMetadata(
"https://huggingface.co/deepseek-ai/deepseek-coder-33b-instruct", 33.3, "Coding", "V1", "Dense"
),
"QwenCoder 2.5 14B": ModelMetadata(
"https://huggingface.co/Qwen/Qwen2.5-Coder-14B-Instruct", 14.7, "Coding", "V1", "Dense"
),
"DeepCoder 14B": ModelMetadata(
"https://huggingface.co/agentica-org/DeepCoder-14B-Preview", 14.8, "Coding", "V2", "Reasoning"
),
"OpenCoder 8B": ModelMetadata(
"https://huggingface.co/infly/OpenCoder-8B-Instruct", 7.77, "Coding", "V1", "Dense"
),
"SeedCoder 8B": ModelMetadata(
"https://huggingface.co/ByteDance-Seed/Seed-Coder-8B-Instruct", 8.25, "Coding", "V2", "Dense"
),
"SeedCoder 8B Reasoning": ModelMetadata(
"https://huggingface.co/ByteDance-Seed/Seed-Coder-8B-Reasoning-bf16", 8.25, "Coding", "V2", "Reasoning"
),
"QwenCoder 2.5 7B": ModelMetadata(
"https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct", 7.61, "Coding", "V1", "Dense"
),
"DeepSeek Coder 6.7B": ModelMetadata(
"https://huggingface.co/deepseek-ai/deepseek-coder-6.7b-instruct", 6.74, "Coding", "V1", "Dense"
),
"HaVen-CodeQwen": ModelMetadata(
"https://huggingface.co/yangyiyao/HaVen-CodeQwen", 7.25, "RTL-Specific", "V1", "Dense"
),
"CodeV R1 Distill Qwen 7B": ModelMetadata(
"https://huggingface.co/zhuyaoyu/CodeV-R1-Distill-Qwen-7B", 7.62, "RTL-Specific", "V2", "Reasoning"
),
"CodeV-CL-7B": ModelMetadata(
"https://huggingface.co/yang-z/CodeV-CL-7B", 6.74, "RTL-Specific", "V1", "Dense"
),
"CodeV-QW-7B": ModelMetadata(
"https://huggingface.co/yang-z/CodeV-QW-7B", 7.25, "RTL-Specific", "V1", "Dense"
),
"CodeV-DS-6.7B": ModelMetadata(
"https://huggingface.co/yang-z/CodeV-DS-6.7B", 6.74, "RTL-Specific", "V1", "Dense"
),
"RTLCoder Mistral": ModelMetadata(
"https://huggingface.co/ishorn5/RTLCoder-v1.1", 7.24, "RTL-Specific", "V1", "Dense"
),
"RTLCoder DeepSeek": ModelMetadata(
"https://huggingface.co/ishorn5/RTLCoder-Deepseek-v1.1", 6.74, "RTL-Specific", "V1", "Dense"
),
"OriGen": ModelMetadata(
"https://huggingface.co/henryen/OriGen", 6.74, "RTL-Specific", "V1", "Dense"
),
"Qwen3 Coder 480B A35B": ModelMetadata(
"https://huggingface.co/Qwen/Qwen3-Coder-480B-A35B-Instruct", 480, "Coding", "V2", "Dense"
),
"Magistral Small 2506": ModelMetadata(
"https://huggingface.co/mistralai/Magistral-Small-2506", 23.6, "General", "V2", "Reasoning"
),
"gpt-oss-20b": ModelMetadata(
"https://huggingface.co/openai/gpt-oss-20b", 21.5, "General", "V3", "Reasoning"
),
"gpt-oss-120b": ModelMetadata(
"https://huggingface.co/openai/gpt-oss-120b", 120, "General", "V3", "Reasoning"
),
"Seed-OSS-36B": ModelMetadata(
"https://huggingface.co/ByteDance-Seed/Seed-OSS-36B-Instruct", 36.2, "General", "V3", "Reasoning"
),
"Qwen3-8B": ModelMetadata(
"https://huggingface.co/Qwen/Qwen3-8B", 8.2, "General", "V3", "Reasoning"
),
"Hermes-4-14B": ModelMetadata(
"https://huggingface.co/NousResearch/Hermes-4-14B", 14, "General", "V3", "Dense"
),
"Hermes-4-14B-Reasoning": ModelMetadata(
"https://huggingface.co/NousResearch/Hermes-4-14B", 14, "General", "V3", "Reasoning"
),
"Gemini 2.5 Flash (Medium)": ModelMetadata(
"https://huggingface.co/google", None, "General", "V3", "Reasoning"
),
}
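
# Illustrative helpers (a minimal sketch): assumes the leaderboard filters MODELS by
# release and by architecture; the function names below are hypothetical and not part
# of the leaderboard's actual API.
def models_for_release(release: str) -> dict[str, ModelMetadata]:
    """Return the subset of MODELS first included in the given leaderboard release."""
    return {name: meta for name, meta in MODELS.items() if meta.release == release}


def reasoning_model_names() -> list[str]:
    """Return the display names of all models tagged as reasoners."""
    return [name for name, meta in MODELS.items() if meta.model_arch == "Reasoning"]


# Example usage (hypothetical):
#   v3_models = models_for_release("V3")   # e.g. gpt-oss-20b, gpt-oss-120b, Qwen3-8B, ...
#   reasoners = reasoning_model_names()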