Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes; see the raw diff for the full change set.
- ACE_Plus_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +496 -0
 - Analog-Diffusion_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv +83 -0
 - CLIP-ViT-bigG-14-laion2B-39B-b160k_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +247 -0
 - ControlNet-diff-modules_finetunes_20250426_215237.csv_finetunes_20250426_215237.csv +9 -0
 - DeepCoder-14B-Preview_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +551 -0
 - DeepSeek-Coder-V2-Lite-Instruct_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +328 -0
 - DeepSeek-R1-Distill-Qwen-32B_finetunes_20250424_193500.csv_finetunes_20250424_193500.csv +0 -0
 - DeepSeek-V2-Chat_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +376 -0
 - DeepSeek-V3-Base_finetunes_20250424_193500.csv_finetunes_20250424_193500.csv +0 -0
 - DeepSeek-V3_finetunes_20250422_220003.csv +0 -0
 - DepthPro_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +199 -0
 - Double-Exposure-Diffusion_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +67 -0
 - F5-Spanish_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv +81 -0
 - Flux-Midjourney-Mix2-LoRA_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +104 -0
 - GuoFeng3_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +160 -0
 - Hermes-2-Pro-Llama-3-8B_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv +0 -0
 - Hermes-2-Theta-Llama-3-8B_finetunes_20250426_215237.csv_finetunes_20250426_215237.csv +355 -0
 - Higgs-Llama-3-70B_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +208 -0
 - HunyuanVideo_comfy_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv +13 -0
 - HunyuanVideo_finetunes_20250424_193500.csv_finetunes_20250424_193500.csv +0 -0
 - Illustration-Diffusion_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +15 -0
 - InfiniteYou_finetunes_20250425_143346.csv_finetunes_20250425_143346.csv +144 -0
 - Kokoro-82M_finetunes_20250422_220003.csv +450 -0
 - Llama-2-7b-chat-hf_finetunes_20250422_180448.csv +0 -0
 - Llama-2-7b-chat-hf_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv +0 -0
 - Mistral-NeMo-12B-Instruct_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +70 -0
 - Mistral-Trismegistus-7B_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +78 -0
 - Molmo-7B-D-0924_finetunes_20250425_143346.csv_finetunes_20250425_143346.csv +211 -0
 - NVLM-D-72B_finetunes_20250425_041137.csv_finetunes_20250425_041137.csv +732 -0
 - Nemotron-4-340B-Instruct_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv +0 -0
 - Nous-Hermes-Llama2-13b_finetunes_20250426_121513.csv_finetunes_20250426_121513.csv +0 -0
 - Phi-3-vision-128k-instruct_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv +367 -0
 - QVQ-72B-Preview_finetunes_20250425_143346.csv_finetunes_20250425_143346.csv +704 -0
 - QwQ-32B-GGUF_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv +125 -0
 - Qwen-14B_finetunes_20250426_212347.csv_finetunes_20250426_212347.csv +1109 -0
 - ReaderLM-v2_finetunes_20250425_143346.csv_finetunes_20250425_143346.csv +305 -0
 - SillyTavern-Presets-Sphiratrioth_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +171 -0
 - TableGPT2-7B_finetunes_20250426_215237.csv_finetunes_20250426_215237.csv +234 -0
 - TemporalNet_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv +41 -0
 - Wizard-Vicuna-13B-Uncensored_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +38 -0
 - Wizard-Vicuna-30B-Uncensored-GPTQ_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +325 -0
 - YOLOv8_finetunes_20250426_212347.csv_finetunes_20250426_212347.csv +0 -0
 - Yi-Coder-9B-Chat_finetunes_20250426_215237.csv_finetunes_20250426_215237.csv +232 -0
 - Ziya-LLaMA-13B-v1_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +218 -0
 - bce-embedding-base_v1_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv +630 -0
 - bert-base-chinese_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv +0 -0
 - bert-base-portuguese-cased_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +0 -0
 - cde-small-v1_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +0 -0
 - control_v1p_sd15_qrcode_monster_finetunes_20250424_193500.csv_finetunes_20250424_193500.csv +59 -0
 - detr-resnet-50_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv +0 -0
 
    	
ACE_Plus_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv ADDED
    
@@ -0,0 +1,496 @@
+model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
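The header row above is the schema shared by every finetunes CSV in this commit: one row per model, carrying its card text, metadata, tree depth, and counts of derived artifacts. A minimal sketch of reading one of these files, assuming pandas is available (pandas is not part of this repository; the filename is taken from the listing above):

```python
import pandas as pd

# Load one of the finetunes CSVs added in this commit. The quoted "card"
# column contains full multi-line model cards, which pandas handles with
# its default quoting rules.
path = "ACE_Plus_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv"
df = pd.read_csv(path)

# Inspect the derivative counts recorded for each model.
print(df[["model_id", "children_count", "adapters_count",
          "quantized_count", "merges_count", "spaces_count"]].head())
```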
+ali-vilab/ACE_Plus,"<p align=""center"">
+
+  <h2 align=""center""><img src=""assets/figures/icon.png"" height=16> ++: Instruction-Based Image Creation and Editing <br> via Context-Aware Content Filling </h2>
+
+  <p align=""center"">
+    <a href=""https://arxiv.org/abs/2501.02487""><img src='https://img.shields.io/badge/arXiv-ACE++-red' alt='Paper PDF'></a>
+    <a href='https://ali-vilab.github.io/ACE_plus_page/'><img src='https://img.shields.io/badge/Project_Page-ACE++-blue' alt='Project Page'></a>
+    <a href='https://github.com/modelscope/scepter'><img src='https://img.shields.io/badge/Scepter-ACE++-green'></a>
+    <a href='https://huggingface.co/spaces/scepter-studio/ACE-Plus'><img src='https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Space-orange'></a>
+    <a href='https://huggingface.co/ali-vilab/ACE_Plus/tree/main'><img src='https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Model-orange'></a>
+    <a href='https://modelscope.cn/models/iic/ACE_Plus/summary'><img src='https://img.shields.io/badge/ModelScope-Model-purple'></a>
+    <br>
+    <strong>Chaojie Mao</strong>
+    ·
+    <strong>Jingfeng Zhang</strong>
+    ·
+    <strong>Yulin Pan</strong>
+    ·
+    <strong>Zeyinzi Jiang</strong>
+    ·
+    <strong>Zhen Han</strong>
+    <br>
+    ·
+    <strong>Yu Liu</strong>
+    ·
+    <strong>Jingren Zhou</strong>
+    <br>
+    Tongyi Lab, Alibaba Group
+  </p>
+  <table align=""center"">
+    <tr>
+    <td>
+      <img src=""assets/ace_method/method++.png"">
+    </td>
+    </tr>
+  </table>
+
+## 📚 Introduction
+
+The original intention behind the design of ACE++ was to unify reference image generation, local editing,
+and controllable generation into a single framework, and to enable one model to adapt to a wider range of tasks.
+A more versatile model is often capable of handling more complex tasks. We have already released three LoRA models,
+focusing on portraits, objects, and regional editing, with the expectation that each would demonstrate strong adaptability
+within their respective domains. Undoubtedly, this presents certain challenges.
+
+We are currently training a fully fine-tuned model, which has now entered the final stage of quality tuning.
+We are confident it will be released soon. This model will support a broader range of capabilities and is
+expected to empower community developers to build even more interesting applications.
+
+## 📢 News
+- [x] **[2025.01.06]** Release the code and models of ACE++.
+- [x] **[2025.01.07]** Release the demo on [HuggingFace](https://huggingface.co/spaces/scepter-studio/ACE-Plus).
+- [x] **[2025.01.16]** Release the training code for LoRA.
+- [x] **[2025.02.15]** Collection of workflows in ComfyUI.
+- [x] **[2025.02.15]** Release the config for fully fine-tuning.
+- [x] **[2025.03.03]** Release a unified fft model for ACE++, supporting more image-to-image tasks.
+
+## 🔥 The unified fft model for ACE++
+Fully finetuning a composite model with ACE's data to support various editing and reference generation tasks through an instructive approach.
+
+During our experiments we found that the repainting task and the editing task conflict with each other. The edited image is concatenated with the noise along the channel dimension, but the repainting task fills the modified region with zero pixel values in the VAE's latent space, whereas the editing task keeps the RGB pixel values of the modified region in the VAE's latent space. Because the latter is close in distribution to the unmodified part of a repainting input, it is hard for the model to distinguish between the two tasks.
+
+To address this issue, we introduced 64 additional channels in the channel dimension to differentiate between these two tasks. In these channels we place the latent representation of the edited image's pixels, while keeping the other channels consistent with the repainting task. This approach significantly enhances the model's adaptability to different tasks.
+
+One consequence of this approach is that it changes the number of input channels of the FLUX-Fill-Dev model from 384 to 448. The specific configuration can be found in the [configuration file](config/ace_plus_fft.yaml).
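As a rough illustration of the channel arithmetic above (384 original input channels plus 64 new ones for the edited image's latents, giving 448), here is a hypothetical PyTorch sketch of widening an input projection while zero-initializing the new channels; the function name and layer sizes are illustrative assumptions, not the actual ACE++ or scepter code:

```python
import torch
import torch.nn as nn

def widen_input_proj(old_proj: nn.Linear, extra_channels: int = 64) -> nn.Linear:
    """Illustrative sketch: extend an input projection from 384 to
    384 + 64 = 448 channels, zero-initializing the 64 new channels."""
    old_in = old_proj.in_features                      # e.g. 384
    new_proj = nn.Linear(old_in + extra_channels, old_proj.out_features,
                         bias=old_proj.bias is not None)
    with torch.no_grad():
        new_proj.weight.zero_()
        new_proj.weight[:, :old_in] = old_proj.weight  # copy pretrained weights
        if old_proj.bias is not None:
            new_proj.bias.copy_(old_proj.bias)
    return new_proj

# Usage with made-up sizes: 384-channel packed latents -> 3072-dim hidden.
proj = widen_input_proj(nn.Linear(384, 3072))
print(proj.weight.shape)  # torch.Size([3072, 448])
```

Zero-initializing the extra weights keeps the widened model's initial behavior identical to the pretrained model, which matches the stated goal of keeping the other channels consistent with the repainting task.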
+
+### Examples
+<table><tbody>
+  <tr>
+    <td>Input Reference Image</td>
+    <td>Input Edit Image</td>
+    <td>Input Edit Mask</td>
+    <td>Output</td>
+    <td>Instruction</td>
+    <td>Function</td>
+  </tr>
+  <tr>
+    <td><img src=""./assets/samples/portrait/human_1.jpg"" width=""200""></td>
+    <td></td>
+    <td></td>
+    <td><img src=""./assets/samples/portrait/human_1_fft.webp"" width=""200""></td>
+    <td style=""word-wrap:break-word;word-break:break-all;"" width=""250px"";>""Maintain the facial features, A girl is wearing a neat police uniform and sporting a badge. She is smiling with a friendly and confident demeanor. The background is blurred, featuring a cartoon logo.""</td>
+    <td style=""word-wrap:break-word;word-break:break-all;"" width=""250px"";>""Character ID Consistency Generation""</td>
+  </tr>
+  <tr>
+    <td><img src=""./assets/samples/subject/subject_1.jpg"" width=""200""></td>
+    <td></td>
+    <td></td>
+    <td><img src=""./assets/samples/subject/subject_1_fft.webp"" width=""200""></td>
+    <td style=""word-wrap:break-word;word-break:break-all;"" width=""250px"";>""Display the logo in a minimalist style printed in white on a matte black ceramic coffee mug, alongside a steaming cup of coffee on a cozy cafe table.""</td>
+    <td style=""word-wrap:break-word;word-break:break-all;"" width=""250px"";>""Subject Consistency Generation""</td>
+  </tr>
+  <tr>
+    <td><img src=""./assets/samples/application/photo_editing/1_ref.png"" width=""200""></td>
+    <td><img src=""./assets/samples/application/photo_editing/1_2_edit.jpg"" width=""200""></td>
+    <td><img src=""./assets/samples/application/photo_editing/1_2_m.webp"" width=""200""></td>
+    <td><img src=""./assets/samples/application/photo_editing/1_2_fft.webp"" width=""200""></td>
+    <td style=""word-wrap:break-word;word-break:break-all;"" width=""250px"";>""The item is put on the table.""</td>
+    <td style=""word-wrap:break-word;word-break:break-all;"" width=""250px"";>""Subject Consistency Editing""</td>
+  </tr>
+  <tr>
+    <td><img src=""./assets/samples/application/logo_paste/1_ref.png"" width=""200""></td>
+    <td><img src=""./assets/samples/application/logo_paste/1_1_edit.png"" width=""200""></td>
+    <td><img src=""./assets/samples/application/logo_paste/1_1_m.png"" width=""200""></td>
+    <td><img src=""./assets/samples/application/logo_paste/1_1_fft.webp"" width=""200""></td>
+    <td style=""word-wrap:break-word;word-break:break-all;"" width=""250px"";>""The logo is printed on the headphones.""</td>
+    <td style=""word-wrap:break-word;word-break:break-all;"" width=""250px"";>""Subject Consistency Editing""</td>
+  </tr>
+  <tr>
+    <td><img src=""./assets/samples/application/try_on/1_ref.png"" width=""200""></td>
+    <td><img src=""./assets/samples/application/try_on/1_1_edit.png"" width=""200""></td>
+    <td><img src=""./assets/samples/application/try_on/1_1_m.png"" width=""200""></td>
+    <td><img src=""./assets/samples/application/try_on/1_1_fft.webp"" width=""200""></td>
+    <td style=""word-wrap:break-word;word-break:break-all;"" width=""250px"";>""The woman dresses this skirt.""</td>
+    <td style=""word-wrap:break-word;word-break:break-all;"" width=""250px"";>""Try On""</td>
+  </tr>
+  <tr>
+    <td><img src=""./assets/samples/application/movie_poster/1_ref.png"" width=""200""></td>
+    <td><img src=""./assets/samples/portrait/human_1.jpg"" width=""200""></td>
+    <td><img src=""./assets/samples/application/movie_poster/1_2_m.webp"" width=""200""></td>
+    <td><img src=""./assets/samples/application/movie_poster/1_1_fft.webp"" width=""200""></td>
+    <td style=""word-wrap:break-word;word-break:break-all;"" width=""250px"";>""{image}, the man faces the camera.""</td>
+    <td style=""word-wrap:break-word;word-break:break-all;"" width=""250px"";>""Face swap""</td>
+  </tr>
+  <tr>
+    <td></td>
+    <td><img src=""./assets/samples/application/sr/sr_tiger.png"" width=""200""></td>
+    <td><img src=""./assets/samples/application/sr/sr_tiger_m.webp"" width=""200""></td>
+    <td><img src=""./assets/samples/application/sr/sr_tiger_fft.webp"" width=""200""></td>
+    <td style=""word-wrap:break-word;word-break:break-all;"" width=""250px"";>""{image} features a close-up of a young, furry tiger cub on a rock. The tiger, which appears to be quite young, has distinctive orange, black, and white striped fur, typical of tigers. The cub's eyes have a bright and curious expression, and its ears are perked up, indicating alertness. The cub seems to be in the act of climbing or resting on the rock. The background is a blurred grassland with trees, but the focus is on the cub, which is vividly colored while the rest of the image is in grayscale, drawing attention to the tiger's details. The photo captures a moment in the wild, depicting the charming and tenacious nature of this young tiger, as well as its typical interaction with the environment.""</td>
+    <td style=""word-wrap:break-word;word-break:break-all;"" width=""250px"";>""Super-resolution""</td>
+  </tr>
+  <tr>
+    <td></td>
+    <td><img src=""./assets/samples/application/photo_editing/1_ref.png"" width=""200""></td>
+    <td><img src=""./assets/samples/application/photo_editing/1_1_orm.webp"" width=""200""></td>
+    <td><img src=""./assets/samples/application/regional_editing/1_1_fft.webp"" width=""200""></td>
+    <td style=""word-wrap:break-word;word-break:break-all;"" width=""250px"";>""a blue hand""</td>
+    <td style=""word-wrap:break-word;word-break:break-all;"" width=""250px"";>""Regional Editing""</td>
+  </tr>
+  <tr>
+    <td></td>
+    <td><img src=""./assets/samples/application/photo_editing/1_ref.png"" width=""200""></td>
+    <td><img src=""./assets/samples/application/photo_editing/1_1_rm.webp"" width=""200""></td>
+    <td><img src=""./assets/samples/application/regional_editing/1_2_fft.webp"" width=""200""></td>
+    <td style=""word-wrap:break-word;word-break:break-all;"" width=""250px"";>""Mechanical hands like a robot""</td>
+    <td style=""word-wrap:break-word;word-break:break-all;"" width=""250px"";>""Regional Editing""</td>
+  </tr>
+  <tr>
+    <td></td>
+    <td><img src=""./assets/samples/control/1_1_recolor.webp"" width=""200""></td>
+    <td><img src=""./assets/samples/control/1_1_m.webp"" width=""200""></td>
+    <td><img src=""./assets/samples/control/1_1_fft_recolor.webp"" width=""200""></td>
+    <td style=""word-wrap:break-word;word-break:break-all;"" width=""250px"";>""{image} Beautiful female portrait, Robot with smooth White transparent carbon shell, rococo detailing, Natural lighting, Highly detailed, Cinematic, 4K.""</td>
+    <td style=""word-wrap:break-word;word-break:break-all;"" width=""250px"";>""Recolorizing""</td>
+  </tr>
+  <tr>
+    <td></td>
+    <td><img src=""./assets/samples/control/1_1_depth.webp"" width=""200""></td>
+    <td><img src=""./assets/samples/control/1_1_m.webp"" width=""200""></td>
+    <td><img src=""./assets/samples/control/1_1_fft_depth.webp"" width=""200""></td>
+    <td style=""word-wrap:break-word;word-break:break-all;"" width=""250px"";>""{image} Beautiful female portrait, Robot with smooth White transparent carbon shell, rococo detailing, Natural lighting, Highly detailed, Cinematic, 4K.""</td>
+    <td style=""word-wrap:break-word;word-break:break-all;"" width=""250px"";>""Depth Guided Generation""</td>
+  </tr>
+  <tr>
+    <td></td>
+    <td><img src=""./assets/samples/control/1_1_contourc.webp"" width=""200""></td>
+    <td><img src=""./assets/samples/control/1_1_m.webp"" width=""200""></td>
+    <td><img src=""./assets/samples/control/1_1_fft_contour.webp"" width=""200""></td>
+    <td style=""word-wrap:break-word;word-break:break-all;"" width=""250px"";>""{image} Beautiful female portrait, Robot with smooth White transparent carbon shell, rococo detailing, Natural lighting, Highly detailed, Cinematic, 4K.""</td>
+    <td style=""word-wrap:break-word;word-break:break-all;"" width=""250px"";>""Contour Guided Generation""</td>
+  </tr>
+</tbody>
+</table>
+
+
+## ComfyUI Workflows in the community
+We are deeply grateful to the community developers for building many fascinating applications based on the ACE++ series of models.
+During this process, we have received valuable feedback, particularly regarding artifacts in generated images and the stability of the results.
+In response to these issues, many developers have proposed creative solutions, which have greatly inspired us, and we pay tribute to them.
+At the same time, we will take these concerns into account in our further optimization efforts, carefully evaluating and testing before releasing new models.
+
+In the table below, we have briefly listed some workflows for everyone to use.
+
+<table><tbody>
+  <tr>
+    <td>Workflow</td>
+    <td>Author</td>
+    <td>Example</td>
+  </tr>
+  <tr>
+    <td><a href=""https://www.runninghub.cn/post/1890052281759571970""> 【飞翔鲸】王炸!FLUX+ACE++换头 </a> </td>
+    <td><a href=""https://www.runninghub.cn/user-center/1851827947798740994""> 飞翔鲸 </a></td>
+    <td><img src=""./assets/comfyui/feixiangjing_face.png"" height=""200""></td>
+  </tr>
+  <tr>
+    <td><a href=""https://www.runninghub.cn/post/1890014204382916609""> 最新ACE++ Redux 万物迁移 AI模特换装 UP 楚门的AI世界 </a> </td>
+    <td><a href=""https://www.runninghub.cn/user-center/1865415077888405506""> 楚门的AI世界 </a></td>
+    <td><img src=""./assets/comfyui/chumen_tryon.jpg"" height=""200""></td>
+  </tr>
+  <tr>
+    <td><a href=""https://openart.ai/workflows/t8star/ace-plusfillredux/bgQDNz8SeySMDqn13ZBv""> Ace Plus+Fill+Redux稳定无抽卡换装工作流</a> </td>
+    <td><a href=""https://openart.ai/workflows/profile/t8star?sort=latest""> T8star-Aix </a></td>
+    <td><img src=""./assets/comfyui/t8_star_tryon.jpg"" height=""200""></td>
+  </tr>
+  <tr>
+    <td><a href=""https://openart.ai/workflows/t8star/ace-plusfillredux/ifIvaWXW9QkLtNV405j7""> Ace Plus+Fill+Redux稳定少抽卡标志工作流</a> </td>
+    <td><a href=""https://openart.ai/workflows/profile/t8star?sort=latest""> T8star-Aix </a></td>
+    <td><img src=""./assets/comfyui/t8_star_logo.jpg"" height=""200""></td>
+  </tr>
+  <tr>
+    <td><a href=""https://openart.ai/workflows/t8star/ace-plusfillredux/WdwUwGXPLHhnSOlSEfTg""> Ace Plus+Fill+Redux稳定无抽卡换脸工作流</a> </td>
+    <td><a href=""https://openart.ai/workflows/profile/t8star?sort=latest""> T8star-Aix </a></td>
+    <td><img src=""./assets/comfyui/t8_star_face.jpg"" height=""200""></td>
+  </tr>
+  <tr>
+    <td><a href=""https://openart.ai/workflows/cat_untimely_42/ace-face-swap-in-different-styles/VocvdfQrvDhmKNLEBwJY""> ace++ face swap in different styles </a> </td>
+    <td><a href=""https://openart.ai/workflows/profile/cat_untimely_42?sort=latest""> jax </a></td>
+    <td><img src=""./assets/comfyui/jax_face_swap.jpg"" height=""200""></td>
+  </tr>
+  <tr>
+    <td><a href=""https://openart.ai/workflows/leeguandong/fllux-ace-subject-without-reference-image/HjYf6Eae2PRGACJWXdrE""> fllux ace++ subject without reference image </a> </td>
+    <td><a href=""https://openart.ai/workflows/profile/leeguandong?sort=latest""> leeguandong </a></td>
+    <td><img src=""./assets/comfyui/leeguandong_subject.jpg"" height=""200""></td>
+  </tr>
+  <tr>
+    <td><a href=""https://openart.ai/workflows/whale_waterlogged_60/scepter-ace-more-convenient-replacement-of-everything/gjAsh5rGjfC6OEB2AUZv""> Scepter-ACE++ More convenient replacement of everything</a> </td>
+    <td><a href=""https://openart.ai/workflows/profile/whale_waterlogged_60?sort=latest""> HaoBeen </a></td>
+    <td><img src=""./assets/comfyui/haobeen_ace_plus.jpg"" height=""200""></td>
+  </tr>
+</tbody>
+</table>
+
+Additionally, many bloggers have published tutorials on how to use it, which are listed in the table below.
+
+<table><tbody>
+  <tr>
+    <td>Tutorial</td>
+  </tr>
+  <tr>
+    <td><a href=""https://www.youtube.com/watch?v=5OwcxugdWxI""> Best Faceswapper I've Seen. ACE++ in ComfyUI. </a> </td>
+  </tr>
+  <tr>
+    <td><a href=""https://www.youtube.com/watch?v=2fgT35H_tuE&pp=ygUIYWNlIHBsdXM%3D""> ACE ++ In ComfyUI All-round Creator & Editor - More Than Just A Faceswap AI </a> </td>
+  </tr>
+  <tr>
+    <td><a href=""https://www.youtube.com/watch?v=XU376PzgnXc""> Ai绘画进阶140-咦?大家用的都不对?!Ace Plus工作流正确搭建方式及逻辑,参数详解,Flux Fill,Redux联用-T8 Comfyui教程</a> </td>
+  </tr>
+  <tr>
+    <td><a href=""https://www.youtube.com/watch?v=1cbOkN0mTw0""> ace++:告别 Lora 训练,无需pulid,轻松打造专属角色! | No Lora Training, Easily Create Exclusive Characters!</a> </td>
+  </tr>
+  <tr>
+    <td><a href=""https://www.youtube.com/watch?v=0wMoWSTm5Hc""> Ace++ and Flux Fill: Advanced Face Swapping Made Easy in ComfyUI | No Lora Training, Easily Create Exclusive Characters!</a> </td>
+  </tr>
+  <tr>
+    <td><a href=""https://www.youtube.com/watch?v=7GrkIFuRQAc""> ComfyUI - ACE Plus Subject Portrait Lora </a> </td>
+  </tr>
+  <tr>
+    <td><a href=""https://www.bilibili.com/video/BV1HiKpeuE8o/?spm_id_from=333.337.search-card.all.click&vd_source=927630f34c77eee560afd69cfdba3f47""> 🤗AI一致性技术新突破!ACE++技术一致性comfyui工作流🍋 </a> </td>
+  </tr>
+  <tr>
+    <td><a href=""https://www.bilibili.com/video/BV1obN9enEvp/?spm_id_from=333.337.search-card.all.click&vd_source=927630f34c77eee560afd69cfdba3f47""> ComfyUI 第55集 人像换脸 FLUX的FILL模型+ACE LORA </a> </td>
+  </tr>
+  <tr>
+    <td><a href=""https://www.bilibili.com/video/BV1pPN3eBEtr/?spm_id_from=333.337.search-card.all.click&vd_source=927630f34c77eee560afd69cfdba3f47""> 换装变脸贴logo,无所不能的Ace_Plus lora </a> </td>
+  </tr>
+</tbody>
+</table>
+
+
+## 🔥 ACE Models
+ACE++ provides a comprehensive toolkit for image editing and generation to support various applications. We encourage developers to choose the appropriate model based on their own scenarios and to fine-tune their models using data from their specific scenarios to achieve more stable results.
+
+
+
+
+### ACE++ Portrait
+Portrait-consistent generation to maintain the consistency of the portrait.
+
+<table><tbody>
+  <tr>
+    <td>Tuning Method</td>
+    <td>Input</td>
+    <td>Output</td>
+    <td>Instruction</td>
+    <td>Models</td>
+  </tr>
+  <tr>
+    <td>LoRA <br>+ ACE Data</td>
+    <td><img src=""./assets/samples/portrait/human_1.jpg"" width=""200""></td>
+    <td><img src=""./assets/samples/portrait/human_1_1.jpg"" width=""200""></td>
+    <td style=""word-wrap:break-word;word-break:break-all;"" width=""250px"";>""Maintain the facial features. A girl is wearing a neat police uniform and sporting a badge. She is smiling with a friendly and confident demeanor. The background is blurred, featuring a cartoon logo.""</td>
+    <td align=""center"" style=""word-wrap:break-word;word-break:break-all;"" width=""200px"";><a href=""https://www.modelscope.cn/models/iic/ACE_Plus/""><img src=""https://img.shields.io/badge/ModelScope-Model-blue"" alt=""ModelScope link""> </a> <a href=""https://huggingface.co/ali-vilab/ACE_Plus/tree/main/portrait/""><img src=""https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Model-yellow"" alt=""HuggingFace link""> </a> </td>
+  </tr>
+</tbody>
+</table>
+
+Models' scepter_path:
+- **ModelScope:** ms://iic/ACE_Plus@portrait/xxxx.safetensors
+- **HuggingFace:** hf://ali-vilab/ACE_Plus@portrait/xxxx.safetensors
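The ms:// and hf:// strings above are scepter-style model URIs: a ModelScope or Hugging Face repo id, followed by @ and the file path inside the repo. A hedged sketch of resolving the hf:// form with huggingface_hub (the helper name is made up and this is not scepter's actual resolver; the xxxx.safetensors placeholder is left as-is because the card does not name the file):

```python
from huggingface_hub import hf_hub_download

def resolve_hf_scepter_path(path: str) -> str:
    """Hypothetical helper: turn 'hf://<repo>@<file>' into a local file path
    by downloading the file from the Hugging Face Hub."""
    assert path.startswith("hf://")
    repo_id, _, filename = path[len("hf://"):].partition("@")
    return hf_hub_download(repo_id=repo_id, filename=filename)

# Example (substitute a real .safetensors name from ali-vilab/ACE_Plus before running):
# local_path = resolve_hf_scepter_path("hf://ali-vilab/ACE_Plus@portrait/xxxx.safetensors")
```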
+
+
+### ACE++ Subject
+Subject-driven image generation task to maintain the consistency of a specific subject in different scenes.
+<table><tbody>
+  <tr>
+    <td>Tuning Method</td>
+    <td>Input</td>
+    <td>Output</td>
+    <td>Instruction</td>
+    <td>Models</td>
+  </tr>
+  <tr>
+    <td>LoRA <br>+ ACE Data</td>
+    <td><img src=""./assets/samples/subject/subject_1.jpg"" width=""200""></td>
+    <td><img src=""./assets/samples/subject/subject_1_1.jpg"" width=""200""></td>
+    <td style=""word-wrap:break-word;word-break:break-all;"" width=""250px"";>""Display the logo in a minimalist style printed in white on a matte black ceramic coffee mug, alongside a steaming cup of coffee on a cozy cafe table.""</td>
+    <td align=""center"" style=""word-wrap:break-word;word-break:break-all;"" width=""200px"";><a href=""https://www.modelscope.cn/models/iic/ACE_Plus/""><img src=""https://img.shields.io/badge/ModelScope-Model-blue"" alt=""ModelScope link""> </a> <a href=""https://huggingface.co/ali-vilab/ACE_Plus/tree/main/subject/""><img src=""https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Model-yellow"" alt=""HuggingFace link""> </a> </td>
+  </tr>
+</tbody>
+</table>
+
+Models' scepter_path:
+- **ModelScope:** ms://iic/ACE_Plus@subject/xxxx.safetensors
+- **HuggingFace:** hf://ali-vilab/ACE_Plus@subject/xxxx.safetensors
+
+
+### ACE++ LocalEditing
+Redrawing the mask area of images while maintaining the original structural information of the edited area.
+<table><tbody>
+  <tr>
+    <td>Tuning Method</td>
+    <td>Input</td>
+    <td>Output</td>
+    <td>Instruction</td>
+    <td>Models</td>
+  </tr>
+  <tr>
+    <td>LoRA <br>+ ACE Data</td>
+    <td><img src=""./assets/samples/local/local_1.webp"" width=""200""><br><img src=""./assets/samples/local/local_1_m.webp"" width=""200""></td>
+    <td><img src=""./assets/samples/local/local_1_1.jpg"" width=""200""></td>
+    <td style=""word-wrap:break-word;word-break:break-all;"" width=""250px"";>""By referencing the mask, restore a partial image from the doodle {image} that aligns with the textual explanation: ""1 white old owl"".""</td>
+    <td align=""center"" style=""word-wrap:break-word;word-break:break-all;"" width=""200px"";><a href=""https://www.modelscope.cn/models/iic/ACE_Plus/""><img src=""https://img.shields.io/badge/ModelScope-Model-blue"" alt=""ModelScope link""> </a> <a href=""https://huggingface.co/ali-vilab/ACE_Plus/tree/main/local_editing/""><img src=""https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Model-yellow"" alt=""HuggingFace link""> </a> </td>
+  </tr>
+</tbody>
+</table>
+
+Models' scepter_path:
+- **ModelScope:** ms://iic/ACE_Plus@local_editing/xxxx.safetensors
+- **HuggingFace:** hf://ali-vilab/ACE_Plus@local_editing/xxxx.safetensors
+
+## 🔥 Applications
+The ACE++ model supports a wide range of downstream tasks through simple adaptations. Here are some examples, and we look forward to seeing the community explore even more exciting applications utilizing the ACE++ model.
+
+<table><tbody>
+  <tr>
+    <th align=""center"" colspan=""1"">Application</th>
+    <th align=""center"" colspan=""1"">ACE++ Model</th>
+    <th align=""center"" colspan=""5"">Examples</th>
+  </tr>
+  <tr>
+    <td>Try On</td>
+    <td>ACE++ Subject</td>
+    <td><img src=""./assets/samples/application/try_on/1_ref.png"" width=""200""></td>
+    <td><img src=""./assets/samples/application/try_on/1_1_edit.png"" width=""200""></td>
+    <td><img src=""./assets/samples/application/try_on/1_1_m.png"" width=""200""></td>
+    <td><img src=""./assets/samples/application/try_on/1_1_res.png"" width=""200""></td>
+    <td style=""word-wrap:break-word;word-break:break-all;"" width=""100px"";>""The woman dresses this skirt.""</td>
+  </tr>
+  <tr>
+    <td>Logo Paste</td>
+    <td>ACE++ Subject</td>
+    <td><img src=""./assets/samples/application/logo_paste/1_ref.png"" width=""200""></td>
+    <td><img src=""./assets/samples/application/logo_paste/1_1_edit.png"" width=""200""></td>
+    <td><img src=""./assets/samples/application/logo_paste/1_1_m.png"" width=""200""></td>
+    <td><img src=""./assets/samples/application/logo_paste/1_1_res.webp"" width=""200""></td>
+    <td style=""word-wrap:break-word;word-break:break-all;"" width=""100px"";>""The logo is printed on the headphones.""</td>
+  </tr>
+  <tr>
+    <td>Photo Editing</td>
+    <td>ACE++ Subject</td>
                <td><img src=""./assets/samples/application/photo_editing/1_ref.png"" width=""200""></td>
         
     | 
| 384 | 
         
            +
                <td><img src=""./assets/samples/application/photo_editing/1_1_edit.png"" width=""200""></td>
         
     | 
| 385 | 
         
            +
                <td><img src=""./assets/samples/application/photo_editing/1_1_m.png"" width=""200""></td>
         
     | 
| 386 | 
         
            +
                <td><img src=""./assets/samples/application/photo_editing/1_1_res.jpg"" width=""200""></td>
         
     | 
| 387 | 
         
            +
                <td style=""word-wrap:break-word;word-break:break-all;"" width=""100px"";>""The item is put on the ground.""</td>
         
     | 
| 388 | 
         
            +
              </tr>
         
     | 
| 389 | 
         
            +
              <tr>
         
     | 
| 390 | 
         
            +
                <td>Movie Poster Editor</td>
         
     | 
| 391 | 
         
            +
                <td>ACE++ Portrait</td>
         
     | 
| 392 | 
         
            +
                <td><img src=""./assets/samples/application/movie_poster/1_ref.png"" width=""200""></td>
         
     | 
| 393 | 
         
            +
                <td><img src=""./assets/samples/application/movie_poster/1_1_edit.png"" width=""200""></td>
         
     | 
| 394 | 
         
            +
                <td><img src=""./assets/samples/application/movie_poster/1_1_m.png"" width=""200""></td>
         
     | 
| 395 | 
         
            +
                <td><img src=""./assets/samples/application/movie_poster/1_1_res.webp"" width=""200""></td>
         
     | 
| 396 | 
         
            +
                <td style=""word-wrap:break-word;word-break:break-all;"" width=""100px"";>""The man is facing the camera and is smiling.""</td>
         
     | 
| 397 | 
         
            +
              </tr>
         
     | 
| 398 | 
         
            +
            </tbody>
         
     | 
| 399 | 
         
            +
            </table>
         
     | 
| 400 | 
         
            +
             
     | 
| 401 | 
         
            +
            ## ⚙️️ Installation
         
     | 
| 402 | 
         
            +
            Download the code using the following command:
         
     | 
| 403 | 
         
            +
            ```bash
         
     | 
| 404 | 
         
            +
            git clone https://github.com/ali-vilab/ACE_plus.git
         
     | 
| 405 | 
         
            +
            ```
         
     | 
| 406 | 
         
            +
             
     | 
| 407 | 
         
            +
            Install the necessary packages with `pip`: 
         
     | 
| 408 | 
         
            +
            ```bash
         
     | 
| 409 | 
         
            +
            cd ACE_plus
         
     | 
| 410 | 
         
            +
            pip install -r requirements.txt
         
     | 
| 411 | 
         
            +
            ```
         
ACE++ depends on FLUX.1-Fill-dev as its base model, which you can download from [black-forest-labs/FLUX.1-Fill-dev](https://huggingface.co/black-forest-labs/FLUX.1-Fill-dev).
To run the inference code or the Gradio demo, set the environment variables that tell the code where each model is located.
For model preparation, the following download methods are supported; the relevant settings are summarized below.
         

| Model Downloading Method | Clone to Local Path | Automatic Downloading during Runtime<br>(Setting the Environment Variables using scepter_path in [ACE Models](#-ace-models)) |
|:---:|:---:|:---:|
| Environment Variables Setting | <pre><code>export FLUX_FILL_PATH=""path/to/FLUX.1-Fill-dev""<br>export PORTRAIT_MODEL_PATH=""path/to/ACE++ PORTRAIT PATH""<br>export SUBJECT_MODEL_PATH=""path/to/ACE++ SUBJECT PATH""<br>export LOCAL_MODEL_PATH=""path/to/ACE++ LOCAL EDITING PATH""</code></pre> | <pre><code>export FLUX_FILL_PATH=""hf://black-forest-labs/FLUX.1-Fill-dev""<br>export PORTRAIT_MODEL_PATH=""${scepter_path}""<br>export SUBJECT_MODEL_PATH=""${scepter_path}""<br>export LOCAL_MODEL_PATH=""${scepter_path}""</code></pre> |
         

## 🚀 Inference
Once the environment variables described in [Installation](#-installation) are set, you can run the provided examples or test your own samples by executing `infer.py`.
         
The relevant commands are as follows:
```bash
export FLUX_FILL_PATH=""hf://black-forest-labs/FLUX.1-Fill-dev""
export PORTRAIT_MODEL_PATH=""ms://iic/ACE_Plus@portrait/comfyui_portrait_lora64.safetensors""
export SUBJECT_MODEL_PATH=""ms://iic/ACE_Plus@subject/comfyui_subject_lora16.safetensors""
export LOCAL_MODEL_PATH=""ms://iic/ACE_Plus@local_editing/comfyui_local_lora16.safetensors""
# Use the model from huggingface
# export PORTRAIT_MODEL_PATH=""hf://ali-vilab/ACE_Plus@portrait/comfyui_portrait_lora64.safetensors""
# export SUBJECT_MODEL_PATH=""hf://ali-vilab/ACE_Plus@subject/comfyui_subject_lora16.safetensors""
# export LOCAL_MODEL_PATH=""hf://ali-vilab/ACE_Plus@local_editing/comfyui_local_lora16.safetensors""
python infer.py
```
         

## 🚀 Train
We provide training code that allows users to train on their own data. Refer to 'data/train.csv' and 'data/eval.csv' to construct your training and evaluation data, respectively. Fields are separated by '#;#'.
The six required fields are explained below.
         
```text
""edit_image"": represents the input image for the editing task. If it is not an editing task but a reference-generation task, this field can be left empty.
""edit_mask"": represents the input image mask for the editing task, used to specify the editing area. If it is not an editing task but a reference-generation task, this field can be left empty.
""ref_image"": represents the input image for the reference image generation task; if it is a pure editing task, this field can be left empty.
""target_image"": represents the generated target image and cannot be empty.
""prompt"": represents the prompt for the generation task.
""data_type"": represents the type of data, which can be 'portrait', 'subject', or 'local'. This field is not used in the training phase.
```
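As an illustration (a minimal sketch, not part of the repository), the snippet below assembles one '#;#'-separated row from these six fields and appends it to data/train.csv; the image paths and prompt are placeholders, and the field order is assumed to follow the list above.

```python
# Hedged sketch: build one training row using the six fields described above.
# Paths and prompt are placeholders; the field order follows the list above.
fields = {
    "edit_image": "",                       # empty: this sample is reference generation, not editing
    "edit_mask": "",                        # empty for the same reason
    "ref_image": "images/ref_0001.jpg",     # placeholder reference image
    "target_image": "images/tgt_0001.jpg",  # placeholder target image
    "prompt": "Maintain the facial features; a person wearing a red scarf in the snow.",
    "data_type": "portrait",
}

# ACE++ training data uses '#;#' as the field separator.
row = "#;#".join(fields[k] for k in
                 ("edit_image", "edit_mask", "ref_image", "target_image", "prompt", "data_type"))

with open("data/train.csv", "a", encoding="utf-8") as f:
    f.write(row + "\n")
```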
         

All parameters related to training are stored in 'train_config/ace_plus_lora.yaml'. To run the training code, execute the following command.

```bash
export FLUX_FILL_PATH=""hf://black-forest-labs/FLUX.1-Fill-dev""
python run_train.py --cfg train_config/ace_plus_lora.yaml
```

The models trained by ACE++ can be found in ./examples/exp_example/xxxx/checkpoints/xxxx/0_SwiftLoRA/comfyui_model.safetensors.
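To run inference with a LoRA you trained yourself, one option (a hedged sketch, not an official script; the checkpoint path keeps the 'xxxx' placeholders from the sentence above) is to point the corresponding environment variable at that checkpoint before invoking infer.py:

```python
import os
import subprocess

# Hedged sketch: point one model-path variable at your own trained LoRA.
# Replace the 'xxxx' placeholders with your actual experiment and step names;
# the remaining *_MODEL_PATH variables still need to be set as in the Inference section.
os.environ["FLUX_FILL_PATH"] = "hf://black-forest-labs/FLUX.1-Fill-dev"
os.environ["PORTRAIT_MODEL_PATH"] = (
    "./examples/exp_example/xxxx/checkpoints/xxxx/0_SwiftLoRA/comfyui_model.safetensors"
)
subprocess.run(["python", "infer.py"], check=True)
```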
         


## 💻 Demo
We have built a GUI demo based on Gradio to help users better utilize the ACE++ model. Just execute the following command.
```bash
export FLUX_FILL_PATH=""hf://black-forest-labs/FLUX.1-Fill-dev""
export PORTRAIT_MODEL_PATH=""ms://iic/ACE_Plus@portrait/comfyui_portrait_lora64.safetensors""
export SUBJECT_MODEL_PATH=""ms://iic/ACE_Plus@subject/comfyui_subject_lora16.safetensors""
export LOCAL_MODEL_PATH=""ms://iic/ACE_Plus@local_editing/comfyui_local_lora16.safetensors""
# Use the model from huggingface
# export PORTRAIT_MODEL_PATH=""hf://ali-vilab/ACE_Plus@portrait/comfyui_portrait_lora64.safetensors""
# export SUBJECT_MODEL_PATH=""hf://ali-vilab/ACE_Plus@subject/comfyui_subject_lora16.safetensors""
# export LOCAL_MODEL_PATH=""hf://ali-vilab/ACE_Plus@local_editing/comfyui_local_lora16.safetensors""
python demo.py
```
         

## 📚 Limitations
* For certain tasks, such as deleting and adding objects, instruction following is still imperfect. For adding and replacing objects, we recommend trying the repainting approach of the local editing model instead.
* The generated results may contain artifacts, especially in the generation of hands, which still exhibit distortions.
* The current version of ACE++ is still in the development stage. We are working on improving the model's performance and adding more features.

## 📝 Citation
ACE++ is a post-training model based on the FLUX.1-dev series from black-forest-labs; please adhere to its open-source license. The test materials used in ACE++ come from the internet and are intended for academic research and communication purposes. If you are an original creator and have concerns, please contact us and we will remove them.
         

If you use this model in your research, please cite the works of FLUX.1-dev and the following papers:
```bibtex
@article{mao2025ace++,
  title={ACE++: Instruction-Based Image Creation and Editing via Context-Aware Content Filling},
  author={Mao, Chaojie and Zhang, Jingfeng and Pan, Yulin and Jiang, Zeyinzi and Han, Zhen and Liu, Yu and Zhou, Jingren},
  journal={arXiv preprint arXiv:2501.02487},
  year={2025}
}
```
```bibtex
@article{han2024ace,
  title={ACE: All-round Creator and Editor Following Instructions via Diffusion Transformer},
  author={Han, Zhen and Jiang, Zeyinzi and Pan, Yulin and Zhang, Jingfeng and Mao, Chaojie and Xie, Chenwei and Liu, Yu and Zhou, Jingren},
  journal={arXiv preprint arXiv:2410.00086},
  year={2024}
}
```
         
            ","{""id"": ""ali-vilab/ACE_Plus"", ""author"": ""ali-vilab"", ""sha"": ""b1ba14af105f5cd7c1f7e6a896ce459c1bf334e4"", ""last_modified"": ""2025-03-14 22:14:34+00:00"", ""created_at"": ""2025-01-06 08:54:16+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 166, ""downloads_all_time"": null, ""likes"": 216, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""arxiv:2501.02487"", ""arxiv:2410.00086"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": null, ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='ace_plus_fft.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/ace_method/method++.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/comfyui/chumen_tryon.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/comfyui/feixiangjing_face.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/comfyui/haobeen_ace_plus.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/comfyui/jax_face_swap.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/comfyui/leeguandong_subject.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/comfyui/t8_star_face.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/comfyui/t8_star_logo.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/comfyui/t8_star_tryon.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/figures/icon.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/application/logo_paste/1_1_edit.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/application/logo_paste/1_1_fft.webp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/application/logo_paste/1_1_m.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/application/logo_paste/1_1_res.webp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/application/logo_paste/1_ref.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/application/movie_poster/1_1_edit.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/application/movie_poster/1_1_fft.webp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/application/movie_poster/1_1_m.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/application/movie_poster/1_1_res.webp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/application/movie_poster/1_2_m.webp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/application/movie_poster/1_ref.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/application/photo_editing/1_1_am.webp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/application/photo_editing/1_1_edit.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/application/photo_editing/1_1_m.png', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/application/photo_editing/1_1_orm.webp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/application/photo_editing/1_1_res.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/application/photo_editing/1_1_rm.webp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/application/photo_editing/1_2_edit.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/application/photo_editing/1_2_fft.webp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/application/photo_editing/1_2_m.webp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/application/photo_editing/1_ref.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/application/regional_editing/1_1_fft.webp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/application/regional_editing/1_2_fft.webp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/application/sr/sr_tiger.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/application/sr/sr_tiger_fft.webp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/application/sr/sr_tiger_m.webp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/application/try_on/1_1_edit.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/application/try_on/1_1_fft.webp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/application/try_on/1_1_m.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/application/try_on/1_1_res.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/application/try_on/1_ref.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/control/1_1.webp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/control/1_1_contourc.webp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/control/1_1_depth.webp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/control/1_1_fft_contour.webp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/control/1_1_fft_depth.webp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/control/1_1_fft_recolor.webp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/control/1_1_m.webp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/control/1_1_recolor.webp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/local/local_1.webp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/local/local_1_1.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/local/local_1_m.webp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/portrait/human_1.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/portrait/human_1_1.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/portrait/human_1_fft.webp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/subject/subject_1.jpg', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='assets/samples/subject/subject_1_1.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/samples/subject/subject_1_fft.webp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='local_editing/comfyui_local_lora16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='portrait/comfyui_portrait_lora64.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='subject/comfyui_subject_lora16.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [""scepter-studio/ACE-Plus"", ""r4ruixi/ACE-Plus"", ""WolseyTheCat/fc-simple""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-14 22:14:34+00:00"", ""cardData"": null, ""transformersInfo"": null, ""_id"": ""677b9a385823d21267cc7d10"", ""modelId"": ""ali-vilab/ACE_Plus"", ""usedStorage"": 36640362544}",0,,0,,0,,0,,0,"WolseyTheCat/fc-simple, huggingface/InferenceSupport/discussions/new?title=ali-vilab/ACE_Plus&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bali-vilab%2FACE_Plus%5D(%2Fali-vilab%2FACE_Plus)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, r4ruixi/ACE-Plus, scepter-studio/ACE-Plus",4
         
Analog-Diffusion_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv
ADDED
@@ -0,0 +1,83 @@
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
wavymulder/Analog-Diffusion,"---
language:
- en
thumbnail: ""https://huggingface.co/wavymulder/Analog-Diffusion/resolve/main/images/page1.jpg""
license: creativeml-openrail-m
tags:
- stable-diffusion
- stable-diffusion-diffusers
- text-to-image
- safetensors
- diffusers
inference: true
---


**Analog Diffusion**

[*CKPT DOWNLOAD LINK*](https://huggingface.co/wavymulder/Analog-Diffusion/resolve/main/analog-diffusion-1.0.ckpt) - This is a dreambooth model trained on a diverse set of analog photographs.

In your prompt, use the activation token: `analog style`

You may need to use the words `blur` `haze` `naked` in your negative prompts. My dataset did not include any NSFW material but the model seems to be pretty horny. Note that using `blur` and `haze` in your negative prompt can give a sharper image but also a less pronounced analog film effect.

Trained from 1.5 with VAE.
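For illustration only, a hedged diffusers sketch (assuming the checkpoint loads as a standard Stable Diffusion 1.5 pipeline; the prompt, settings, and output filename are placeholders) that applies the activation token and the negative-prompt advice above:

```python
# Minimal sketch, not an official snippet: assumes the diffusers
# StableDiffusionPipeline API and a CUDA device.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "wavymulder/Analog-Diffusion", torch_dtype=torch.float16
).to("cuda")

image = pipe(
    prompt="analog style portrait of a man standing in a wheat field",  # 'analog style' is the activation token
    negative_prompt="blur, haze",  # per the note above; sharper but less film-like
    num_inference_steps=30,
    guidance_scale=7.5,
).images[0]
image.save("analog_example.png")
```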
         

Please see [this document where I share the parameters (prompt, sampler, seed, etc.) used for all example images.](https://huggingface.co/wavymulder/Analog-Diffusion/resolve/main/parameters_used_examples.txt)

## Gradio

We support a [Gradio](https://github.com/gradio-app/gradio) Web UI to run Analog-Diffusion:

[Open in Spaces](https://huggingface.co/spaces/akhaliq/Analog-Diffusion)


Here's a [link to non-cherrypicked batches.](https://imgur.com/a/7iOgTFv)
         
            ","{""id"": ""wavymulder/Analog-Diffusion"", ""author"": ""wavymulder"", ""sha"": ""211449c273875dedc683fdb5a95d8a0ff9d76484"", ""last_modified"": ""2023-01-27 22:30:51+00:00"", ""created_at"": ""2022-12-10 20:14:02+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1175, ""downloads_all_time"": null, ""likes"": 877, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""stable-diffusion"", ""stable-diffusion-diffusers"", ""text-to-image"", ""safetensors"", ""en"", ""license:creativeml-openrail-m"", ""autotrain_compatible"", ""endpoints_compatible"", ""diffusers:StableDiffusionPipeline"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\nlicense: creativeml-openrail-m\ntags:\n- stable-diffusion\n- stable-diffusion-diffusers\n- text-to-image\n- safetensors\n- diffusers\nthumbnail: https://huggingface.co/wavymulder/Analog-Diffusion/resolve/main/images/page1.jpg\ninference: true"", ""widget_data"": null, ""model_index"": null, ""config"": {""diffusers"": {""_class_name"": ""StableDiffusionPipeline""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='analog-diffusion-1.0.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='analog-diffusion-1.0.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='feature_extractor/preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/page1.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/page2.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/page3.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model_index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='parameters_used_examples.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='scheduler/scheduler_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/vocab.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [""anzorq/finetuned_diffusion"", ""radames/Real-Time-Latent-Consistency-Model"", ""Yntec/ToyWorld"", ""radames/Real-Time-Latent-Consistency-Model-Text-To-Image"", ""Yntec/PrintingPress"", 
""radames/Real-Time-SD-Turbo"", ""radames/real-time-pix2pix-turbo"", ""latent-consistency/Real-Time-LCM-ControlNet-Lora-SD1.5"", ""Nymbo/image_gen_supaqueue"", ""akhaliq/Analog-Diffusion"", ""ennov8ion/3dart-Models"", ""phenixrhyder/NSFW-ToyWorld"", ""Yntec/blitz_diffusion"", ""sanaweb/text-to-image"", ""BilalSardar/Text-To-image-AllModels"", ""AdamOswald1/finetuned_diffusion"", ""Vedits/6x_Image_diffusion"", ""John6666/Diffusion80XX4sg"", ""ennov8ion/comicbook-models"", ""John6666/PrintingPress4"", ""Nickhilearla135095/maximum_diffusion"", ""SUPERSHANKY/Finetuned_Diffusion_Max"", ""PeepDaSlan9/B2BMGMT_Diffusion60XX"", ""latent-consistency/Real-Time-LCM-Text-to-Image-Lora-SD1.5"", ""Joeythemonster/Text-To-image-AllModels"", ""Daniela-C/6x_Image_diffusion"", ""Dao3/Text-To-image-AllModels"", ""phenixrhyder/PrintingPress"", ""John6666/hfd_test_nostopbutton"", ""mindtube/Diffusion50XX"", ""TheKitten/Fast-Images-Creature"", ""Nymbo/Diffusion80XX4sg"", ""AI-ML-API-tutorials/ai-sticker-maker"", ""kaleidoskop-hug/PrintingPress"", ""ennov8ion/stablediffusion-models"", ""JoPmt/Multi-SD_Cntrl_Cny_Pse_Img2Img"", ""JoPmt/Img2Img_SD_Control_Canny_Pose_Multi"", ""ReiPlush64/finetuned_diffusion"", ""John6666/ToyWorld4"", ""Omnibus-archive/Diffusion-Flood"", ""grzegorz2047/fast_diffusion"", ""Alfasign/dIFFU"", ""Nymbo/PrintingPress"", ""Rifd/Sdallmodels"", ""John6666/Diffusion80XX4g"", ""NativeAngels/HuggingfaceDiffusion"", ""ennov8ion/Scifi-Models"", ""ennov8ion/semirealistic-models"", ""Jackflack09/finetuned_diffusion2"", ""ennov8ion/FantasyArt-Models"", ""ennov8ion/dreamlike-models"", ""noes14155/img_All_models"", ""Nymbo/Game-Creator"", ""ennov8ion/500models"", ""AnimeStudio/anime-models"", ""John6666/Diffusion80XX4"", ""K00B404/HuggingfaceDiffusion_custom"", ""John6666/blitz_diffusion4"", ""John6666/blitz_diffusion_builtin"", ""VincentZB/Stable-Diffusion-ControlNet-WebUI"", ""Lyra121/finetuned_diffusion"", ""hylee/finetuned_diffusion"", ""Thafx/sdAnalog"", ""Dao3/Top-20-Models"", ""RhythmRemix14/PrintingPressDx"", ""Omnibus/game-test"", ""sohoso/PrintingPress"", ""NativeAngels/ToyWorld"", ""eeyorestoned/maximum_diffusion"", ""rzzgate/Stable-Diffusion-ControlNet-WebUI"", ""Harshveer/Finetuned_Diffusion_Max"", ""gato001k1/maximum_diffusion0k"", ""mindtube/maximum_multiplier_places"", ""Binettebob22/fast_diffusion2"", ""pikto/Elite-Scifi-Models"", ""Rooni/finetuned_diffusion"", ""Omnibus/Social-Card-Maker-DL"", ""PixelistStudio/3dart-Models"", ""devmiles/zexxiai"", ""JoPmt/Txt2Img_SD_LORA_MixMatch_Multi"", ""Nymbo/Diffusion60XX"", ""Nymbo/Social-Card-Maker-DL"", ""multimodalart/Real-Time-Latent-SDXL-Lightning"", ""TheKitten/Images"", ""ennov8ion/anime-models"", ""jordonpeter01/Diffusion70"", ""abdibrokhim/ai-sticker-maker"", ""Joeythemonster/finetuned_diffusion"", ""prikmmo9/finetuned_diffusion"", ""entresan1/wavymulder-Analog-Diffusion"", ""Pfs2021Funny/The-CG-Diffusion"", ""cloudwp/Top-20-Diffusion"", ""georgesX/finetuned_diffusion"", ""kerria/finetuned_diffusion"", ""leonel1122/Analog-Diffusion"", ""ygtrfed/pp-web-ui"", ""Karumoon/test007"", ""ivanmeyer/Finetuned_Diffusion_Max"", ""ennov8ion/Landscapes-models"", ""Shad0ws/ImageModelTestEnvironment""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-01-27 22:30:51+00:00"", ""cardData"": ""language:\n- en\nlicense: creativeml-openrail-m\ntags:\n- stable-diffusion\n- stable-diffusion-diffusers\n- text-to-image\n- safetensors\n- diffusers\nthumbnail: 
https://huggingface.co/wavymulder/Analog-Diffusion/resolve/main/images/page1.jpg\ninference: true"", ""transformersInfo"": null, ""_id"": ""6394e88afaede12c7472e711"", ""modelId"": ""wavymulder/Analog-Diffusion"", ""usedStorage"": 24484136199}",0,https://huggingface.co/elanoqi/wb-ghibli-700,1,,0,,0,"https://huggingface.co/Yntec/Paramount, https://huggingface.co/Yntec/Analog, https://huggingface.co/Yntec/AnalogWorld",3,"AI-ML-API-tutorials/ai-sticker-maker, Daniela-C/6x_Image_diffusion, Joeythemonster/Text-To-image-AllModels, John6666/Diffusion80XX4sg, John6666/PrintingPress4, John6666/hfd_test_nostopbutton, Nymbo/image_gen_supaqueue, PeepDaSlan9/B2BMGMT_Diffusion60XX, Yntec/PrintingPress, Yntec/ToyWorld, Yntec/blitz_diffusion, akhaliq/Analog-Diffusion, huggingface/InferenceSupport/discussions/new?title=wavymulder/Analog-Diffusion&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bwavymulder%2FAnalog-Diffusion%5D(%2Fwavymulder%2FAnalog-Diffusion)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, phenixrhyder/NSFW-ToyWorld",14
         
elanoqi/wb-ghibli-700,"---
license: creativeml-openrail-m
library_name: diffusers
tags:
- text-to-image
- dreambooth
- stable-diffusion
- stable-diffusion-diffusers
inference: true
base_model: wavymulder/Analog-Diffusion
instance_prompt: a photo of jksj man
---

<!-- This model card has been generated automatically according to the information the training script had access to. You
should probably proofread and complete it, then remove this comment. -->


# DreamBooth - elanoqi/wb-ghibli-700

This is a DreamBooth model derived from wavymulder/Analog-Diffusion. The weights were trained on a photo of jksj man using [DreamBooth](https://dreambooth.github.io/).
You can find some example images below.
         



DreamBooth for the text encoder was enabled: False.


## Intended uses & limitations

#### How to use

```python
# TODO: add an example code snippet for running this diffusion pipeline
```
         

#### Limitations and bias

[TODO: provide examples of latent issues and potential remediations]

## Training details

            [TODO: describe the data used to train the model]","{""id"": ""elanoqi/wb-ghibli-700"", ""author"": ""elanoqi"", ""sha"": ""f73079b8ad2c9b22b44022f00bfecc73ac6c44d9"", ""last_modified"": ""2024-03-02 11:06:54+00:00"", ""created_at"": ""2024-03-02 10:42:07+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 4, ""downloads_all_time"": null, ""likes"": 1, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""tensorboard"", ""safetensors"", ""text-to-image"", ""dreambooth"", ""stable-diffusion"", ""stable-diffusion-diffusers"", ""base_model:wavymulder/Analog-Diffusion"", ""base_model:finetune:wavymulder/Analog-Diffusion"", ""license:creativeml-openrail-m"", ""autotrain_compatible"", ""endpoints_compatible"", ""diffusers:StableDiffusionPipeline"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: wavymulder/Analog-Diffusion\nlibrary_name: diffusers\nlicense: creativeml-openrail-m\ntags:\n- text-to-image\n- dreambooth\n- stable-diffusion\n- stable-diffusion-diffusers\ninference: true\ninstance_prompt: a photo of jksj man"", ""widget_data"": null, ""model_index"": null, ""config"": {""diffusers"": {""_class_name"": ""StableDiffusionPipeline""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoint-500/optimizer.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoint-500/random_states_0.pkl', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoint-500/scheduler.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoint-500/unet/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoint-500/unet/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='feature_extractor/preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='logs/dreambooth/1709376202.312467/events.out.tfevents.1709376202.9bfc643e889d.3689.1', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='logs/dreambooth/1709376202.314325/hparams.yml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='logs/dreambooth/events.out.tfevents.1709376202.9bfc643e889d.3689.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model_index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='scheduler/scheduler_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/vocab.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/config.json', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-03-02 11:06:54+00:00"", ""cardData"": ""base_model: wavymulder/Analog-Diffusion\nlibrary_name: diffusers\nlicense: creativeml-openrail-m\ntags:\n- text-to-image\n- dreambooth\n- stable-diffusion\n- stable-diffusion-diffusers\ninference: true\ninstance_prompt: a photo of jksj man"", ""transformersInfo"": null, ""_id"": ""65e3027fd26b426e3ecf7b9a"", ""modelId"": ""elanoqi/wb-ghibli-700"", ""usedStorage"": 10644410357}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=elanoqi/wb-ghibli-700&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Belanoqi%2Fwb-ghibli-700%5D(%2Felanoqi%2Fwb-ghibli-700)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
CLIP-ViT-bigG-14-laion2B-39B-b160k_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv
ADDED
@@ -0,0 +1,247 @@
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
laion/CLIP-ViT-bigG-14-laion2B-39B-b160k,"---
license: mit
widget:
- src: >-
    https://huggingface.co/datasets/mishig/sample_images/resolve/main/cat-dog-music.png
  candidate_labels: playing music, playing sports
  example_title: Cat & Dog
library_name: open_clip
pipeline_tag: zero-shot-image-classification
---
# Model Card for CLIP ViT-bigG/14 - LAION-2B

# Table of Contents

1. [Model Details](#model-details)
2. [Uses](#uses)
3. [Training Details](#training-details)
4. [Evaluation](#evaluation)
5. [Acknowledgements](#acknowledgements)
6. [Citation](#citation)
7. [How To Get Started With the Model](#how-to-get-started-with-the-model)


# Model Details

## Model Description

A CLIP ViT-bigG/14 model trained with the LAION-2B English subset of LAION-5B (https://laion.ai/blog/laion-5b/) using OpenCLIP (https://github.com/mlfoundations/open_clip).

Model training done by Mitchell Wortsman on the [stability.ai](https://stability.ai/) cluster.

The license for this model is MIT.

# Uses

As per the original [OpenAI CLIP model card](https://github.com/openai/CLIP/blob/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1/model-card.md), this model is intended as a research output for research communities. We hope that this model will enable researchers to better understand and explore zero-shot, arbitrary image classification. We also hope it can be used for interdisciplinary studies of the potential impact of such a model.

The OpenAI CLIP paper includes a discussion of potential downstream impacts to provide an example for this sort of analysis. Additionally, the LAION-5B blog (https://laion.ai/blog/laion-5b/) and upcoming paper include additional discussion as it relates specifically to the training dataset.

## Direct Use

Zero-shot image classification, image and text retrieval, among others.
         
     | 
| 44 | 
         
            +
             
     | 
| 45 | 
         
            +
            ## Downstream Use
         
     | 
| 46 | 
         
            +
             
     | 
| 47 | 
         
            +
            Image classification and other image task fine-tuning, linear probe image classification, image generation guiding and conditioning, among others.
         
     | 
| 48 | 
         
            +
             
     | 
| 49 | 
         
            +
            ## Out-of-Scope Use
         
     | 
| 50 | 
         
            +
             
     | 
| 51 | 
         
            +
            As per the OpenAI models,
         
     | 
| 52 | 
         
            +
             
     | 
| 53 | 
         
            +
            **Any** deployed use case of the model - whether commercial or not - is currently out of scope. Non-deployed use cases such as image search in a constrained environment, are also not recommended unless there is thorough in-domain testing of the model with a specific, fixed class taxonomy. This is because our safety assessment demonstrated a high need for task specific testing especially given the variability of CLIP’s performance with different class taxonomies. This makes untested and unconstrained deployment of the model in any use case currently potentially harmful. 
         
     | 
| 54 | 
         
            +
             
     | 
| 55 | 
         
            +
            Certain use cases which would fall under the domain of surveillance and facial recognition are always out-of-scope regardless of performance of the model. This is because the use of artificial intelligence for tasks such as these can be premature currently given the lack of testing norms and checks to ensure its fair use.
         
     | 
| 56 | 
         
            +
             
     | 
| 57 | 
         
            +
            Since the model has not been purposefully trained in or evaluated on any languages other than English, its use should be limited to English language use cases.
         
     | 
| 58 | 
         
            +
             
     | 
| 59 | 
         
            +
            Further the above notice, the LAION-5B dataset used in training of these models has additional considerations, see below.
         
     | 
| 60 | 
         
            +
             
     | 
| 61 | 
         
            +
            # Training Details
         
     | 
| 62 | 
         
            +
             
     | 
| 63 | 
         
            +
            ## Training Data
         
     | 
| 64 | 
         
            +
             
     | 
| 65 | 
         
            +
            This model was trained with the 2 Billion sample English subset of LAION-5B (https://laion.ai/blog/laion-5b/). 
         
     | 
| 66 | 
         
            +
            Fine-tuning was also partially done on LAION-A, a 900M subset of LAION-2B filtered with aesthetic V2 4.5+ and phash deduplicated.
         
     | 
| 67 | 
         
            +
             
     | 
| 68 | 
         
            +
            **IMPORTANT NOTE:** The motivation behind dataset creation is to democratize research and experimentation around large-scale multi-modal model training and handling of uncurated, large-scale datasets crawled from publically available internet. Our recommendation is therefore to use the dataset for research purposes. Be aware that this large-scale dataset is uncurated. Keep in mind that the uncurated nature of the dataset means that collected links may lead to strongly discomforting and disturbing content for a human viewer. Therefore, please use the demo links with caution and at your own risk. It is possible to extract a “safe” subset by filtering out samples based on the safety tags (using a customized trained NSFW classifier that we built). While this strongly reduces the chance for encountering potentially harmful content when viewing, we cannot entirely exclude the possibility for harmful content being still present in safe mode, so that the warning holds also there. We think that providing the dataset openly to broad research and other interested communities will allow for transparent investigation of benefits that come along with training large-scale models as well as pitfalls and dangers that may stay unreported or unnoticed when working with closed large datasets that remain restricted to a small community. Providing our dataset openly, we however do not recommend using it for creating ready-to-go industrial products, as the basic research about general properties and safety of such large-scale models, which we would like to encourage with this release, is still in progress.
         
     | 
| 69 | 
         
            +
             
     | 
| 70 | 
         
            +
            ## Training Procedure
         
     | 
| 71 | 
         
            +
             
     | 
| 72 | 
         
            +
            The training procedure will soon be discussed by a blog post on laion.ai.
         
     | 
| 73 | 
         
            +
             
     | 
| 74 | 
         
            +
            # Evaluation
         
     | 
| 75 | 
         
            +
             
     | 
| 76 | 
         
            +
            Evaluation done with code in the [LAION CLIP Benchmark suite](https://github.com/LAION-AI/CLIP_benchmark).
         
     | 
| 77 | 
         
            +
             
     | 
| 78 | 
         
            +
            ## Testing Data, Factors & Metrics
         
     | 
| 79 | 
         
            +
             
     | 
| 80 | 
         
            +
            ### Testing Data
         
     | 
| 81 | 
         
            +
             
     | 
| 82 | 
         
            +
            The testing is performed with VTAB+ (A combination of VTAB (https://arxiv.org/abs/1910.04867) w/ additional robustness datasets) for classification and COCO and Flickr for retrieval.
         
     | 
| 83 | 
         
            +
             
     | 
| 84 | 
         
            +
            **TODO** - more detail
         
     | 
| 85 | 
         
            +
             
     | 
| 86 | 
         
            +
            ## Results
         
     | 
| 87 | 
         
            +
             
     | 
| 88 | 
         
            +
            The model achieves a 80.1 zero-shot top-1 accuracy on ImageNet-1k.
         
     | 
| 89 | 
         
            +
             
     | 
| 90 | 
         
            +
            An initial round of benchmarks have been performed on a wider range of datasets, and will soon be visible at https://github.com/LAION-AI/CLIP_benchmark/blob/main/benchmark/results.ipynb
         
     | 
| 91 | 
         
            +
             
     | 
| 92 | 
         
            +
            **TODO** - create table for just this model's metrics.
         
     | 
| 93 | 
         
            +
             
     | 
| 94 | 
         
            +
            # Acknowledgements
         
     | 
| 95 | 
         
            +
             
     | 
| 96 | 
         
            +
            Acknowledging [stability.ai](https://stability.ai/) for the compute used to train this model.
         
     | 
| 97 | 
         
            +
             
     | 
| 98 | 
         
            +
            # Citation
         
     | 
| 99 | 
         
            +
             
     | 
| 100 | 
         
            +
            **BibTeX:**
         
     | 
| 101 | 
         
            +
             
     | 
| 102 | 
         
            +
            LAION-5B
         
     | 
| 103 | 
         
            +
            ```bibtex
         
     | 
| 104 | 
         
            +
            @inproceedings{schuhmann2022laionb,
         
     | 
| 105 | 
         
            +
              title={{LAION}-5B: An open large-scale dataset for training next generation image-text models},
         
     | 
| 106 | 
         
            +
              author={Christoph Schuhmann and
         
     | 
| 107 | 
         
            +
                      Romain Beaumont and
         
     | 
| 108 | 
         
            +
                      Richard Vencu and
         
     | 
| 109 | 
         
            +
                      Cade W Gordon and
         
     | 
| 110 | 
         
            +
                      Ross Wightman and
         
     | 
| 111 | 
         
            +
                      Mehdi Cherti and
         
     | 
| 112 | 
         
            +
                      Theo Coombes and
         
     | 
| 113 | 
         
            +
                      Aarush Katta and
         
     | 
| 114 | 
         
            +
                      Clayton Mullis and
         
     | 
| 115 | 
         
            +
                      Mitchell Wortsman and
         
     | 
| 116 | 
         
            +
                      Patrick Schramowski and
         
     | 
| 117 | 
         
            +
                      Srivatsa R Kundurthy and
         
     | 
| 118 | 
         
            +
                      Katherine Crowson and
         
     | 
| 119 | 
         
            +
                      Ludwig Schmidt and
         
     | 
| 120 | 
         
            +
                      Robert Kaczmarczyk and
         
     | 
| 121 | 
         
            +
                      Jenia Jitsev},
         
     | 
| 122 | 
         
            +
              booktitle={Thirty-sixth Conference on Neural Information Processing Systems Datasets and Benchmarks Track},
         
     | 
| 123 | 
         
            +
              year={2022},
         
     | 
| 124 | 
         
            +
              url={https://openreview.net/forum?id=M3Y74vmsMcY}
         
     | 
| 125 | 
         
            +
            }
         
     | 
| 126 | 
         
            +
            ```
         
     | 
| 127 | 
         
            +
             
     | 
| 128 | 
         
            +
            OpenAI CLIP paper
         
     | 
| 129 | 
         
            +
            ```
         
     | 
| 130 | 
         
            +
            @inproceedings{Radford2021LearningTV,
         
     | 
| 131 | 
         
            +
              title={Learning Transferable Visual Models From Natural Language Supervision},
         
     | 
| 132 | 
         
            +
              author={Alec Radford and Jong Wook Kim and Chris Hallacy and A. Ramesh and Gabriel Goh and Sandhini Agarwal and Girish Sastry and Amanda Askell and Pamela Mishkin and Jack Clark and Gretchen Krueger and Ilya Sutskever},
         
     | 
| 133 | 
         
            +
              booktitle={ICML},
         
     | 
| 134 | 
         
            +
              year={2021}
         
     | 
| 135 | 
         
            +
            }
         
     | 
| 136 | 
         
            +
            ```
         
     | 
| 137 | 
         
            +
             
     | 
| 138 | 
         
            +
            OpenCLIP software
         
     | 
| 139 | 
         
            +
            ```
         
     | 
| 140 | 
         
            +
            @software{ilharco_gabriel_2021_5143773,
         
     | 
| 141 | 
         
            +
              author       = {Ilharco, Gabriel and
         
     | 
| 142 | 
         
            +
                              Wortsman, Mitchell and
         
     | 
| 143 | 
         
            +
                              Wightman, Ross and
         
     | 
| 144 | 
         
            +
                              Gordon, Cade and
         
     | 
| 145 | 
         
            +
                              Carlini, Nicholas and
         
     | 
| 146 | 
         
            +
                              Taori, Rohan and
         
     | 
| 147 | 
         
            +
                              Dave, Achal and
         
     | 
| 148 | 
         
            +
                              Shankar, Vaishaal and
         
     | 
| 149 | 
         
            +
                              Namkoong, Hongseok and
         
     | 
| 150 | 
         
            +
                              Miller, John and
         
     | 
| 151 | 
         
            +
                              Hajishirzi, Hannaneh and
         
     | 
| 152 | 
         
            +
                              Farhadi, Ali and
         
     | 
| 153 | 
         
            +
                              Schmidt, Ludwig},
         
     | 
| 154 | 
         
            +
              title        = {OpenCLIP},
         
     | 
| 155 | 
         
            +
              month        = jul,
         
     | 
| 156 | 
         
            +
              year         = 2021,
         
     | 
| 157 | 
         
            +
              note         = {If you use this software, please cite it as below.},
         
     | 
| 158 | 
         
            +
              publisher    = {Zenodo},
         
     | 
| 159 | 
         
            +
              version      = {0.1},
         
     | 
| 160 | 
         
            +
              doi          = {10.5281/zenodo.5143773},
         
     | 
| 161 | 
         
            +
              url          = {https://doi.org/10.5281/zenodo.5143773}
         
     | 
| 162 | 
         
            +
            }
         
     | 
| 163 | 
         
            +
            ```
         
     | 
| 164 | 
         
            +
             
     | 
| 165 | 
         
            +
            Scaling OpenCLIP paper
         
     | 
| 166 | 
         
            +
            ```
         
     | 
| 167 | 
         
            +
            @article{cherti2022reproducible,
         
     | 
| 168 | 
         
            +
              title={Reproducible scaling laws for contrastive language-image learning},
         
     | 
| 169 | 
         
            +
              author={Cherti, Mehdi and Beaumont, Romain and Wightman, Ross and Wortsman, Mitchell and Ilharco, Gabriel and Gordon, Cade and Schuhmann, Christoph and Schmidt, Ludwig and Jitsev, Jenia},
         
     | 
| 170 | 
         
            +
              journal={arXiv preprint arXiv:2212.07143},
         
     | 
| 171 | 
         
            +
              year={2022}
         
     | 
| 172 | 
         
            +
            }
         
     | 
| 173 | 
         
            +
            ```
         
     | 
| 174 | 
         
            +
             
     | 
| 175 | 
         
            +
            # How to Get Started with the Model
         
     | 
| 176 | 
         
            +
             
     | 
| 177 | 
         
            +
            Use the code below to get started with the model.
         
     | 
| 178 | 
         
            +
             
     | 
| 179 | 
         
            +
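A minimal zero-shot classification sketch with the OpenCLIP library is shown here; the card's own snippets for transformers, OpenCLIP, and timm are still marked TODO below. The image path and candidate labels are placeholders taken from the widget metadata above, and the `hf-hub:` model reference assumes `pip install open_clip_torch`.

```python
# Minimal OpenCLIP zero-shot classification sketch (assumed setup: open_clip_torch installed).
import torch
import open_clip
from PIL import Image

model_name = 'hf-hub:laion/CLIP-ViT-bigG-14-laion2B-39B-b160k'
model, _, preprocess = open_clip.create_model_and_transforms(model_name)
tokenizer = open_clip.get_tokenizer(model_name)
model.eval()

# Placeholder inputs: a local copy of the widget image and its candidate labels.
image = preprocess(Image.open('cat-dog-music.png')).unsqueeze(0)
text = tokenizer(['playing music', 'playing sports'])

with torch.no_grad():
    image_features = model.encode_image(image)
    text_features = model.encode_text(text)
    image_features /= image_features.norm(dim=-1, keepdim=True)
    text_features /= text_features.norm(dim=-1, keepdim=True)
    # Scaled cosine similarities turned into label probabilities.
    probs = (100.0 * image_features @ text_features.T).softmax(dim=-1)

print(probs)  # probabilities over the candidate labels
```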
            ** TODO ** - Hugging Face transformers, OpenCLIP, and timm getting started snippets","{""id"": ""laion/CLIP-ViT-bigG-14-laion2B-39B-b160k"", ""author"": ""laion"", ""sha"": ""743c27bd53dfe508a0ade0f50698f99b39d03bec"", ""last_modified"": ""2025-01-22 04:37:32+00:00"", ""created_at"": ""2023-01-23 07:12:35+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 566860, ""downloads_all_time"": null, ""likes"": 261, ""library_name"": ""open_clip"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""open_clip"", ""pytorch"", ""safetensors"", ""clip"", ""zero-shot-image-classification"", ""arxiv:1910.04867"", ""arxiv:2212.07143"", ""license:mit"", ""region:us""], ""pipeline_tag"": ""zero-shot-image-classification"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""library_name: open_clip\nlicense: mit\npipeline_tag: zero-shot-image-classification\nwidget:\n- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/cat-dog-music.png\n  candidate_labels: playing music, playing sports\n  example_title: Cat & Dog"", ""widget_data"": [{""src"": ""https://huggingface.co/datasets/mishig/sample_images/resolve/main/cat-dog-music.png"", ""candidate_labels"": ""playing music, playing sports"", ""example_title"": ""Cat & Dog""}], ""model_index"": null, ""config"": {""architectures"": [""CLIPModel""], ""model_type"": ""clip"", ""tokenizer_config"": {""unk_token"": {""content"": ""<|endoftext|>"", ""single_word"": false, ""lstrip"": false, ""rstrip"": false, ""normalized"": true, ""__type"": ""AddedToken""}, ""bos_token"": {""content"": ""<|startoftext|>"", ""single_word"": false, ""lstrip"": false, ""rstrip"": false, ""normalized"": true, ""__type"": ""AddedToken""}, ""eos_token"": {""content"": ""<|endoftext|>"", ""single_word"": false, ""lstrip"": false, ""rstrip"": false, ""normalized"": true, ""__type"": ""AddedToken""}, ""pad_token"": ""<|endoftext|>""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='open_clip_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='open_clip_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='open_clip_pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00002.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00002.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": 
[""yanze/PuLID-FLUX"", ""yanze/PuLID"", ""okaris/omni-zero"", ""radames/Enhance-This-DemoFusion-SDXL"", ""fffiloni/InstantIR"", ""multimodalart/cosxl"", ""BoyuanJiang/FitDiT"", ""latentexplorers/latentnavigation-flux"", ""roubaofeipi/UltraPixel-demo"", ""primecai/diffusion-self-distillation"", ""Fabrice-TIERCELIN/SUPIR"", ""dbaranchuk/Switti-1024"", ""exx8/differential-diffusion"", ""Kwai-Kolors/Kolors-FaceID"", ""JOY-Huang/InstantIR"", ""openfree/ultpixgen"", ""InstantX/SD35-IP-Adapter"", ""TIGER-Lab/AnyV2V"", ""tight-inversion/tight-inversion-pulid-demo"", ""fantaxy/flx-pulid"", ""fantos/Panorama"", ""HarborYuan/ovsam"", ""ginipick/time-machine"", ""OpenShape/openshape-demo"", ""briaai/BRIA-Background-Generation"", ""fffiloni/AccDiffusion"", ""naver-ai/VisualStylePrompting"", ""briaai/BRIA-Eraser-API"", ""Kwai-Kolors/Kolors-Inpainting"", ""Vchitect/Vchitect-2.0"", ""dbaranchuk/Switti"", ""okaris/omni-zero-couples"", ""ginipick/AccDiffusion"", ""naver-ai/VisualStylePrompting_Controlnet"", ""zideliu/styledrop"", ""aiqtech/kofaceid"", ""ginigen/panorama-images"", ""weepiess2383/CFG-Zero-Star"", ""elismasilva/mod-control-tile-upscaler-sdxl"", ""LanguageBind/LanguageBind"", ""editing-images/ai-halloween-photobooth"", ""briaai/BRIA-2.3-ControlNet-Inpainting"", ""elismasilva/mixture-of-diffusers-sdxl-tiling"", ""Eugeoter/ControlNeXt"", ""Deddy/PuLid-FLX-GPU"", ""LXT/OMG_Seg"", ""ECLIPSE-Community/ECLIPSE-Kandinsky-v2.2"", ""ECLIPSE-Community/lambda-eclipse-personalized-t2i"", ""sofianhw/PuLID-FLUX"", ""Upscaler/SUPIR"", ""bghira/FluxBooru-CFG3.5"", ""yiren98/MakeAnything"", ""ameerazam08/Stable-Cascade-Super-Resolution"", ""TypeGPT/Image-Gen-Pro"", ""Caoyunkang/AdaCLIP"", ""MoonQiu/FreeScale"", ""omer11a/bounded-attention"", ""qiuzhi2046/PuLID-FLUX"", ""hamacojr/SAM-CAT-Seg"", ""hamacojr/CAT-Seg"", ""SunderAli17/ToonMage"", ""RED-AIGC/InstantID-XS"", ""HuiZhang0812/CreatiLayout"", ""nroggendorff/latentnavigation-flux-uncensored"", ""cocktailpeanut/InstantIR"", ""xswu/HPSv2"", ""JoPmt/ConsisID"", ""shilinxu/rap-sam"", ""rp-yu/apiprompting"", ""ginipick/Time-Stream"", ""qiuzhi2046/PuLID"", ""SunderAli17/ToonMagev2"", ""smartfeed/image2image"", ""ginipick/AccuVision-Diffusion"", ""felipevictal/FitDiT007"", ""yanranxiaoxi/booxel"", ""cbensimon/omni-zero-couples"", ""dubiousx/SUPIR"", ""RobinsAIWorld/Instant-Image-Restoration"", ""SunderAli17/Blind_Image_Restoration"", ""rphrp1985/PuLID-FLUX"", ""yeq6x/MakeAnything"", ""VIDraft/tight-inversion-pulid-demo"", ""cocktailpeanut/cosxl"", ""TheNetherWatcher/Vid2Vid-using-Text-prompt"", ""SunderAli17/SAKFaceTransform"", ""MartsoBodziu1994/SUPIR"", ""innoai/Switti-ZeroGPU"", ""PiperMy/PuLID-FLUX"", ""Nightwing25/Enhance-This-DemoFusion-SDXL"", ""Apier/Enhance-This-DemoFusion-SDXL"", ""supernlp/Enhance-This-DemoFusion-SDXL"", ""johnygoddard/Enhance-This-DemoFusion-SDXL"", ""pablodawson/ldm3d-inpainting"", ""jbilcke-hf/ai-tube-model-pulid"", ""Shad0ws/PuLID"", ""ennov8ion/Animagine-XL"", ""tttoaster/SEED-X-17B"", ""Honglee003/BRIA-Background-Generation4"", ""smartfeed/turbo_fm""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-01-22 04:37:32+00:00"", ""cardData"": ""library_name: open_clip\nlicense: mit\npipeline_tag: zero-shot-image-classification\nwidget:\n- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/cat-dog-music.png\n  candidate_labels: playing music, playing sports\n  example_title: Cat & Dog"", ""transformersInfo"": null, ""_id"": ""63ce3363de7bcd2114efcb4d"", 
""modelId"": ""laion/CLIP-ViT-bigG-14-laion2B-39B-b160k"", ""usedStorage"": 81268004900}",0,https://huggingface.co/aimagelab/ReT-OpenCLIP-ViT-G-14,1,,0,,0,,0,"BoyuanJiang/FitDiT, Fabrice-TIERCELIN/SUPIR, JOY-Huang/InstantIR, dbaranchuk/Switti-1024, exx8/differential-diffusion, fffiloni/InstantIR, huggingface/InferenceSupport/discussions/new?title=laion/CLIP-ViT-bigG-14-laion2B-39B-b160k&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Blaion%2FCLIP-ViT-bigG-14-laion2B-39B-b160k%5D(%2Flaion%2FCLIP-ViT-bigG-14-laion2B-39B-b160k)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, multimodalart/cosxl, okaris/omni-zero, primecai/diffusion-self-distillation, roubaofeipi/UltraPixel-demo, yanze/PuLID, yanze/PuLID-FLUX",13
         
aimagelab/ReT-OpenCLIP-ViT-G-14,"---
library_name: transformers
license: apache-2.0
base_model:
- laion/CLIP-ViT-bigG-14-laion2B-39B-b160k
datasets:
- aimagelab/ReT-M2KR
pipeline_tag: visual-document-retrieval
---

# Model Card for ReT-OpenCLIP-ViT-G-14


ReT is a novel approach for multimodal document retrieval that supports both multimodal queries and documents. Unlike existing methods that only use features from the final layer of vision-and-language backbones, ReT employs a Transformer-based recurrent cell to leverage multi-level representations from different layers of both the visual and textual backbones. The model features sigmoidal gates, inspired by LSTM design, that selectively control information flow between layers and modalities. ReT processes multimodal queries and documents independently, producing sets of latent tokens used for fine-grained late-interaction similarity computation. ReT is designed to process images and text in both queries and documents. To this end, it has been trained and evaluated on a custom version of the challenging [M2KR](https://arxiv.org/abs/2402.08327) benchmark, with the following modifications: MSMARCO has been excluded as it does not contain images, and the documents from OVEN, InfoSeek, E-VQA, and OKVQA have been enriched with the addition of images.

### Model Sources

<!-- Provide the basic links for the model. -->

- **Repository:** https://github.com/aimagelab/ReT
- **Paper:** [Recurrence-Enhanced Vision-and-Language Transformers for Robust Multimodal Document Retrieval](https://www.arxiv.org/abs/2503.01980) (CVPR 2025)


### Use with Transformers
Follow the instructions on the [repository](https://github.com/aimagelab/ReT) to install the required environment.
```python
from src.models import RetrieverModel, RetModel
import torch

device = 'cuda' if torch.cuda.is_available() else 'cpu'
retriever = RetrieverModel.from_pretrained('aimagelab/ReT-OpenCLIP-ViT-G-14', device_map=device)

# QUERY
ret: RetModel = retriever.get_query_model()
ret.init_tokenizer_and_image_processor()
q_txt = ""Retrieve documents that provide an answer to the question alongside the image: What is the content of the image?""
q_img = 'assets/model.png'

ret_feats = ret.get_ret_features([[q_txt, q_img]])
print(ret_feats.shape)  # torch.Size([1, 32, 128])


# PASSAGE
ret: RetModel = retriever.get_passage_model()
ret.init_tokenizer_and_image_processor()

p_txt = """"""The image shows a diagram of what appears to be a neural network architecture using a fine-grained loss approach for multimodal learning.
The architecture has two parallel processing streams labeled ""ReTQ"" (left side, in purple) and ""ReTD"" (right side, in blue).
Each side has: ...""""""
p_img = ''

ret_feats = ret.get_ret_features([[p_txt, p_img]])
print(ret_feats.shape)  # torch.Size([1, 32, 128])
```
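The query and passage features above are sets of 32 latent tokens of dimension 128, intended for fine-grained late-interaction scoring. The sketch below shows a hypothetical MaxSim-style score over such token sets, assuming the two feature tensors from the snippet were kept as `query_feats` and `passage_feats` and are L2-normalized; ReT's actual similarity computation lives in the repository and may differ.

```python
import torch

def late_interaction_score(q_feats: torch.Tensor, p_feats: torch.Tensor) -> torch.Tensor:
    # q_feats: [Bq, Nq, D], p_feats: [Bp, Np, D] -> scores: [Bq, Bp]
    # Token-to-token similarities for every query/passage pair: [Bq, Bp, Nq, Np]
    sim = torch.einsum('qnd,pmd->qpnm', q_feats, p_feats)
    # Each query token keeps its best-matching passage token; sum over query tokens.
    return sim.max(dim=-1).values.sum(dim=-1)

# Hypothetical usage with the features computed above (renamed for clarity):
# scores = late_interaction_score(query_feats, passage_feats)  # higher = better match
```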

## Citation

<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->

**BibTeX:**
```bibtex
@inproceedings{caffagni2025recurrence,
  title={{Recurrence-Enhanced Vision-and-Language Transformers for Robust Multimodal Document Retrieval}},
  author={Caffagni, Davide and Sarto, Sara and Cornia, Marcella and Baraldi, Lorenzo and Cucchiara, Rita},
  booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
  year={2025}
}
            ```","{""id"": ""aimagelab/ReT-OpenCLIP-ViT-G-14"", ""author"": ""aimagelab"", ""sha"": ""6ee9ec3b9c931980017a02965da2ef596dd7dd3f"", ""last_modified"": ""2025-04-08 13:30:53+00:00"", ""created_at"": ""2025-03-25 11:40:50+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 77, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""retriever"", ""visual-document-retrieval"", ""dataset:aimagelab/ReT-M2KR"", ""arxiv:2402.08327"", ""arxiv:2503.01980"", ""base_model:laion/CLIP-ViT-bigG-14-laion2B-39B-b160k"", ""base_model:finetune:laion/CLIP-ViT-bigG-14-laion2B-39B-b160k"", ""license:apache-2.0"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""visual-document-retrieval"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- laion/CLIP-ViT-bigG-14-laion2B-39B-b160k\ndatasets:\n- aimagelab/ReT-M2KR\nlibrary_name: transformers\nlicense: apache-2.0\npipeline_tag: visual-document-retrieval"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""RetrieverModel""], ""model_type"": ""retriever""}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 2683105280}, ""total"": 2683105280}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-08 13:30:53+00:00"", ""cardData"": ""base_model:\n- laion/CLIP-ViT-bigG-14-laion2B-39B-b160k\ndatasets:\n- aimagelab/ReT-M2KR\nlibrary_name: transformers\nlicense: apache-2.0\npipeline_tag: visual-document-retrieval"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""67e29642c007fa6bfd45876a"", ""modelId"": ""aimagelab/ReT-OpenCLIP-ViT-G-14"", ""usedStorage"": 10732632904}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=aimagelab/ReT-OpenCLIP-ViT-G-14&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Baimagelab%2FReT-OpenCLIP-ViT-G-14%5D(%2Faimagelab%2FReT-OpenCLIP-ViT-G-14)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
        ControlNet-diff-modules_finetunes_20250426_215237.csv_finetunes_20250426_215237.csv
    ADDED
    
         @@ -0,0 +1,9 @@ 
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
kohya-ss/ControlNet-diff-modules,"Pre-made difference files extracted from original ControlNet models for transfer control.

Can be used with https://github.com/Mikubill/sd-webui-controlnet

Original models: https://huggingface.co/lllyasviel/ControlNet


            ","{""id"": ""kohya-ss/ControlNet-diff-modules"", ""author"": ""kohya-ss"", ""sha"": ""8e5919277dfa8cba7f9643ac13eb393175299d43"", ""last_modified"": ""2023-02-16 11:10:52+00:00"", ""created_at"": ""2023-02-16 10:39:46+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 198, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": null, ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='diff_control_sd15_canny_fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='diff_control_sd15_depth_fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='diff_control_sd15_hed_fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='diff_control_sd15_mlsd_fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='diff_control_sd15_normal_fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='diff_control_sd15_openpose_fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='diff_control_sd15_scribble_fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='diff_control_sd15_seg_fp16.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-02-16 11:10:52+00:00"", ""cardData"": null, ""transformersInfo"": null, ""_id"": ""63ee07f2049599a8df88e68c"", ""modelId"": ""kohya-ss/ControlNet-diff-modules"", ""usedStorage"": 5780809416}",0,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=kohya-ss/ControlNet-diff-modules&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bkohya-ss%2FControlNet-diff-modules%5D(%2Fkohya-ss%2FControlNet-diff-modules)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
        DeepCoder-14B-Preview_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv
    ADDED
    
         @@ -0,0 +1,551 @@ 
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
agentica-org/DeepCoder-14B-Preview,"---
license: mit
library_name: transformers
datasets:
- PrimeIntellect/verifiable-coding-problems
- likaixin/TACO-verified
- livecodebench/code_generation_lite
language:
- en
base_model:
- deepseek-ai/DeepSeek-R1-Distill-Qwen-14B
pipeline_tag: text-generation
---

<div align=""center"">
<span style=""font-family: default; font-size: 1.5em;"">DeepCoder-14B-Preview</span>
<div>
🚀 Democratizing Reinforcement Learning for LLMs (RLLM) 🌟
</div>
</div>
<br>
<div align=""center"" style=""line-height: 1;"">
  <a href=""https://github.com/agentica-project/rllm"" style=""margin: 2px;"">
    <img alt=""Code"" src=""https://img.shields.io/badge/RLLM-000000?style=for-the-badge&logo=github&logoColor=000&logoColor=white"" style=""display: inline-block; vertical-align: middle;""/>
  </a>
  <a href=""https://pretty-radio-b75.notion.site/DeepCoder-A-Fully-Open-Source-14B-Coder-at-O3-mini-Level-1cf81902c14680b3bee5eb349a512a51"" target=""_blank"" style=""margin: 2px;"">
    <img alt=""Blog"" src=""https://img.shields.io/badge/Notion-%23000000.svg?style=for-the-badge&logo=notion&logoColor=white"" style=""display: inline-block; vertical-align: middle;""/>
  </a>
  <a href=""https://x.com/Agentica_"" style=""margin: 2px;"">
    <img alt=""X.ai"" src=""https://img.shields.io/badge/Agentica-white?style=for-the-badge&logo=X&logoColor=000&color=000&labelColor=white"" style=""display: inline-block; vertical-align: middle;""/>
  </a>
  <a href=""https://huggingface.co/agentica-org"" style=""margin: 2px;"">
    <img alt=""Hugging Face"" src=""https://img.shields.io/badge/Agentica-fcd022?style=for-the-badge&logo=huggingface&logoColor=000&labelColor"" style=""display: inline-block; vertical-align: middle;""/>
  </a>
  <a href=""https://www.together.ai"" style=""margin: 2px;"">
                <img alt=""Together AI"" src=""https://img.shields.io/badge/-Together_AI%20-white?style=for-the-badge&logo=data%3Aimage%2Fpng%3Bbase64%2CiVBORw0KGgoAAAANSUhEUgAAAUAAAAFACAMAAAD6TlWYAAAC7lBMVEUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8AAAAPb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADIBDt6AAAA%2BnRSTlMAAiQEKgcdKQwiHBMUzrtSUEmjhmZGH96yv8n1ey7nL3y1U%2FZfCaIo1WFg1NrcsHYrA2%2Fv80J%2BMeilnpefqKw%2B64%2BQlSbYZGVnBGkCV%2BxW8XJube6WJ9kZF9bSzBALRynPQfLhIjvwyBEAXOTLp3o%2FJA9Y9%2F7%2F9FEKDhIVFo4GHkVzjGz8icrHzY39iHR1i0M8Jj14LLZUvb7DxMXGoQEFeQcgSBOHaPvm4uOdRLMMqcDTLbcII0sNuVn4TKaRd6RKIeDd37Svra6xuLpaW17lXUAlHh8WGxUPIS4JGQoFECMsBg4gFwsRJRIrCC0oAycaFC8NMDIzMRgBsVt9rwAAD25JREFUeNrs3QVzG0kWB%2FA3ikHhZeYwk3LMbF7GcBasOGw9hb3MzLyKw8zMzMx2rsokhySNY2mmR1N4xXV3a7sHuzWu%2BX2Ef3XPG%2Br3wOVyuVwul8vlcrlcLpfL5XK5dOlXOHTIvLnb27Xd%2FasBvrt9A%2B7r1bbdTTffcmuXwhzgTYwk6q%2BHr2RWlcclRYqXV2VeCV%2Bvr4mIkCJKZ83uc9NLC0fMD%2BD%2FCswfMfLtzh%2FeelsJcKJW19SG66KSTP6fLEXrwrU11Srw5Z8zbuzePcUBbFyg%2BPY7Pv%2Bs0A%2Bsid7ayiqFNEWp8iS9Ir%2F0Cl957bkRAaQLFLz15sBBfpbpJc7FJKKFFGuV4JJh6N573g6idr7vP%2F8iC9iI1NZJRDupLnlRBbaW3XjTfQHUJ3D8d68MBtsJiTNRold5uEYAdibkHgqiESMefGi9zfFVeCRihOS5LLJafV99XYxGddgwabKt8SmEyEQ%2FmRDlSoUA9gsNvKMDmhE8MC4L7OFtSYmPFmFlAmzm%2F9tfH0Oz8v6yFmxQ3SpOiY8eYTwjHew0%2BB9%2FD6B5ga4dLd%2FHQus0SnzaIrzWWgDb9P19MVqjw01dwFLpYYVYQymLgD1Kjj6J1umaHwLLqJfpy0%2FHIryqgg2mvetDKxXMnQMWEa9LxEpSqxZguS%2B%2BfA%2Bt9cZBi7ZxeqVMX376FqEnAtbyv7ISrTfspB%2FM82bq3r70BNMSYKV%2Bo4rQDiPzc8Csy1Fih%2BhVsE7o0cfQHnn%2FygJz6uNEJtaTSfy8ChYpnelDuxQ8HAIT1LOS8fwoCSq1FiVYcs%2FdaJ%2FgNhMJqrWKqfwoCSYtSTA08260U%2FBh47v4LDU%2F%2FgnmPOJDexX86ycwpp6yf80neB7M8o96DO2Wl2%2Bw%2FlLrh%2FlKYroW31qE9ht5EgzwRs3nR00wmgBTVq1EFtp2Ad0imdbkR0kwLQImTP8S2eg9B3QSKwkbHhPPxSUzAsjGe3P1luLrMmGklQpGjfIhKwU6C8llibBJUCaS4UKy6klkp0cX0CE9zcr8KAlei4Ahy36PLHXuBJqpYcJSmQBG3LIJWerQETS7qhCWlHowoMvfka2Va0Gjaus3MGUTp4NuWY8ja3%2FuB9q0IqydBt1eeQxZ%2B9MfQRNvnLAWT%2BiuIEuRvT9MBg3UlkQmbMmkUgB9cjsge8EbQIMLCmFPuQy6DPoGeVi9HqgED5EJazL5VAQ9Nm5CHjq0B6oKhZCUX4LrNyAfSycDhVBJZMKeTK4IoN26IPJRsAQoEhLhQ7kAmoV%2Bjbwspt0LniF8yKRMBa1%2B%2BSvkZVFfaFIkSngpvwha%2FQL56QNNqiX8%2FBs0mnMX8vPtBGiCWEf4iYmgzey7kZ8Rw6EJXonwo9SANn9GnuZCE84RnlqBJm3aIk8vFUKjxBjhKbMFaDHQhzy9%2BAI06pJEeJIS%2FGuwBn1M1WD%2BdXjNauSrdwk0Qq0kfHlUoFs7Evnq9TI0orqK8BVN
1%2FIcvAn56vAKNCKhEDruz8NjkbdXOV4CKZJA1W8M8vbjT9CwMOGtDKjmjEbefpgCDRLqCB33p7kvipC3kc83UkOihLdohF5DfMjbiBf43UZTSPQq8vobyNsbudCgyzLhTT4PNK8hpmoZPkv4awU0y5G%2F1%2Fj90WG%2BDK9ATNX7mDDh71OgWYn83RHi9yRMkQY0I5G%2FOydDA4RPCX9RoMlD%2Fu6a0mCAMcJfHGh8yN%2BwqdAAMZPwJwFNB%2BRv5TRoQIs0wp%2FiiAB7TG%2B2Abor0L0GmiO5VdicuHsfaE7UfRIxJ80Rz8Kdnfss7L6NoShz8vvAWsLfOUe8kZ7o5DfSm1Pgm8gnTv4msqoIzXC%2FyrUZjWa434XdPxOoRZjiHjTD%2FTcGNm9Cg9y%2Fs9z%2FAymi1e4fqqZ4VPcfaQZnlQYGkacXP3H6X%2FrT2qIZ7jkR%2BAvy9L5jTyq5Z%2BUolBpHnNYc5PDTmubrsHtemOeJ9aJmcWI9tAV5%2BQ29Z4Kc%2Bj0TYHOQVwl5pVl07YD1h9EMt28MHOHUueihZtK5CArvRB4OTWkuvbNgYjGyF5wEGlQ4oXsbrF%2BK7O2fDBoIPPoHegQndLAc14w6WELot8jaX5pVD1Xo8iSy1WM8nzbcFMZbcf%2BLcR%2Fp7qBZayf0kYZly5GlzpOd3Mmcfy%2F9rl1AhwjTXvoXwaATDKc55Dp6mgP%2FeSLvZ4E%2B55wwTwSmr0Y2Djp6og3%2FmUrDhqbuTKWLYMqQ42i%2FkcNTdqpXeQ2Y4z82AO2Wl8txrpz5AkLRr38Q7TUiOydlJxueBfNCYzugnYKvOn62JkXpA3YmGPy8xPnTXanzhYP27d8PSvjPFzafH0Wov12VJC87ZSdcS2dVsEy%2FE8fRDgtznTFj3Tz%2FrT3QesOGO2bKv3mrVr%2BH1nrjjqFgiUilTGRr8%2FNEwHLTZ%2FisLR9vzgGLiOckYiWpVQuwQcmonmidZ3JDYBn1chohslXL79pVFWzh%2F2L5JrRG8fahYKlIWCHWUMoiYJtl%2F3wygOYFunabDBYTWmtdhJTlVy%2BAjfxPPP4YmpW3dTzYID0jTo%2BQEl88Ix1sFlqytAOacfe%2Bk1lgD29LxXiEMiFKZUIF%2By3L%2F6YYjSpu134w2EaouEKPsNH4rlwWgI0JEzcE0Qjfl19NAVsJFR6JGCF5LovAzrId2%2B8LoD6BBT8OGQy2E2rCUaJXebhGALZC9z%2FwUhC18%2F0wc1UWsBFJ1klEOymWvKgCe%2F7CW999xxdAusCI0R99PMgP7IiJczFJY3qtEiLw8tOckw88uKs40FR4xXuWzvzjVD%2BwJnqTlVUKaYpS5Ul6ReCsdOeOmVveKgq%2Bh%2F%2FvveCiu7Zvmz2rFDhRq2tqw7GoJJP%2FJ0vRWFmyplqF1NBv0KmTJz7fumX1d889%2B8yTzzz73Ldfbtm6bdS48RNygDcx3Xu1NqPMUxdLS7uWlhar85RlJK9600VIOf6c0mWDpj391NNtBg0uyfFDSlEF8T%2Ft3eFyqjwTwPGNiKq9eq%2BtqiCeoxZVEcRW4mK%2Bvc%2F5%2Bk7bBSDZOJPfFfwHWkEMG%2B%2BfXChwHMdxHMdxHMdxHMdxHMdxHIeV4yiR%2FyOUS6tHfBxP88Vse74N%2F7mdt7PF%2FHT8EFakbYg0XupvMZ%2Fddt%2F%2Ber27zebFX%2BXSfpQfD%2BMLsX7iMp4fc460%2BfgiqbSD1jSCGH1WXAV1v32OhOm0O1Yh9aUR0sNUYnVyekjBEH9eL%2B2mIY2gilmGdWXvhTKQNnpvkDYrBJgjNluJTchtIDSnBY3TNgLMUEGvbL4Qvhco3WkPbOS%2FNAEGjMay1bsEMjyCJsewXVo5HoFuH5P2b7OsJh9a0har1mn3tmkElXTzPlU%2FUd2nDfnTKH53b%2FTN%2FI7TZp2l7X3QZNPlO6X9jb1pJwUa5J8SuyQ%2Fc2vTFjl0zu%2F8vfrH2O8obdx52jaFjmmZ7HAdQQeOVw1pwxF0StNskd0GWtvsUIfsBB3SNt3m%2FgUtva1402jEfCXm%2BUBLjWkHBZ2gJ3zxHcG51JhWdnQENc%2BYk3O2vz%2F6CEJrBqYcyi9o6E172hJaMjJn876BRjYG0k7QiqFJr7tRo7SdgbSsgBaMzRoe%2BlCbfzWTlkILxqZdj%2FPaaWM0Y%2BtBUwbnrT8%2BoaZPY2kLBc2Ynfi%2FgVo2BtNO0JDRPSf6PtTgm0y7pNCI2KNJewWVqZnZNAH1md93J4HKEsNpb1Abw85P%2FQ%2Bo6GNoOs2H%2BgZo2gQqWqBpA6iNY%2Fe7EVRyXNm%2FMR%2FP%2FotjBRWokCFtK6AOrh1AA6ggkBxpG6hFnImzzLUFKNv2uOec5Q9Qw3kO7N%2BgmT7LjB81asuU1hNQXSyRhyyAULClxVDdHh%2FI4YEzIMzY0vZQWZQhlyyFX6V8aasIqnoinwP86oB8nlBRfkM%2Btxx%2BIaZWpNGf03zkCH4xYk0r7PiuTljALz6R0wQqya%2FI6ZrTHy78acS%2FCSd5hB8dmdNGdlyDCQfiGmz7dVhtkddWWZvWU0D72CGv3Qf84O%2BFP40Wl8irLOAHBXtaDLQDoq0fgnPk9gTaHrnt4Qcz5Bba8T2OcBPwLUGnWXAnmGbILfP5Lm%2BELLX3WSp9v3q0IC0GytcDuT1O8K2TBWlLq58kEJfhOfJbACVEfhN7z20IlDPy2xM3WIymQBkiv57i%2ByZM6ANlh%2FymAr6hpshvB5QVoqW3q%2BKK%2FO5AkchvmMM38iHyk0ApkV%2Ffg294feRXugPoDiCr0n0GtiPdVbid%2BwvfB4op8svcN5F2%2Bu67cDvTV34aM0F%2B4Ss%2FDzzYcW4JSwse%2Byav%2FETa4t9ERhakBS%2F9q5wFaRH%2F6kDaNbf3d2EPXuAyvLd30UQItCdyO9i7bOf5EquzYnvTgpdeH8iflvlAUz3kZf8KVcs%2FBJ%2F2rl1cQxWFvUvhR8xpBVThDfnvAu28SR16UMkEOS3sfdQxgGri0tp%2Fk0Lac39l6T%2FKLbd2AfLVg4rW9t7rPy24BtOiFXJZRda%2BTL%2F6A1Wp0N7BBHu2tFBBZUGJPGRs7QPfMrB9cBExnIV7pM1ZQA0nrvFA9qYlUEc%2B5R9QZddYrymdxn%2Bey5O9g%2BUSqEf0rB3SJ7YMaT0BNRUMEywLa9NkDHWpdzRtYO9413cFtaUXw6NyL76VA4abj%2BL%2BMjys%2BcvaEdePJTQhxmhSKGqkhWjSWEAj0cXagfWpybRdBA0lpbktExJrN5oo36ApNUFTJqpm2gJNGShozOuhGT3P2rSzBy1EfSMbF%2FVTqC01lBZBK%2FHK2q2zisxA2iqGlhKpf%2FO2pGHaXXuafOPfGZKMLJeMO0MSaXNoTz1LvRt
YPhXftqlE2lpBB9SayOQ6fgDqqTXtk07jzKSPH00dpL60tbJ9h%2Bb2%2BzODWt7tSKM34tZhlUBrSaYn7Q06Ffc1bKXfj6EDhQ1ptOhcP5OI7EXQibTXedo5gs55gxK7VE68ztImstu0gQcaqGSH%2BOjqHF8S1WXapcO03ZsCPaLxA7tRhhF0Kg1L7MZjHIE24os%2B05X%2B%2FL6ErWm7pQCd0ndJdxKN93cfNPDf763T5CwFzVTcK%2BnOXxrLXqE0pRXbtmmxAv3EaUp3%2Ftg4PQlL0x7TRIAZeXIusYnyfMo1p50apyU5mCOCcIV1rcJA2J9mivqzvpZYXXldR8pQWlQ77Y8CBnk8GFYLlcNBnJtNmwwlVlH%2Bl%2BYBG69Yn7Py98Ksty48lrQemXY2kEZRfvAMr5l84P97yOwaPgNfWZq2NpZG86JgPhlP%2B9ldlo9S3rP%2BdDyZB5FnRdqygzTHcRzHcRzHcRzHcRzHcZz%2FAbyvLkVmYcs9AAAAAElFTkSuQmCC&link=https%3A%2F%2Fwww.together.ai"" style=""display: inline-block; vertical-align: middle;""/>
         
     | 
| 38 | 
         
            +
              </a>
         
     | 
| 39 | 
         
            +
            </div>
         
     | 
| 40 | 
         
            +
            </div>
         
     | 
| 41 | 
         
            +
            </div>
         
     | 
| 42 | 
         
            +
             
     | 
| 43 | 
         
            +
            ## DeepCoder Overview
         
     | 
| 44 | 
         
            +
DeepCoder-14B-Preview is a code reasoning LLM fine-tuned from DeepSeek-R1-Distill-Qwen-14B using distributed reinforcement learning (RL) to scale up to long context lengths. The model achieves 60.6% Pass@1 accuracy on LiveCodeBench v5 (8/1/24-2/1/25), an 8% improvement over the base model (53%), and reaches performance comparable to OpenAI's o3-mini with just 14B parameters.
         
     | 
| 45 | 
         
            +
             
     | 
| 46 | 
         
            +
            <div style=""margin: 0 auto;"">
         
     | 
| 47 | 
         
            +
              <img src=""https://cdn-uploads.huggingface.co/production/uploads/654037be97949fd2304aab7f/r3-vzkItOCrMf1qldW0Mj.png"" style=""width: 100%;"" />
         
     | 
| 48 | 
         
            +
            </div>
         
     | 
| 49 | 
         
            +
             
     | 
| 50 | 
         
            +
            ## Data
         
     | 
| 51 | 
         
            +
Our training dataset consists of approximately 24K unique problem-test pairs compiled from:
         
     | 
| 52 | 
         
            +
            - Taco-Verified
         
     | 
| 53 | 
         
            +
            - PrimeIntellect SYNTHETIC-1
         
     | 
| 54 | 
         
            +
            - LiveCodeBench v5 (5/1/23-7/31/24)
         
     | 
| 55 | 
         
            +
             
     | 
| 56 | 
         
            +
            ## Training Recipe
         
     | 
| 57 | 
         
            +
             
     | 
| 58 | 
         
            +
            Our training recipe relies on an improved version of GRPO (GRPO+) and iterative context lengthening, introduced in DeepScaleR.
         
     | 
| 59 | 
         
            +
             
     | 
| 60 | 
         
            +
            ### GRPO+
         
     | 
| 61 | 
         
            +
             
     | 
| 62 | 
         
            +
            We enhance the original GRPO algorithm with insights from DAPO to enable more stable training:
         
     | 
| 63 | 
         
            +
             
     | 
| 64 | 
         
            +
            - **Offline Difficulty Filtering:** DAPO employs online dynamic sampling, discarding both entirely correct and entirely incorrect samples on the fly. While this helps maintain a more stable effective batch size, it introduces significant runtime overhead due to rejection sampling. Instead, we perform offline difficulty filtering on a subset of coding problems to ensure the training dataset remains within a suitable difficulty range.
         
     | 
| 65 | 
         
            +
            - **No Entropy Loss:** We observed that including an entropy loss term often led to instability, with entropy growing exponentially and ultimately collapsing training. To mitigate this, we eliminate the entropy loss entirely.
         
     | 
| 66 | 
         
            +
- **No KL Loss:** Eliminating the KL loss means the LLM is no longer constrained to stay within the trust region of the original SFT model. This removal also obviates the need to compute log probabilities for the reference policy, thereby accelerating training.
         
     | 
| 67 | 
         
            +
- **Overlong Filtering (from DAPO):** To preserve long-context reasoning, we mask the loss for truncated sequences. This technique enables DeepCoder to generalize to 64K-context inference despite being trained with a 32K context.
         
     | 
| 68 | 
         
            +
            - **Clip High (from DAPO):** By increasing the upper bound in GRPO/PPO’s surrogate loss, we encourage more exploration and more stable entropy.
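For illustration, the sketch below shows how the clip-high surrogate and the overlong-filtering mask described above could fit together in a single loss. This is not the actual Verl/GRPO+ training code: tensor names, shapes, and the clip values are assumptions.

```python
# Illustrative sketch only -- not the Verl/GRPO+ implementation.
# logp_new / logp_old: per-token log-probabilities, shape (batch, seq)
# advantages:          per-token advantages, shape (batch, seq)
# not_truncated:       1.0 for tokens of complete responses, 0.0 for tokens
#                      of responses cut off at the context limit
import torch

def grpo_plus_surrogate(logp_new, logp_old, advantages, not_truncated,
                        eps_low=0.2, eps_high=0.28):
    ratio = torch.exp(logp_new - logp_old)
    unclipped = ratio * advantages
    # "Clip High": the upper clip bound (eps_high) is larger than the lower one,
    # encouraging more exploration.
    clipped = torch.clamp(ratio, 1.0 - eps_low, 1.0 + eps_high) * advantages
    per_token = torch.minimum(unclipped, clipped)
    # "Overlong Filtering": truncated sequences contribute no loss or gradient.
    masked = per_token * not_truncated
    # No entropy bonus and no KL penalty are added, per the GRPO+ changes above.
    return -masked.sum() / not_truncated.sum().clamp(min=1.0)
```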
         
     | 
| 69 | 
         
            +
             
     | 
| 70 | 
         
            +
            ### Iterative Context Lengthening
         
     | 
| 71 | 
         
            +
             
     | 
| 72 | 
         
            +
Our original `DeepScaleR-1.5B-Preview` scaled long-context training from 8K→16K→24K, achieving 33→38→43% on AIME respectively. Similarly, `DeepCoder-14B-Preview` is trained on 16K→32K, achieving 54→58% on LiveCodeBench (v5). `DeepCoder-14B-Preview` successfully generalizes to longer contexts when evaluated at 64K context, reaching 60.6%.
         
     | 
| 73 | 
         
            +
             
     | 
| 74 | 
         
            +
DeepCoder generalizes better to long contexts than the base distilled model, thanks to DAPO's overlong filtering. However, its longer responses are often truncated when the max length is capped at 16K, which can lower its scores.
         
     | 
| 75 | 
         
            +
             
     | 
| 76 | 
         
            +
            | **Model** | **16K** | **32K** | **64K** |
         
     | 
| 77 | 
         
            +
            | --- | --- | --- | --- |
         
     | 
| 78 | 
         
            +
            | **DeepCoder-14B-Preview** | 45.6 | 57.9 | 60.6 |
         
     | 
| 79 | 
         
            +
            | **DeepSeek-R1-Distill-Qwen-14B** | 50.2 | 53.0 | 53.0 |
         
     | 
| 80 | 
         
            +
             
     | 
| 81 | 
         
            +
            A more detailed description of the training recipe can be found in our [blog post](https://pretty-radio-b75.notion.site/DeepCoder-A-Fully-Open-Source-14B-Coder-at-O3-mini-Level-1cf81902c14680b3bee5eb349a512a51).
         
     | 
| 82 | 
         
            +
             
     | 
| 83 | 
         
            +
            ## Evaluation
         
     | 
| 84 | 
         
            +
             
     | 
| 85 | 
         
            +
We evaluate `DeepCoder-14B-Preview` on various coding benchmarks, including LiveCodeBench (LCBv5), Codeforces, and HumanEval+.
         
     | 
| 86 | 
         
            +
             
     | 
| 87 | 
         
            +
            | **Model** | LCB (v5)(8/1/24-2/1/25) | Codeforces Rating | Codeforces Percentile | HumanEval+ |
         
     | 
| 88 | 
         
            +
            | --- | --- | --- | --- | --- |
         
     | 
| 89 | 
         
            +
            | **DeepCoder-14B-Preview (ours)** | ***60.6*** | ***1936*** | ***95.3*** | ***92.6*** |
         
     | 
| 90 | 
         
            +
            | **DeepSeek-R1-Distill-Qwen-14B** | 53.0 | 1791 | 92.7 | 92.0 |
         
     | 
| 91 | 
         
            +
            | **O1-2024-12-17 (Low)** | 59.5 | **1991** | **96.1** | 90.8 |
         
     | 
| 92 | 
         
            +
            | **O3-Mini-2025-1-31 (Low)** | **60.9** | 1918 | 94.9 | 92.6 |
         
     | 
| 93 | 
         
            +
            | **O1-Preview** | 42.7 | 1658 | 88.5 | 89 |
         
     | 
| 94 | 
         
            +
            | **Deepseek-R1** | 62.8 | 1948 | 95.4 | 92.6 |
         
     | 
| 95 | 
         
            +
            | **Llama-4-Behemoth** | 49.4 | - | - | - |
         
     | 
| 96 | 
         
            +
             
     | 
| 97 | 
         
            +
            ## Serving DeepCoder
         
     | 
| 98 | 
         
            +
            Our model can be served using popular high-performance inference systems:
         
     | 
| 99 | 
         
            +
            - vLLM
         
     | 
| 100 | 
         
            +
            - Hugging Face Text Generation Inference (TGI)
         
     | 
| 101 | 
         
            +
            - SGLang
         
     | 
| 102 | 
         
            +
            - TensorRT-LLM
         
     | 
| 103 | 
         
            +
             
     | 
| 104 | 
         
            +
            All these systems support the OpenAI Chat Completions API format.
         
     | 
| 105 | 
         
            +
             
     | 
| 106 | 
         
            +
            ### Usage Recommendations
         
     | 
| 107 | 
         
            +
Our usage recommendations are similar to those for the R1 and R1-Distill series:
         
     | 
| 108 | 
         
            +
            1. Avoid adding a system prompt; all instructions should be contained within the user prompt.
         
     | 
| 109 | 
         
            +
            2. `temperature = 0.6`
         
     | 
| 110 | 
         
            +
            3. `top_p = 0.95`
         
     | 
| 111 | 
         
            +
4. This model performs best with `max_tokens` set to at least `64000` (see the example request below).
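As a reference, here is a minimal sketch of a request that follows these recommendations against any of the OpenAI-compatible servers listed above. The base URL, API key, and served model name are placeholders for your own deployment, not values taken from this card.

```python
# Illustrative sketch only: endpoint, API key, and model name are placeholders.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

response = client.chat.completions.create(
    model="agentica-org/DeepCoder-14B-Preview",
    # No system prompt: all instructions go into the user message.
    messages=[{
        "role": "user",
        "content": "Write a Python function that checks whether a string is a palindrome.",
    }],
    temperature=0.6,
    top_p=0.95,
    max_tokens=64000,
)
print(response.choices[0].message.content)
```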
         
     | 
| 112 | 
         
            +
             
     | 
| 113 | 
         
            +
            ## License
         
     | 
| 114 | 
         
            +
            This project is released under the MIT License, reflecting our commitment to open and accessible AI development.
         
     | 
| 115 | 
         
            +
            We believe in democratizing AI technology by making our work freely available for anyone to use, modify, and build upon.
         
     | 
| 116 | 
         
            +
            This permissive license ensures that researchers, developers, and enthusiasts worldwide can leverage and extend our work without restrictions, fostering innovation and collaboration in the AI community.
         
     | 
| 117 | 
         
            +
             
     | 
| 118 | 
         
            +
            ## Acknowledgement
         
     | 
| 119 | 
         
            +
            - Our training experiments are powered by our heavily modified fork of [Verl](https://github.com/agentica-project/verl), an open-source post-training library.
         
     | 
| 120 | 
         
            +
            - Our model is trained on top of [`DeepSeek-R1-Distill-Qwen-14B`](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-14B).
         
     | 
| 121 | 
         
            +
            - Our work is done as part of  [Berkeley Sky Computing Lab](https://skycomputing.berkeley.edu/) and [Berkeley AI Research](https://bair.berkeley.edu/).
         
     | 
| 122 | 
         
            +
             
     | 
| 123 | 
         
            +
            ## Citation 
         
     | 
| 124 | 
         
            +
            ```bibtex
         
     | 
| 125 | 
         
            +
            @misc{deepcoder2025,
         
     | 
| 126 | 
         
            +
              title={DeepCoder: A Fully Open-Source 14B Coder at O3-mini Level},
         
     | 
| 127 | 
         
            +
  author={Michael Luo and Sijun Tan and Roy Huang and Ameen Patel and Alpay Ariyak and Qingyang Wu and Xiaoxiang Shi and Rachel Xin and Colin Cai and Maurice Weber and Ce Zhang and Li Erran Li and Raluca Ada Popa and Ion Stoica},
         
     | 
| 128 | 
         
            +
              howpublished={\url{https://pretty-radio-b75.notion.site/DeepCoder-A-Fully-Open-Source-14B-Coder-at-O3-mini-Level-1cf81902c14680b3bee5eb349a512a51}},
         
     | 
| 129 | 
         
            +
              note={Notion Blog},
         
     | 
| 130 | 
         
            +
              year={2025}
         
     | 
| 131 | 
         
            +
            }
         
     | 
| 132 | 
         
            +
            ```","{""id"": ""agentica-org/DeepCoder-14B-Preview"", ""author"": ""agentica-org"", ""sha"": ""b8d891051ba35b18545cc6ee14817a6f6b06b186"", ""last_modified"": ""2025-04-09 21:12:07+00:00"", ""created_at"": ""2025-04-07 10:39:13+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 39699, ""downloads_all_time"": null, ""likes"": 608, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""qwen2"", ""text-generation"", ""conversational"", ""en"", ""dataset:PrimeIntellect/verifiable-coding-problems"", ""dataset:likaixin/TACO-verified"", ""dataset:livecodebench/code_generation_lite"", ""base_model:deepseek-ai/DeepSeek-R1-Distill-Qwen-14B"", ""base_model:finetune:deepseek-ai/DeepSeek-R1-Distill-Qwen-14B"", ""license:mit"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- deepseek-ai/DeepSeek-R1-Distill-Qwen-14B\ndatasets:\n- PrimeIntellect/verifiable-coding-problems\n- likaixin/TACO-verified\n- livecodebench/code_generation_lite\nlanguage:\n- en\nlibrary_name: transformers\nlicense: mit\npipeline_tag: text-generation"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""Qwen2ForCausalLM""], ""model_type"": ""qwen2"", ""tokenizer_config"": {""bos_token"": ""<\uff5cbegin\u2581of\u2581sentence\uff5c>"", ""chat_template"": ""{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<\uff5cUser\uff5c>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<\uff5cAssistant\uff5c><\uff5ctool\u2581calls\u2581begin\uff5c><\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{{'<\uff5ctool\u2581calls\u2581end\uff5c><\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>' + message['content'] + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif 
%}{{'<\uff5cAssistant\uff5c>' + content + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<\uff5ctool\u2581outputs\u2581begin\uff5c><\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<\uff5cAssistant\uff5c><think>\\n'}}{% endif %}"", ""eos_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""pad_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""unk_token"": null, ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00012.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00012.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00012.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00012.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00012.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00012.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00012.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00012.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00012.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-00012.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-00012.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00012-of-00012.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='together-ai-branding-lightOnDark.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""hadadrjt/ai"", ""IamOmer4148/Test"", ""sierrafr/test"", ""naxwinn/Aura-2""], ""safetensors"": {""parameters"": {""F32"": 14770033664}, ""total"": 14770033664}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-09 21:12:07+00:00"", ""cardData"": ""base_model:\n- deepseek-ai/DeepSeek-R1-Distill-Qwen-14B\ndatasets:\n- PrimeIntellect/verifiable-coding-problems\n- likaixin/TACO-verified\n- livecodebench/code_generation_lite\nlanguage:\n- en\nlibrary_name: 
transformers\nlicense: mit\npipeline_tag: text-generation"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""67f3ab51e59f071a85391aa2"", ""modelId"": ""agentica-org/DeepCoder-14B-Preview"", ""usedStorage"": 59091623642}",0,"https://huggingface.co/EpistemeAI/DeepCoder-14B-Preview-safety-alignment-unsloth, https://huggingface.co/EpistemeAI/SAI-DeepCoder-14B-Preview-v1.0, https://huggingface.co/mlx-community/DeepCoder-14B-Preview-bf16, https://huggingface.co/Gapeleon/DeepCoder-14B-Preview-int4-awq-ov, https://huggingface.co/secmlr/DS-Noisy_DS-Clean_QWQ-Noisy_QWQ-Clean_DeepCoder-14B-Preview_full_sft_1e-5, https://huggingface.co/Apel-sin/deepcoder-14B-preview-exl2, https://huggingface.co/wasim845/dfgh, https://huggingface.co/rieon/DeepCoder-14B-Preview-Suger",8,,0,"https://huggingface.co/bartowski/agentica-org_DeepCoder-14B-Preview-GGUF, https://huggingface.co/mlx-community/DeepCoder-14B-Preview-4bit, https://huggingface.co/mlx-community/DeepCoder-14B-Preview-6bit, https://huggingface.co/mlx-community/DeepCoder-14B-Preview-8bit, https://huggingface.co/achitech/DeepCoder-14B-Preview-Q4_K_M-GGUF, https://huggingface.co/lmstudio-community/DeepCoder-14B-Preview-GGUF, https://huggingface.co/DevQuasar/agentica-org.DeepCoder-14B-Preview-GGUF, https://huggingface.co/Joumdane/DeepCoder-14B-Preview-GGUF, https://huggingface.co/miike-ai/deepcoder-14b-fp8, https://huggingface.co/cgus/DeepCoder-14B-Preview-exl2, https://huggingface.co/numen-tech/DeepCoder-14B-Preview-GPTQ-Int4, https://huggingface.co/achitech/DeepCoder-14B-Preview-Q6_K-GGUF, https://huggingface.co/justinmeans/DeepCoder-14B-Preview-mlx-8Bit, https://huggingface.co/achitech/DeepCoder-14B-Preview-Q8_0-GGUF, https://huggingface.co/achitech/DeepCoder-14B-Preview-Q3_K_M-GGUF, https://huggingface.co/mradermacher/DeepCoder-14B-Preview-GGUF, https://huggingface.co/justinmeans/DeepCoder-14B-Preview-mlx-2Bit, https://huggingface.co/justinmeans/DeepCoder-14B-Preview-mlx-4Bit, https://huggingface.co/okamototk/DeepCoder-14B-Preview-imatrix-GGUF, https://huggingface.co/noneUsername/DeepCoder-14B-Preview-W8A8, https://huggingface.co/WSDW/DeepCoder-14B-Preview-Q3_K_M-GGUF, https://huggingface.co/WSDW/DeepCoder-14B-Preview-Q2_K-GGUF, https://huggingface.co/BenevolenceMessiah/DeepCoder-14B-Preview-Q8_0-GGUF, https://huggingface.co/EpistemeAI/DeepCoder-14B-Preview-GGUF, https://huggingface.co/gercamjr/DeepCoder-14B-Preview-Q4_K_M-GGUF, https://huggingface.co/tensorblock/agentica-org_DeepCoder-14B-Preview-GGUF",26,"https://huggingface.co/YOYO-AI/YOYO-O1-14B, https://huggingface.co/mergekit-community/mergekit-sce-sudfgqi, https://huggingface.co/spacematt/Qwen2.5-Channel-Coder-14B-Instruct",3,"IamOmer4148/Test, hadadrjt/ai, huggingface/InferenceSupport/discussions/518, naxwinn/Aura-2, sierrafr/test",5
         
     | 
| 133 | 
         
            +
            EpistemeAI/DeepCoder-14B-Preview-safety-alignment-unsloth,"---
         
     | 
| 134 | 
         
            +
            base_model: agentica-org/DeepCoder-14B-Preview
         
     | 
| 135 | 
         
            +
            tags:
         
     | 
| 136 | 
         
            +
            - text-generation-inference
         
     | 
| 137 | 
         
            +
            - transformers
         
     | 
| 138 | 
         
            +
            - unsloth
         
     | 
| 139 | 
         
            +
            - qwen2
         
     | 
| 140 | 
         
            +
            - trl
         
     | 
| 141 | 
         
            +
            license: mit
         
     | 
| 142 | 
         
            +
            language:
         
     | 
| 143 | 
         
            +
            - en
         
     | 
| 144 | 
         
            +
            ---
         
     | 
| 145 | 
         
            +
             
     | 
| 146 | 
         
            +
## For a better model, please see the [SIA DeepCoder 14B model](https://huggingface.co/EpistemeAI/SA-DeepCoder-14B-Preview-unsloth-v1.0)
         
     | 
| 147 | 
         
            +
             
     | 
| 148 | 
         
            +
## This model was supervised fine-tuned on [gretelai's safety and alignment dataset](https://huggingface.co/datasets/gretelai/gretel-safety-alignment-en-v1) using [Unsloth](https://github.com/unslothai/unsloth)
         
     | 
| 149 | 
         
            +
             
     | 
| 150 | 
         
            +
            ## Episteme alignment and safety technique
         
     | 
| 151 | 
         
            +
             
     | 
| 152 | 
         
            +
### To enable thinking, add `<think>` to your prompt
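Below is a minimal sketch (an illustration under stated assumptions, not an official snippet from this card) of prepending `<think>` to the user prompt with the Transformers chat API; the generation settings mirror the base card's recommendations.

```python
# Illustrative sketch only: model ID and generation settings are assumptions.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "EpistemeAI/DeepCoder-14B-Preview-safety-alignment-unsloth"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

# Prepend <think> to the user prompt, as suggested above.
messages = [{"role": "user",
             "content": "<think>\nExplain why quicksort is O(n log n) on average."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

outputs = model.generate(input_ids, do_sample=True, temperature=0.6,
                         top_p=0.95, max_new_tokens=1024)
print(tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True))
```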
         
     | 
| 153 | 
         
            +
             
     | 
| 154 | 
         
            +
             
     | 
| 155 | 
         
            +
            ## Model Card
         
     | 
| 156 | 
         
            +
             
     | 
| 157 | 
         
            +
            <div align=""center"">
         
     | 
| 158 | 
         
            +
            <span style=""font-family: default; font-size: 1.5em;"">DeepCoder-14B-Preview</span>
         
     | 
| 159 | 
         
            +
            <div>
         
     | 
| 160 | 
         
            +
            🚀 Democratizing Reinforcement Learning for LLMs (RLLM) 🌟
         
     | 
| 161 | 
         
            +
            </div>
         
     | 
| 162 | 
         
            +
            </div>
         
     | 
| 163 | 
         
            +
            <br>
         
     | 
| 164 | 
         
            +
            <div align=""center"" style=""line-height: 1;"">
         
     | 
| 165 | 
         
            +
              <a href=""https://github.com/agentica-project/rllm"" style=""margin: 2px;"">
         
     | 
| 166 | 
         
            +
                <img alt=""Code"" src=""https://img.shields.io/badge/RLLM-000000?style=for-the-badge&logo=github&logoColor=000&logoColor=white"" style=""display: inline-block; vertical-align: middle;""/>
         
     | 
| 167 | 
         
            +
              </a>
         
     | 
| 168 | 
         
            +
              <a href=""https://pretty-radio-b75.notion.site/DeepCoder-A-Fully-Open-Source-14B-Coder-at-O3-mini-Level-1cf81902c14680b3bee5eb349a512a51"" target=""_blank"" style=""margin: 2px;"">
         
     | 
| 169 | 
         
            +
                <img alt=""Blog"" src=""https://img.shields.io/badge/Notion-%23000000.svg?style=for-the-badge&logo=notion&logoColor=white"" style=""display: inline-block; vertical-align: middle;""/>
         
     | 
| 170 | 
         
            +
              </a>
         
     | 
| 171 | 
         
            +
              <a href=""https://x.com/Agentica_"" style=""margin: 2px;"">
         
     | 
| 172 | 
         
            +
                <img alt=""X.ai"" src=""https://img.shields.io/badge/Agentica-white?style=for-the-badge&logo=X&logoColor=000&color=000&labelColor=white"" style=""display: inline-block; vertical-align: middle;""/>
         
     | 
| 173 | 
         
            +
              </a>
         
     | 
| 174 | 
         
            +
              <a href=""https://huggingface.co/agentica-org"" style=""margin: 2px;"">
         
     | 
| 175 | 
         
            +
                <img alt=""Hugging Face"" src=""https://img.shields.io/badge/Agentica-fcd022?style=for-the-badge&logo=huggingface&logoColor=000&labelColor"" style=""display: inline-block; vertical-align: middle;""/>
         
     | 
| 176 | 
         
            +
              </a>
         
     | 
| 177 | 
         
            +
              <a href=""https://www.together.ai"" style=""margin: 2px;"">
         
     | 
| 178 | 
         
            +
                <img alt=""Together AI"" src=""https://img.shields.io/badge/-Together_AI%20-white?style=for-the-badge&logo=data%3Aimage%2Fpng%3Bbase64%2CiVBORw0KGgoAAAANSUhEUgAAAUAAAAFACAMAAAD6TlWYAAAC7lBMVEUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8AAAAPb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADIBDt6AAAA%2BnRSTlMAAiQEKgcdKQwiHBMUzrtSUEmjhmZGH96yv8n1ey7nL3y1U%2FZfCaIo1WFg1NrcsHYrA2%2Fv80J%2BMeilnpefqKw%2B64%2BQlSbYZGVnBGkCV%2BxW8XJube6WJ9kZF9bSzBALRynPQfLhIjvwyBEAXOTLp3o%2FJA9Y9%2F7%2F9FEKDhIVFo4GHkVzjGz8icrHzY39iHR1i0M8Jj14LLZUvb7DxMXGoQEFeQcgSBOHaPvm4uOdRLMMqcDTLbcII0sNuVn4TKaRd6RKIeDd37Svra6xuLpaW17lXUAlHh8WGxUPIS4JGQoFECMsBg4gFwsRJRIrCC0oAycaFC8NMDIzMRgBsVt9rwAAD25JREFUeNrs3QVzG0kWB%2FA3ikHhZeYwk3LMbF7GcBasOGw9hb3MzLyKw8zMzMx2rsokhySNY2mmR1N4xXV3a7sHuzWu%2BX2Ef3XPG%2Br3wOVyuVwul8vlcrlcLpfL5XK5dOlXOHTIvLnb27Xd%2FasBvrt9A%2B7r1bbdTTffcmuXwhzgTYwk6q%2BHr2RWlcclRYqXV2VeCV%2Bvr4mIkCJKZ83uc9NLC0fMD%2BD%2FCswfMfLtzh%2FeelsJcKJW19SG66KSTP6fLEXrwrU11Srw5Z8zbuzePcUBbFyg%2BPY7Pv%2Bs0A%2Bsid7ayiqFNEWp8iS9Ir%2F0Cl957bkRAaQLFLz15sBBfpbpJc7FJKKFFGuV4JJh6N573g6idr7vP%2F8iC9iI1NZJRDupLnlRBbaW3XjTfQHUJ3D8d68MBtsJiTNRold5uEYAdibkHgqiESMefGi9zfFVeCRihOS5LLJafV99XYxGddgwabKt8SmEyEQ%2FmRDlSoUA9gsNvKMDmhE8MC4L7OFtSYmPFmFlAmzm%2F9tfH0Oz8v6yFmxQ3SpOiY8eYTwjHew0%2BB9%2FD6B5ga4dLd%2FHQus0SnzaIrzWWgDb9P19MVqjw01dwFLpYYVYQymLgD1Kjj6J1umaHwLLqJfpy0%2FHIryqgg2mvetDKxXMnQMWEa9LxEpSqxZguS%2B%2BfA%2Bt9cZBi7ZxeqVMX376FqEnAtbyv7ISrTfspB%2FM82bq3r70BNMSYKV%2Bo4rQDiPzc8Csy1Fih%2BhVsE7o0cfQHnn%2FygJz6uNEJtaTSfy8ChYpnelDuxQ8HAIT1LOS8fwoCSq1FiVYcs%2FdaJ%2FgNhMJqrWKqfwoCSYtSTA08260U%2FBh47v4LDU%2F%2FgnmPOJDexX86ycwpp6yf80neB7M8o96DO2Wl2%2Bw%2FlLrh%2FlKYroW31qE9ht5EgzwRs3nR00wmgBTVq1EFtp2Ad0imdbkR0kwLQImTP8S2eg9B3QSKwkbHhPPxSUzAsjGe3P1luLrMmGklQpGjfIhKwU6C8llibBJUCaS4UKy6klkp0cX0CE9zcr8KAlei4Ahy36PLHXuBJqpYcJSmQBG3LIJWerQETS7qhCWlHowoMvfka2Va0Gjaus3MGUTp4NuWY8ja3%2FuB9q0IqydBt1eeQxZ%2B9MfQRNvnLAWT%2BiuIEuRvT9MBg3UlkQmbMmkUgB9cjsge8EbQIMLCmFPuQy6DPoGeVi9HqgED5EJazL5VAQ9Nm5CHjq0B6oKhZCUX4LrNyAfSycDhVBJZMKeTK4IoN26IPJRsAQoEhLhQ7kAmoV%2Bjbwspt0LniF8yKRMBa1%2B%2BSvkZVFfaFIkSngpvwha%2FQL56QNNqiX8%2FBs0mnMX8vPtBGiCWEf4iYmgzey7kZ8Rw6EJXonwo9SANn9GnuZCE84RnlqBJm3aIk8vFUKjxBjhKbMFaDHQhzy9%2BAI06pJEeJIS%2FGuwBn1M1WD%2BdXjNauSrdwk0Qq0kfHlUoFs7Evnq9TI0orqK8BVN
1%2FIcvAn56vAKNCKhEDruz8NjkbdXOV4CKZJA1W8M8vbjT9CwMOGtDKjmjEbefpgCDRLqCB33p7kvipC3kc83UkOihLdohF5DfMjbiBf43UZTSPQq8vobyNsbudCgyzLhTT4PNK8hpmoZPkv4awU0y5G%2F1%2Fj90WG%2BDK9ATNX7mDDh71OgWYn83RHi9yRMkQY0I5G%2FOydDA4RPCX9RoMlD%2Fu6a0mCAMcJfHGh8yN%2BwqdAAMZPwJwFNB%2BRv5TRoQIs0wp%2FiiAB7TG%2B2Abor0L0GmiO5VdicuHsfaE7UfRIxJ80Rz8Kdnfss7L6NoShz8vvAWsLfOUe8kZ7o5DfSm1Pgm8gnTv4msqoIzXC%2FyrUZjWa434XdPxOoRZjiHjTD%2FTcGNm9Cg9y%2Fs9z%2FAymi1e4fqqZ4VPcfaQZnlQYGkacXP3H6X%2FrT2qIZ7jkR%2BAvy9L5jTyq5Z%2BUolBpHnNYc5PDTmubrsHtemOeJ9aJmcWI9tAV5%2BQ29Z4Kc%2Bj0TYHOQVwl5pVl07YD1h9EMt28MHOHUueihZtK5CArvRB4OTWkuvbNgYjGyF5wEGlQ4oXsbrF%2BK7O2fDBoIPPoHegQndLAc14w6WELot8jaX5pVD1Xo8iSy1WM8nzbcFMZbcf%2BLcR%2Fp7qBZayf0kYZly5GlzpOd3Mmcfy%2F9rl1AhwjTXvoXwaATDKc55Dp6mgP%2FeSLvZ4E%2B55wwTwSmr0Y2Djp6og3%2FmUrDhqbuTKWLYMqQ42i%2FkcNTdqpXeQ2Y4z82AO2Wl8txrpz5AkLRr38Q7TUiOydlJxueBfNCYzugnYKvOn62JkXpA3YmGPy8xPnTXanzhYP27d8PSvjPFzafH0Wov12VJC87ZSdcS2dVsEy%2FE8fRDgtznTFj3Tz%2FrT3QesOGO2bKv3mrVr%2BH1nrjjqFgiUilTGRr8%2FNEwHLTZ%2FisLR9vzgGLiOckYiWpVQuwQcmonmidZ3JDYBn1chohslXL79pVFWzh%2F2L5JrRG8fahYKlIWCHWUMoiYJtl%2F3wygOYFunabDBYTWmtdhJTlVy%2BAjfxPPP4YmpW3dTzYID0jTo%2BQEl88Ix1sFlqytAOacfe%2Bk1lgD29LxXiEMiFKZUIF%2By3L%2F6YYjSpu134w2EaouEKPsNH4rlwWgI0JEzcE0Qjfl19NAVsJFR6JGCF5LovAzrId2%2B8LoD6BBT8OGQy2E2rCUaJXebhGALZC9z%2FwUhC18%2F0wc1UWsBFJ1klEOymWvKgCe%2F7CW999xxdAusCI0R99PMgP7IiJczFJY3qtEiLw8tOckw88uKs40FR4xXuWzvzjVD%2BwJnqTlVUKaYpS5Ul6ReCsdOeOmVveKgq%2Bh%2F%2FvveCiu7Zvmz2rFDhRq2tqw7GoJJP%2FJ0vRWFmyplqF1NBv0KmTJz7fumX1d889%2B8yTzzz73Ldfbtm6bdS48RNygDcx3Xu1NqPMUxdLS7uWlhar85RlJK9600VIOf6c0mWDpj391NNtBg0uyfFDSlEF8T%2Ft3eFyqjwTwPGNiKq9eq%2BtqiCeoxZVEcRW4mK%2Bvc%2F5%2Bk7bBSDZOJPfFfwHWkEMG%2B%2BfXChwHMdxHMdxHMdxHMdxHMdxHIeV4yiR%2FyOUS6tHfBxP88Vse74N%2F7mdt7PF%2FHT8EFakbYg0XupvMZ%2Fddt%2F%2Ber27zebFX%2BXSfpQfD%2BMLsX7iMp4fc460%2BfgiqbSD1jSCGH1WXAV1v32OhOm0O1Yh9aUR0sNUYnVyekjBEH9eL%2B2mIY2gilmGdWXvhTKQNnpvkDYrBJgjNluJTchtIDSnBY3TNgLMUEGvbL4Qvhco3WkPbOS%2FNAEGjMay1bsEMjyCJsewXVo5HoFuH5P2b7OsJh9a0har1mn3tmkElXTzPlU%2FUd2nDfnTKH53b%2FTN%2FI7TZp2l7X3QZNPlO6X9jb1pJwUa5J8SuyQ%2Fc2vTFjl0zu%2F8vfrH2O8obdx52jaFjmmZ7HAdQQeOVw1pwxF0StNskd0GWtvsUIfsBB3SNt3m%2FgUtva1402jEfCXm%2BUBLjWkHBZ2gJ3zxHcG51JhWdnQENc%2BYk3O2vz%2F6CEJrBqYcyi9o6E172hJaMjJn876BRjYG0k7QiqFJr7tRo7SdgbSsgBaMzRoe%2BlCbfzWTlkILxqZdj%2FPaaWM0Y%2BtBUwbnrT8%2BoaZPY2kLBc2Ynfi%2FgVo2BtNO0JDRPSf6PtTgm0y7pNCI2KNJewWVqZnZNAH1md93J4HKEsNpb1Abw85P%2FQ%2Bo6GNoOs2H%2BgZo2gQqWqBpA6iNY%2Fe7EVRyXNm%2FMR%2FP%2FotjBRWokCFtK6AOrh1AA6ggkBxpG6hFnImzzLUFKNv2uOec5Q9Qw3kO7N%2BgmT7LjB81asuU1hNQXSyRhyyAULClxVDdHh%2FI4YEzIMzY0vZQWZQhlyyFX6V8aasIqnoinwP86oB8nlBRfkM%2Btxx%2BIaZWpNGf03zkCH4xYk0r7PiuTljALz6R0wQqya%2FI6ZrTHy78acS%2FCSd5hB8dmdNGdlyDCQfiGmz7dVhtkddWWZvWU0D72CGv3Qf84O%2BFP40Wl8irLOAHBXtaDLQDoq0fgnPk9gTaHrnt4Qcz5Bba8T2OcBPwLUGnWXAnmGbILfP5Lm%2BELLX3WSp9v3q0IC0GytcDuT1O8K2TBWlLq58kEJfhOfJbACVEfhN7z20IlDPy2xM3WIymQBkiv57i%2ByZM6ANlh%2FymAr6hpshvB5QVoqW3q%2BKK%2FO5AkchvmMM38iHyk0ApkV%2Ffg294feRXugPoDiCr0n0GtiPdVbid%2BwvfB4op8svcN5F2%2Bu67cDvTV34aM0F%2B4Ss%2FDzzYcW4JSwse%2Byav%2FETa4t9ERhakBS%2F9q5wFaRH%2F6kDaNbf3d2EPXuAyvLd30UQItCdyO9i7bOf5EquzYnvTgpdeH8iflvlAUz3kZf8KVcs%2FBJ%2F2rl1cQxWFvUvhR8xpBVThDfnvAu28SR16UMkEOS3sfdQxgGri0tp%2Fk0Lac39l6T%2FKLbd2AfLVg4rW9t7rPy24BtOiFXJZRda%2BTL%2F6A1Wp0N7BBHu2tFBBZUGJPGRs7QPfMrB9cBExnIV7pM1ZQA0nrvFA9qYlUEc%2B5R9QZddYrymdxn%2Bey5O9g%2BUSqEf0rB3SJ7YMaT0BNRUMEywLa9NkDHWpdzRtYO9413cFtaUXw6NyL76VA4abj%2BL%2BMjys%2BcvaEdePJTQhxmhSKGqkhWjSWEAj0cXagfWpybRdBA0lpbktExJrN5oo36ApNUFTJqpm2gJNGShozOuhGT3P2rSzBy1EfSMbF%2FVTqC01lBZBK%2FHK2q2zisxA2iqGlhKpf%2FO2pGHaXXuafOPfGZKMLJeMO0MSaXNoTz1LvRt
YPhXftqlE2lpBB9SayOQ6fgDqqTXtk07jzKSPH00dpL60tbJ9h%2Bb2%2BzODWt7tSKM34tZhlUBrSaYn7Q06Ffc1bKXfj6EDhQ1ptOhcP5OI7EXQibTXedo5gs55gxK7VE68ztImstu0gQcaqGSH%2BOjqHF8S1WXapcO03ZsCPaLxA7tRhhF0Kg1L7MZjHIE24os%2B05X%2B%2FL6ErWm7pQCd0ndJdxKN93cfNPDf763T5CwFzVTcK%2BnOXxrLXqE0pRXbtmmxAv3EaUp3%2Ftg4PQlL0x7TRIAZeXIusYnyfMo1p50apyU5mCOCcIV1rcJA2J9mivqzvpZYXXldR8pQWlQ77Y8CBnk8GFYLlcNBnJtNmwwlVlH%2Bl%2BYBG69Yn7Py98Ksty48lrQemXY2kEZRfvAMr5l84P97yOwaPgNfWZq2NpZG86JgPhlP%2B9ldlo9S3rP%2BdDyZB5FnRdqygzTHcRzHcRzHcRzHcRzHcZz%2FAbyvLkVmYcs9AAAAAElFTkSuQmCC&link=https%3A%2F%2Fwww.together.ai"" style=""display: inline-block; vertical-align: middle;""/>
         
     | 
| 179 | 
         
            +
              </a>
         
     | 
| 180 | 
         
            +
            </div>
         
     | 
| 181 | 
         
            +
            </div>
         
     | 
| 182 | 
         
            +
            </div>
         
     | 
| 183 | 
         
            +
             
     | 
| 184 | 
         
            +
            ## DeepCoder Overview
         
     | 
| 185 | 
         
            +
DeepCoder-14B-Preview is a code reasoning LLM fine-tuned from DeepSeek-R1-Distill-Qwen-14B using distributed reinforcement learning (RL) to scale up to long context lengths. The model achieves 60.6% Pass@1 accuracy on LiveCodeBench v5 (8/1/24-2/1/25), an 8% improvement over the base model (53%), and reaches performance comparable to OpenAI's o3-mini with just 14B parameters.
         
     | 
| 186 | 
         
            +
             
     | 
| 187 | 
         
            +
            <div style=""margin: 0 auto;"">
         
     | 
| 188 | 
         
            +
              <img src=""https://cdn-uploads.huggingface.co/production/uploads/654037be97949fd2304aab7f/r3-vzkItOCrMf1qldW0Mj.png"" style=""width: 100%;"" />
         
     | 
| 189 | 
         
            +
            </div>
         
     | 
| 190 | 
         
            +
             
     | 
| 191 | 
         
            +
            ## Data
         
     | 
| 192 | 
         
            +
Our training dataset consists of approximately 24K unique problem-test pairs compiled from:
         
     | 
| 193 | 
         
            +
            - Taco-Verified
         
     | 
| 194 | 
         
            +
            - PrimeIntellect SYNTHETIC-1
         
     | 
| 195 | 
         
            +
            - LiveCodeBench v5 (5/1/23-7/31/24)
         
     | 
| 196 | 
         
            +
             
     | 
| 197 | 
         
            +
            ## Training Recipe
         
     | 
| 198 | 
         
            +
             
     | 
| 199 | 
         
            +
            Our training recipe relies on an improved version of GRPO (GRPO+) and iterative context lengthening, introduced in DeepScaleR.
         
     | 
| 200 | 
         
            +
             
     | 
| 201 | 
         
            +
            ### GRPO+
         
     | 
| 202 | 
         
            +
             
     | 
| 203 | 
         
            +
            We enhance the original GRPO algorithm with insights from DAPO to enable more stable training:
         
     | 
| 204 | 
         
            +
             
     | 
| 205 | 
         
            +
            - **Offline Difficulty Filtering:** DAPO employs online dynamic sampling, discarding both entirely correct and entirely incorrect samples on the fly. While this helps maintain a more stable effective batch size, it introduces significant runtime overhead due to rejection sampling. Instead, we perform offline difficulty filtering on a subset of coding problems to ensure the training dataset remains within a suitable difficulty range.
         
     | 
| 206 | 
         
            +
            - **No Entropy Loss:** We observed that including an entropy loss term often led to instability, with entropy growing exponentially and ultimately collapsing training. To mitigate this, we eliminate the entropy loss entirely.
         
     | 
| 207 | 
         
            +
- **No KL Loss:** Eliminating the KL loss means the LLM is no longer constrained to stay within the trust region of the original SFT model. This removal also obviates the need to compute log probabilities for the reference policy, thereby accelerating training.
         
     | 
| 208 | 
         
            +
- **Overlong Filtering (from DAPO):** To preserve long-context reasoning, we mask the loss for truncated sequences. This technique enables DeepCoder to generalize to 64K-context inference despite being trained with a 32K context.
         
     | 
| 209 | 
         
            +
            - **Clip High (from DAPO):** By increasing the upper bound in GRPO/PPO’s surrogate loss, we encourage more exploration and more stable entropy.
         
     | 
| 210 | 
         
            +
             
     | 
| 211 | 
         
            +
            ### Iterative Context Lengthening
         
     | 
| 212 | 
         
            +
             
     | 
| 213 | 
         
            +
Our original `DeepScaleR-1.5B-Preview` scaled long-context training from 8K→16K→24K, achieving 33→38→43% on AIME respectively. Similarly, `DeepCoder-14B-Preview` is trained on 16K→32K, achieving 54→58% on LiveCodeBench (v5). `DeepCoder-14B-Preview` successfully generalizes to longer contexts when evaluated at 64K context, reaching 60.6%.
         
     | 
| 214 | 
         
            +
             
     | 
| 215 | 
         
            +
DeepCoder generalizes better to long contexts than the base distilled model, thanks to DAPO's overlong filtering. However, its longer responses are often truncated when the max length is capped at 16K, which can lower its scores.
         
     | 
| 216 | 
         
            +
             
     | 
| 217 | 
         
            +
            | **Model** | **16K** | **32K** | **64K** |
         
     | 
| 218 | 
         
            +
            | --- | --- | --- | --- |
         
     | 
| 219 | 
         
            +
            | **DeepCoder-14B-Preview** | 45.6 | 57.9 | 60.6 |
         
     | 
| 220 | 
         
            +
            | **DeepSeek-R1-Distill-Qwen-14B** | 50.2 | 53.0 | 53.0 |
         
     | 
| 221 | 
         
            +
             
     | 
| 222 | 
         
            +
            A more detailed description of the training recipe can be found in our [blog post](https://pretty-radio-b75.notion.site/DeepCoder-A-Fully-Open-Source-14B-Coder-at-O3-mini-Level-1cf81902c14680b3bee5eb349a512a51).
         
     | 
| 223 | 
         
            +
             
     | 
| 224 | 
         
            +
            ## Evaluation
         
     | 
| 225 | 
         
            +
             
     | 
| 226 | 
         
            +
We evaluate `DeepCoder-14B-Preview` on various coding benchmarks, including LiveCodeBench (LCBv5), Codeforces, and HumanEval+.
         
     | 
| 227 | 
         
            +
             
     | 
| 228 | 
         
            +
            | **Model** | LCB (v5)(8/1/24-2/1/25) | Codeforces Rating | Codeforces Percentile | HumanEval+ |
         
     | 
| 229 | 
         
            +
            | --- | --- | --- | --- | --- |
         
     | 
| 230 | 
         
            +
            | **DeepCoder-14B-Preview (ours)** | ***60.6*** | ***1936*** | ***95.3*** | ***92.6*** |
         
     | 
| 231 | 
         
            +
            | **DeepSeek-R1-Distill-Qwen-14B** | 53.0 | 1791 | 92.7 | 92.0 |
         
     | 
| 232 | 
         
            +
            | **O1-2024-12-17 (Low)** | 59.5 | **1991** | **96.1** | 90.8 |
         
     | 
| 233 | 
         
            +
            | **O3-Mini-2025-1-31 (Low)** | **60.9** | 1918 | 94.9 | 92.6 |
         
     | 
| 234 | 
         
            +
            | **O1-Preview** | 42.7 | 1658 | 88.5 | 89 |
         
     | 
| 235 | 
         
            +
            | **Deepseek-R1** | 62.8 | 1948 | 95.4 | 92.6 |
         
     | 
| 236 | 
         
            +
            | **Llama-4-Behemoth** | 49.4 | - | - | - |
         
     | 
| 237 | 
         
            +
             
     | 
| 238 | 
         
            +
            ## Serving DeepCoder
         
     | 
| 239 | 
         
            +
            Our model can be served using popular high-performance inference systems:
         
     | 
| 240 | 
         
            +
            - vLLM
         
     | 
| 241 | 
         
            +
            - Hugging Face Text Generation Inference (TGI)
         
     | 
| 242 | 
         
            +
            - SGLang
         
     | 
| 243 | 
         
            +
            - TensorRT-LLM
         
     | 
| 244 | 
         
            +
             
     | 
| 245 | 
         
            +
            All these systems support the OpenAI Chat Completions API format.
         
     | 
| 246 | 
         
            +
             
     | 
| 247 | 
         
            +
            ### Usage Recommendations
         
     | 
| 248 | 
         
            +
Our usage recommendations are similar to those for the R1 and R1-Distill series:
         
     | 
| 249 | 
         
            +
            1. Avoid adding a system prompt; all instructions should be contained within the user prompt.
         
     | 
| 250 | 
         
            +
            2. `temperature = 0.6`
         
     | 
| 251 | 
         
            +
            3. `top_p = 0.95`
         
     | 
| 252 | 
         
            +
4. This model performs best with `max_tokens` set to at least `64000`.
         
     | 
| 253 | 
         
            +
             
     | 
| 254 | 
         
            +
            ## EpistemeAI Training script
         
     | 
| 255 | 
         
            +
            [Fine tune DeepCoder with unsloth](https://colab.research.google.com/drive/1If_NwF2aNvQrG7lyCClhJIFVbdHhMN8c?usp=sharing)
         
     | 
| 256 | 
         
            +
             
     | 
| 257 | 
         
            +
             
     | 
| 258 | 
         
            +
            ## License
         
     | 
| 259 | 
         
            +
            This project is released under the MIT License, reflecting our commitment to open and accessible AI development.
         
     | 
| 260 | 
         
            +
            We believe in democratizing AI technology by making our work freely available for anyone to use, modify, and build upon.
         
     | 
| 261 | 
         
            +
            This permissive license ensures that researchers, developers, and enthusiasts worldwide can leverage and extend our work without restrictions, fostering innovation and collaboration in the AI community.
         
     | 
| 262 | 
         
            +
             
     | 
| 263 | 
         
            +
            ## Acknowledgement
         
     | 
| 264 | 
         
            +
            - Our training experiments are powered by our heavily modified fork of [Verl](https://github.com/agentica-project/verl), an open-source post-training library.
         
     | 
| 265 | 
         
            +
            - Our model is trained on top of [`DeepSeek-R1-Distill-Qwen-14B`](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-14B).
         
     | 
| 266 | 
         
            +
            - Our work is done as part of  [Berkeley Sky Computing Lab](https://skycomputing.berkeley.edu/) and [Berkeley AI Research](https://bair.berkeley.edu/).
         
     | 
| 267 | 
         
            +
             
     | 
| 268 | 
         
            +
            ## Citation 
         
     | 
| 269 | 
         
            +
            ```bibtex
         
     | 
| 270 | 
         
            +
            @misc{deepcoder2025,
         
     | 
| 271 | 
         
            +
              title={DeepCoder: A Fully Open-Source 14B Coder at O3-mini Level},
         
     | 
| 272 | 
         
            +
  author={Michael Luo and Sijun Tan and Roy Huang and Ameen Patel and Alpay Ariyak and Qingyang Wu and Xiaoxiang Shi and Rachel Xin and Colin Cai and Maurice Weber and Ce Zhang and Li Erran Li and Raluca Ada Popa and Ion Stoica},
         
     | 
| 273 | 
         
            +
              howpublished={\url{https://pretty-radio-b75.notion.site/DeepCoder-A-Fully-Open-Source-14B-Coder-at-O3-mini-Level-1cf81902c14680b3bee5eb349a512a51}},
         
     | 
| 274 | 
         
            +
              note={Notion Blog},
         
     | 
| 275 | 
         
            +
              year={2025}
         
     | 
| 276 | 
         
            +
            }
         
     | 
| 277 | 
         
            +
            ```
         
     | 
| 278 | 
         
            +
# Uploaded model
         
     | 
| 279 | 
         
            +
             
     | 
| 280 | 
         
            +
            - **Developed by:** EpistemeAI
         
     | 
| 281 | 
         
            +
            - **License:** apache-2.0
         
     | 
| 282 | 
         
            +
- **Finetuned from model:** agentica-org/DeepCoder-14B-Preview
         
     | 
| 283 | 
         
            +
             
     | 
| 284 | 
         
            +
This qwen2 model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Hugging Face's TRL library.
         
     | 
| 285 | 
         
            +
             
     | 
| 286 | 
         
            +
            [<img src=""https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png"" width=""200""/>](https://github.com/unslothai/unsloth)
         
     | 
| 287 | 
         
            +
             
     | 
| 288 | 
         
            +
            ","{""id"": ""EpistemeAI/DeepCoder-14B-Preview-safety-alignment-unsloth"", ""author"": ""EpistemeAI"", ""sha"": ""5a7aeb6c1af19f92861f9a3505013ba176361618"", ""last_modified"": ""2025-04-15 14:50:09+00:00"", ""created_at"": ""2025-04-09 22:09:05+00:00"", ""private"": false, ""gated"": ""auto"", ""disabled"": false, ""downloads"": 9, ""downloads_all_time"": null, ""likes"": 1, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""qwen2"", ""text-generation"", ""text-generation-inference"", ""unsloth"", ""trl"", ""conversational"", ""en"", ""base_model:agentica-org/DeepCoder-14B-Preview"", ""base_model:finetune:agentica-org/DeepCoder-14B-Preview"", ""license:mit"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: agentica-org/DeepCoder-14B-Preview\nlanguage:\n- en\nlicense: mit\ntags:\n- text-generation-inference\n- transformers\n- unsloth\n- qwen2\n- trl"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""Qwen2ForCausalLM""], ""model_type"": ""qwen2"", ""tokenizer_config"": {""bos_token"": ""<\uff5cbegin\u2581of\u2581sentence\uff5c>"", ""chat_template"": ""{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<\uff5cUser\uff5c>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<\uff5cAssistant\uff5c><\uff5ctool\u2581calls\u2581begin\uff5c><\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{{'<\uff5ctool\u2581calls\u2581end\uff5c><\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>' + message['content'] + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<\uff5cAssistant\uff5c>' + content + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first 
%}{{'<\uff5ctool\u2581outputs\u2581begin\uff5c><\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<\uff5cAssistant\uff5c><think>\\n'}}{% endif %}"", ""eos_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""pad_token"": ""<|vision_pad|>"", ""unk_token"": null, ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 14770033664}, ""total"": 14770033664}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-15 14:50:09+00:00"", ""cardData"": ""base_model: agentica-org/DeepCoder-14B-Preview\nlanguage:\n- en\nlicense: mit\ntags:\n- text-generation-inference\n- transformers\n- unsloth\n- qwen2\n- trl"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""67f6f001f018b75ed18178f6"", ""modelId"": ""EpistemeAI/DeepCoder-14B-Preview-safety-alignment-unsloth"", ""usedStorage"": 29551556738}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=EpistemeAI/DeepCoder-14B-Preview-safety-alignment-unsloth&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BEpistemeAI%2FDeepCoder-14B-Preview-safety-alignment-unsloth%5D(%2FEpistemeAI%2FDeepCoder-14B-Preview-safety-alignment-unsloth)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
| 289 | + https://huggingface.co/EpistemeAI/SAI-DeepCoder-14B-Preview-v1.0,N/A,N/A,1,,0,,0,,0,,0,,0
| 290 | + mlx-community/DeepCoder-14B-Preview-bf16,"---
| 291 | + license: mit
| 292 | + library_name: mlx
| 293 | + datasets:
| 294 | + - PrimeIntellect/verifiable-coding-problems
| 295 | + - likaixin/TACO-verified
| 296 | + - livecodebench/code_generation_lite
| 297 | + language:
| 298 | + - en
| 299 | + base_model: agentica-org/DeepCoder-14B-Preview
| 300 | + pipeline_tag: text-generation
| 301 | + tags:
| 302 | + - mlx
| 303 | + ---
| 304 | +
| 305 | + # mlx-community/DeepCoder-14B-Preview-bf16
| 306 | +
| 307 | + This model [mlx-community/DeepCoder-14B-Preview-bf16](https://huggingface.co/mlx-community/DeepCoder-14B-Preview-bf16) was
| 308 | + converted to MLX format from [agentica-org/DeepCoder-14B-Preview](https://huggingface.co/agentica-org/DeepCoder-14B-Preview)
| 309 | + using mlx-lm version **0.22.3**.
| 310 | +
| 311 | + ## Use with mlx
| 312 | +
| 313 | + ```bash
| 314 | + pip install mlx-lm
| 315 | + ```
| 316 | +
| 317 | + ```python
| 318 | + from mlx_lm import load, generate
| 319 | +
| 320 | + model, tokenizer = load(""mlx-community/DeepCoder-14B-Preview-bf16"")
| 321 | +
| 322 | + prompt = ""hello""
| 323 | +
| 324 | + if tokenizer.chat_template is not None:
| 325 | +     messages = [{""role"": ""user"", ""content"": prompt}]
| 326 | +     prompt = tokenizer.apply_chat_template(
| 327 | +         messages, add_generation_prompt=True
| 328 | +     )
| 329 | +
| 330 | + response = generate(model, tokenizer, prompt=prompt, verbose=True)
| 331 | + ```
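For a quick smoke test without writing Python, mlx-lm also ships a command-line generator. A minimal sketch, assuming the module entry point and flags of the mlx-lm 0.22.x releases (not taken from the card itself):

```bash
# Generate a short completion directly from the converted MLX weights.
python -m mlx_lm.generate --model mlx-community/DeepCoder-14B-Preview-bf16 \
  --prompt "hello" --max-tokens 256
```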
         
| 332 | +
            ","{""id"": ""mlx-community/DeepCoder-14B-Preview-bf16"", ""author"": ""mlx-community"", ""sha"": ""b36e3085dca80389d5f40f81cbf1341b34864fe1"", ""last_modified"": ""2025-04-08 21:51:04+00:00"", ""created_at"": ""2025-04-08 20:49:38+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 159, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""mlx"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""mlx"", ""safetensors"", ""qwen2"", ""text-generation"", ""conversational"", ""en"", ""dataset:PrimeIntellect/verifiable-coding-problems"", ""dataset:likaixin/TACO-verified"", ""dataset:livecodebench/code_generation_lite"", ""base_model:agentica-org/DeepCoder-14B-Preview"", ""base_model:finetune:agentica-org/DeepCoder-14B-Preview"", ""license:mit"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: agentica-org/DeepCoder-14B-Preview\ndatasets:\n- PrimeIntellect/verifiable-coding-problems\n- likaixin/TACO-verified\n- livecodebench/code_generation_lite\nlanguage:\n- en\nlibrary_name: mlx\nlicense: mit\npipeline_tag: text-generation\ntags:\n- mlx"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""Qwen2ForCausalLM""], ""model_type"": ""qwen2"", ""tokenizer_config"": {""bos_token"": ""<\uff5cbegin\u2581of\u2581sentence\uff5c>"", ""chat_template"": ""{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<\uff5cUser\uff5c>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<\uff5cAssistant\uff5c><\uff5ctool\u2581calls\u2581begin\uff5c><\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{{'<\uff5ctool\u2581calls\u2581end\uff5c><\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>' + message['content'] + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<\uff5cAssistant\uff5c>' + content + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endif %}{%- if 
message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<\uff5ctool\u2581outputs\u2581begin\uff5c><\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<\uff5cAssistant\uff5c><think>\\n'}}{% endif %}"", ""eos_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""pad_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""unk_token"": null, ""use_default_system_prompt"": false}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 14770033664}, ""total"": 14770033664}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-08 21:51:04+00:00"", ""cardData"": ""base_model: agentica-org/DeepCoder-14B-Preview\ndatasets:\n- PrimeIntellect/verifiable-coding-problems\n- likaixin/TACO-verified\n- livecodebench/code_generation_lite\nlanguage:\n- en\nlibrary_name: mlx\nlicense: mit\npipeline_tag: text-generation\ntags:\n- mlx"", ""transformersInfo"": null, ""_id"": ""67f58be237c0eed4d3219721"", ""modelId"": ""mlx-community/DeepCoder-14B-Preview-bf16"", ""usedStorage"": 29551556644}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=mlx-community/DeepCoder-14B-Preview-bf16&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bmlx-community%2FDeepCoder-14B-Preview-bf16%5D(%2Fmlx-community%2FDeepCoder-14B-Preview-bf16)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
| 333 | + Gapeleon/DeepCoder-14B-Preview-int4-awq-ov,"---
| 334 | + license: mit
| 335 | + base_model:
| 336 | + - agentica-org/DeepCoder-14B-Preview
| 337 | + ---
| 338 | +
| 339 | + # OpenVINO quant of [agentica-org/DeepCoder-14B-Preview](https://huggingface.co/agentica-org/DeepCoder-14B-Preview)
| 340 | +
| 341 | + - Requires 12GB of VRAM (e.g. Intel Arc A770 / B580).
| 342 | + - Won't fit on an 8GB A750.
| 343 | +
| 344 | + # Performance on an A770 with [OpenArc](https://github.com/SearchSavior/OpenArc)
| 345 | +
| 346 | + ```
| 347 | + === Streaming Performance ===
| 348 | + Total generation time: 65.078 seconds
| 349 | + Prompt evaluation: 1376 tokens in 0.841 seconds (1636.58 T/s)
| 350 | + Response generation: 982 tokens (15.09 T/s)
| 351 | +
            ```","{""id"": ""Gapeleon/DeepCoder-14B-Preview-int4-awq-ov"", ""author"": ""Gapeleon"", ""sha"": ""d47bf0f554c171f5ac75ec9b4f3ed9dee4b6e860"", ""last_modified"": ""2025-04-23 01:55:29+00:00"", ""created_at"": ""2025-04-09 11:06:09+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 12, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""openvino"", ""qwen2"", ""base_model:agentica-org/DeepCoder-14B-Preview"", ""base_model:finetune:agentica-org/DeepCoder-14B-Preview"", ""license:mit"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- agentica-org/DeepCoder-14B-Preview\nlicense: mit"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""Qwen2ForCausalLM""], ""model_type"": ""qwen2"", ""tokenizer_config"": {""bos_token"": ""<\uff5cbegin\u2581of\u2581sentence\uff5c>"", ""chat_template"": ""{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<\uff5cUser\uff5c>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<\uff5cAssistant\uff5c><\uff5ctool\u2581calls\u2581begin\uff5c><\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{{'<\uff5ctool\u2581calls\u2581end\uff5c><\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>' + message['content'] + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<\uff5cAssistant\uff5c>' + content + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<\uff5ctool\u2581outputs\u2581begin\uff5c><\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<\uff5cAssistant\uff5c><think>\\n'}}{% endif %}"", ""eos_token"": 
""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""pad_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""unk_token"": null, ""use_default_system_prompt"": false}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='openvino_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='openvino_detokenizer.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='openvino_detokenizer.xml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='openvino_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='openvino_model.xml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='openvino_tokenizer.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='openvino_tokenizer.xml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-23 01:55:29+00:00"", ""cardData"": ""base_model:\n- agentica-org/DeepCoder-14B-Preview\nlicense: mit"", ""transformersInfo"": null, ""_id"": ""67f654a18fd7052199edb79a"", ""modelId"": ""Gapeleon/DeepCoder-14B-Preview-int4-awq-ov"", ""usedStorage"": 16938244268}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=Gapeleon/DeepCoder-14B-Preview-int4-awq-ov&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BGapeleon%2FDeepCoder-14B-Preview-int4-awq-ov%5D(%2FGapeleon%2FDeepCoder-14B-Preview-int4-awq-ov)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
| 352 | + secmlr/DS-Noisy_DS-Clean_QWQ-Noisy_QWQ-Clean_DeepCoder-14B-Preview_full_sft_1e-5,"---
| 353 | + library_name: transformers
| 354 | + license: mit
| 355 | + base_model: agentica-org/DeepCoder-14B-Preview
| 356 | + tags:
| 357 | + - llama-factory
| 358 | + - full
| 359 | + - generated_from_trainer
| 360 | + model-index:
| 361 | + - name: DS-Noisy_DS-Clean_QWQ-Noisy_QWQ-Clean_DeepCoder-14B-Preview_full_sft_1e-5
| 362 | +   results: []
| 363 | + ---
| 364 | +
| 365 | + <!-- This model card has been generated automatically according to the information the Trainer had access to. You
| 366 | + should probably proofread and complete it, then remove this comment. -->
| 367 | +
| 368 | + # DS-Noisy_DS-Clean_QWQ-Noisy_QWQ-Clean_DeepCoder-14B-Preview_full_sft_1e-5
| 369 | +
| 370 | + This model is a fine-tuned version of [agentica-org/DeepCoder-14B-Preview](https://huggingface.co/agentica-org/DeepCoder-14B-Preview) on the DS-Noisy, DS-Clean, QWQ-Noisy and QWQ-Clean datasets.
| 371 | +
| 372 | + ## Model description
| 373 | +
| 374 | + More information needed
| 375 | +
| 376 | + ## Intended uses & limitations
| 377 | +
| 378 | + More information needed
| 379 | +
| 380 | + ## Training and evaluation data
| 381 | +
| 382 | + More information needed
| 383 | +
| 384 | + ## Training procedure
| 385 | +
| 386 | + ### Training hyperparameters
| 387 | +
| 388 | + The following hyperparameters were used during training:
| 389 | + - learning_rate: 1e-05
| 390 | + - train_batch_size: 1
| 391 | + - eval_batch_size: 8
| 392 | + - seed: 42
| 393 | + - distributed_type: multi-GPU
| 394 | + - num_devices: 4
| 395 | + - gradient_accumulation_steps: 12
| 396 | + - total_train_batch_size: 48
| 397 | + - total_eval_batch_size: 32
| 398 | + - optimizer: AdamW (torch) with betas=(0.9, 0.999), epsilon=1e-08, and no additional optimizer arguments
| 399 | + - lr_scheduler_type: cosine
| 400 | + - lr_scheduler_warmup_ratio: 0.1
| 401 | + - num_epochs: 1.0
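(For reference, the totals above are consistent with the per-device values: effective train batch size = 1 per-device sample × 4 GPUs × 12 gradient-accumulation steps = 48, and effective eval batch size = 8 × 4 = 32.)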
         
| 402 | +
| 403 | + ### Training results
| 404 | +
| 405 | +
| 406 | +
| 407 | + ### Framework versions
| 408 | +
| 409 | + - Transformers 4.50.0
| 410 | + - Pytorch 2.6.0+cu124
| 411 | + - Datasets 3.1.0
| 412 | + - Tokenizers 0.21.0
| 413 | +
            ","{""id"": ""secmlr/DS-Noisy_DS-Clean_QWQ-Noisy_QWQ-Clean_DeepCoder-14B-Preview_full_sft_1e-5"", ""author"": ""secmlr"", ""sha"": ""0faf77cef5efe84efc0ad7057d697f5ef5d1b5eb"", ""last_modified"": ""2025-04-14 04:07:02+00:00"", ""created_at"": ""2025-04-14 00:59:03+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""qwen2"", ""text-generation"", ""llama-factory"", ""full"", ""generated_from_trainer"", ""conversational"", ""base_model:agentica-org/DeepCoder-14B-Preview"", ""base_model:finetune:agentica-org/DeepCoder-14B-Preview"", ""license:mit"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: agentica-org/DeepCoder-14B-Preview\nlibrary_name: transformers\nlicense: mit\ntags:\n- llama-factory\n- full\n- generated_from_trainer\nmodel-index:\n- name: DS-Noisy_DS-Clean_QWQ-Noisy_QWQ-Clean_DeepCoder-14B-Preview_full_sft_1e-5\n  results: []"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": [{""name"": ""DS-Noisy_DS-Clean_QWQ-Noisy_QWQ-Clean_DeepCoder-14B-Preview_full_sft_1e-5"", ""results"": []}], ""config"": {""architectures"": [""Qwen2ForCausalLM""], ""model_type"": ""qwen2"", ""tokenizer_config"": {""bos_token"": ""<\uff5cbegin\u2581of\u2581sentence\uff5c>"", ""chat_template"": ""{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<\uff5cUser\uff5c>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<\uff5cAssistant\uff5c><\uff5ctool\u2581calls\u2581begin\uff5c><\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{{'<\uff5ctool\u2581calls\u2581end\uff5c><\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>' + message['content'] + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = 
content.split('</think>')[-1] %}{% endif %}{{'<\uff5cAssistant\uff5c>' + content + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<\uff5ctool\u2581outputs\u2581begin\uff5c><\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<\uff5cAssistant\uff5c><think>\\n'}}{% endif %}"", ""eos_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""pad_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""unk_token"": null, ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='all_results.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='train_results.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='trainer_log.jsonl', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='trainer_state.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_loss.png', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 14770033664}, ""total"": 14770033664}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-14 04:07:02+00:00"", ""cardData"": ""base_model: agentica-org/DeepCoder-14B-Preview\nlibrary_name: transformers\nlicense: mit\ntags:\n- llama-factory\n- full\n- generated_from_trainer\nmodel-index:\n- name: DS-Noisy_DS-Clean_QWQ-Noisy_QWQ-Clean_DeepCoder-14B-Preview_full_sft_1e-5\n  results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": 
""67fc5dd79808e92cc1f4a913"", ""modelId"": ""secmlr/DS-Noisy_DS-Clean_QWQ-Noisy_QWQ-Clean_DeepCoder-14B-Preview_full_sft_1e-5"", ""usedStorage"": 29551564853}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=secmlr/DS-Noisy_DS-Clean_QWQ-Noisy_QWQ-Clean_DeepCoder-14B-Preview_full_sft_1e-5&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bsecmlr%2FDS-Noisy_DS-Clean_QWQ-Noisy_QWQ-Clean_DeepCoder-14B-Preview_full_sft_1e-5%5D(%2Fsecmlr%2FDS-Noisy_DS-Clean_QWQ-Noisy_QWQ-Clean_DeepCoder-14B-Preview_full_sft_1e-5)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
| 414 | + Apel-sin/deepcoder-14B-preview-exl2,"---
| 415 | + license: mit
| 416 | + library_name: transformers
| 417 | + datasets:
| 418 | + - PrimeIntellect/verifiable-coding-problems
| 419 | + - likaixin/TACO-verified
| 420 | + - livecodebench/code_generation_lite
| 421 | + language:
| 422 | + - en
| 423 | + base_model:
| 424 | + - agentica-org/DeepCoder-14B-Preview
| 425 | + pipeline_tag: text-generation
| 426 | + ---
| 427 | +
| 428 | + <div align=""center"">
| 429 | + <span style=""font-family: default; font-size: 1.5em;"">DeepCoder-14B-Preview</span>
| 430 | + <div>
| 431 | + 🚀 Democratizing Reinforcement Learning for LLMs (RLLM) 🌟
| 432 | + </div>
| 433 | + </div>
| 434 | + <br>
| 435 | + <div align=""center"" style=""line-height: 1;"">
| 436 | +   <a href=""https://github.com/agentica-project/rllm"" style=""margin: 2px;"">
| 437 | +     <img alt=""Code"" src=""https://img.shields.io/badge/RLLM-000000?style=for-the-badge&logo=github&logoColor=000&logoColor=white"" style=""display: inline-block; vertical-align: middle;""/>
| 438 | +   </a>
| 439 | +   <a href=""https://pretty-radio-b75.notion.site/DeepCoder-A-Fully-Open-Source-14B-Coder-at-O3-mini-Level-1cf81902c14680b3bee5eb349a512a51"" target=""_blank"" style=""margin: 2px;"">
| 440 | +     <img alt=""Blog"" src=""https://img.shields.io/badge/Notion-%23000000.svg?style=for-the-badge&logo=notion&logoColor=white"" style=""display: inline-block; vertical-align: middle;""/>
| 441 | +   </a>
| 442 | +   <a href=""https://x.com/Agentica_"" style=""margin: 2px;"">
| 443 | +     <img alt=""X.ai"" src=""https://img.shields.io/badge/Agentica-white?style=for-the-badge&logo=X&logoColor=000&color=000&labelColor=white"" style=""display: inline-block; vertical-align: middle;""/>
| 444 | +   </a>
| 445 | +   <a href=""https://huggingface.co/agentica-org"" style=""margin: 2px;"">
| 446 | +     <img alt=""Hugging Face"" src=""https://img.shields.io/badge/Agentica-fcd022?style=for-the-badge&logo=huggingface&logoColor=000&labelColor"" style=""display: inline-block; vertical-align: middle;""/>
| 447 | +   </a>
| 448 | +   <a href=""https://www.together.ai"" style=""margin: 2px;"">
| 449 | +
                <img alt=""Together AI"" src=""https://img.shields.io/badge/-Together_AI%20-white?style=for-the-badge&logo=data%3Aimage%2Fpng%3Bbase64%2CiVBORw0KGgoAAAANSUhEUgAAAUAAAAFACAMAAAD6TlWYAAAC7lBMVEUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8AAAAPb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADIBDt6AAAA%2BnRSTlMAAiQEKgcdKQwiHBMUzrtSUEmjhmZGH96yv8n1ey7nL3y1U%2FZfCaIo1WFg1NrcsHYrA2%2Fv80J%2BMeilnpefqKw%2B64%2BQlSbYZGVnBGkCV%2BxW8XJube6WJ9kZF9bSzBALRynPQfLhIjvwyBEAXOTLp3o%2FJA9Y9%2F7%2F9FEKDhIVFo4GHkVzjGz8icrHzY39iHR1i0M8Jj14LLZUvb7DxMXGoQEFeQcgSBOHaPvm4uOdRLMMqcDTLbcII0sNuVn4TKaRd6RKIeDd37Svra6xuLpaW17lXUAlHh8WGxUPIS4JGQoFECMsBg4gFwsRJRIrCC0oAycaFC8NMDIzMRgBsVt9rwAAD25JREFUeNrs3QVzG0kWB%2FA3ikHhZeYwk3LMbF7GcBasOGw9hb3MzLyKw8zMzMx2rsokhySNY2mmR1N4xXV3a7sHuzWu%2BX2Ef3XPG%2Br3wOVyuVwul8vlcrlcLpfL5XK5dOlXOHTIvLnb27Xd%2FasBvrt9A%2B7r1bbdTTffcmuXwhzgTYwk6q%2BHr2RWlcclRYqXV2VeCV%2Bvr4mIkCJKZ83uc9NLC0fMD%2BD%2FCswfMfLtzh%2FeelsJcKJW19SG66KSTP6fLEXrwrU11Srw5Z8zbuzePcUBbFyg%2BPY7Pv%2Bs0A%2Bsid7ayiqFNEWp8iS9Ir%2F0Cl957bkRAaQLFLz15sBBfpbpJc7FJKKFFGuV4JJh6N573g6idr7vP%2F8iC9iI1NZJRDupLnlRBbaW3XjTfQHUJ3D8d68MBtsJiTNRold5uEYAdibkHgqiESMefGi9zfFVeCRihOS5LLJafV99XYxGddgwabKt8SmEyEQ%2FmRDlSoUA9gsNvKMDmhE8MC4L7OFtSYmPFmFlAmzm%2F9tfH0Oz8v6yFmxQ3SpOiY8eYTwjHew0%2BB9%2FD6B5ga4dLd%2FHQus0SnzaIrzWWgDb9P19MVqjw01dwFLpYYVYQymLgD1Kjj6J1umaHwLLqJfpy0%2FHIryqgg2mvetDKxXMnQMWEa9LxEpSqxZguS%2B%2BfA%2Bt9cZBi7ZxeqVMX376FqEnAtbyv7ISrTfspB%2FM82bq3r70BNMSYKV%2Bo4rQDiPzc8Csy1Fih%2BhVsE7o0cfQHnn%2FygJz6uNEJtaTSfy8ChYpnelDuxQ8HAIT1LOS8fwoCSq1FiVYcs%2FdaJ%2FgNhMJqrWKqfwoCSYtSTA08260U%2FBh47v4LDU%2F%2FgnmPOJDexX86ycwpp6yf80neB7M8o96DO2Wl2%2Bw%2FlLrh%2FlKYroW31qE9ht5EgzwRs3nR00wmgBTVq1EFtp2Ad0imdbkR0kwLQImTP8S2eg9B3QSKwkbHhPPxSUzAsjGe3P1luLrMmGklQpGjfIhKwU6C8llibBJUCaS4UKy6klkp0cX0CE9zcr8KAlei4Ahy36PLHXuBJqpYcJSmQBG3LIJWerQETS7qhCWlHowoMvfka2Va0Gjaus3MGUTp4NuWY8ja3%2FuB9q0IqydBt1eeQxZ%2B9MfQRNvnLAWT%2BiuIEuRvT9MBg3UlkQmbMmkUgB9cjsge8EbQIMLCmFPuQy6DPoGeVi9HqgED5EJazL5VAQ9Nm5CHjq0B6oKhZCUX4LrNyAfSycDhVBJZMKeTK4IoN26IPJRsAQoEhLhQ7kAmoV%2Bjbwspt0LniF8yKRMBa1%2B%2BSvkZVFfaFIkSngpvwha%2FQL56QNNqiX8%2FBs0mnMX8vPtBGiCWEf4iYmgzey7kZ8Rw6EJXonwo9SANn9GnuZCE84RnlqBJm3aIk8vFUKjxBjhKbMFaDHQhzy9%2BAI06pJEeJIS%2FGuwBn1M1WD%2BdXjNauSrdwk0Qq0kfHlUoFs7Evnq9TI0orqK8BVN
1%2FIcvAn56vAKNCKhEDruz8NjkbdXOV4CKZJA1W8M8vbjT9CwMOGtDKjmjEbefpgCDRLqCB33p7kvipC3kc83UkOihLdohF5DfMjbiBf43UZTSPQq8vobyNsbudCgyzLhTT4PNK8hpmoZPkv4awU0y5G%2F1%2Fj90WG%2BDK9ATNX7mDDh71OgWYn83RHi9yRMkQY0I5G%2FOydDA4RPCX9RoMlD%2Fu6a0mCAMcJfHGh8yN%2BwqdAAMZPwJwFNB%2BRv5TRoQIs0wp%2FiiAB7TG%2B2Abor0L0GmiO5VdicuHsfaE7UfRIxJ80Rz8Kdnfss7L6NoShz8vvAWsLfOUe8kZ7o5DfSm1Pgm8gnTv4msqoIzXC%2FyrUZjWa434XdPxOoRZjiHjTD%2FTcGNm9Cg9y%2Fs9z%2FAymi1e4fqqZ4VPcfaQZnlQYGkacXP3H6X%2FrT2qIZ7jkR%2BAvy9L5jTyq5Z%2BUolBpHnNYc5PDTmubrsHtemOeJ9aJmcWI9tAV5%2BQ29Z4Kc%2Bj0TYHOQVwl5pVl07YD1h9EMt28MHOHUueihZtK5CArvRB4OTWkuvbNgYjGyF5wEGlQ4oXsbrF%2BK7O2fDBoIPPoHegQndLAc14w6WELot8jaX5pVD1Xo8iSy1WM8nzbcFMZbcf%2BLcR%2Fp7qBZayf0kYZly5GlzpOd3Mmcfy%2F9rl1AhwjTXvoXwaATDKc55Dp6mgP%2FeSLvZ4E%2B55wwTwSmr0Y2Djp6og3%2FmUrDhqbuTKWLYMqQ42i%2FkcNTdqpXeQ2Y4z82AO2Wl8txrpz5AkLRr38Q7TUiOydlJxueBfNCYzugnYKvOn62JkXpA3YmGPy8xPnTXanzhYP27d8PSvjPFzafH0Wov12VJC87ZSdcS2dVsEy%2FE8fRDgtznTFj3Tz%2FrT3QesOGO2bKv3mrVr%2BH1nrjjqFgiUilTGRr8%2FNEwHLTZ%2FisLR9vzgGLiOckYiWpVQuwQcmonmidZ3JDYBn1chohslXL79pVFWzh%2F2L5JrRG8fahYKlIWCHWUMoiYJtl%2F3wygOYFunabDBYTWmtdhJTlVy%2BAjfxPPP4YmpW3dTzYID0jTo%2BQEl88Ix1sFlqytAOacfe%2Bk1lgD29LxXiEMiFKZUIF%2By3L%2F6YYjSpu134w2EaouEKPsNH4rlwWgI0JEzcE0Qjfl19NAVsJFR6JGCF5LovAzrId2%2B8LoD6BBT8OGQy2E2rCUaJXebhGALZC9z%2FwUhC18%2F0wc1UWsBFJ1klEOymWvKgCe%2F7CW999xxdAusCI0R99PMgP7IiJczFJY3qtEiLw8tOckw88uKs40FR4xXuWzvzjVD%2BwJnqTlVUKaYpS5Ul6ReCsdOeOmVveKgq%2Bh%2F%2FvveCiu7Zvmz2rFDhRq2tqw7GoJJP%2FJ0vRWFmyplqF1NBv0KmTJz7fumX1d889%2B8yTzzz73Ldfbtm6bdS48RNygDcx3Xu1NqPMUxdLS7uWlhar85RlJK9600VIOf6c0mWDpj391NNtBg0uyfFDSlEF8T%2Ft3eFyqjwTwPGNiKq9eq%2BtqiCeoxZVEcRW4mK%2Bvc%2F5%2Bk7bBSDZOJPfFfwHWkEMG%2B%2BfXChwHMdxHMdxHMdxHMdxHMdxHIeV4yiR%2FyOUS6tHfBxP88Vse74N%2F7mdt7PF%2FHT8EFakbYg0XupvMZ%2Fddt%2F%2Ber27zebFX%2BXSfpQfD%2BMLsX7iMp4fc460%2BfgiqbSD1jSCGH1WXAV1v32OhOm0O1Yh9aUR0sNUYnVyekjBEH9eL%2B2mIY2gilmGdWXvhTKQNnpvkDYrBJgjNluJTchtIDSnBY3TNgLMUEGvbL4Qvhco3WkPbOS%2FNAEGjMay1bsEMjyCJsewXVo5HoFuH5P2b7OsJh9a0har1mn3tmkElXTzPlU%2FUd2nDfnTKH53b%2FTN%2FI7TZp2l7X3QZNPlO6X9jb1pJwUa5J8SuyQ%2Fc2vTFjl0zu%2F8vfrH2O8obdx52jaFjmmZ7HAdQQeOVw1pwxF0StNskd0GWtvsUIfsBB3SNt3m%2FgUtva1402jEfCXm%2BUBLjWkHBZ2gJ3zxHcG51JhWdnQENc%2BYk3O2vz%2F6CEJrBqYcyi9o6E172hJaMjJn876BRjYG0k7QiqFJr7tRo7SdgbSsgBaMzRoe%2BlCbfzWTlkILxqZdj%2FPaaWM0Y%2BtBUwbnrT8%2BoaZPY2kLBc2Ynfi%2FgVo2BtNO0JDRPSf6PtTgm0y7pNCI2KNJewWVqZnZNAH1md93J4HKEsNpb1Abw85P%2FQ%2Bo6GNoOs2H%2BgZo2gQqWqBpA6iNY%2Fe7EVRyXNm%2FMR%2FP%2FotjBRWokCFtK6AOrh1AA6ggkBxpG6hFnImzzLUFKNv2uOec5Q9Qw3kO7N%2BgmT7LjB81asuU1hNQXSyRhyyAULClxVDdHh%2FI4YEzIMzY0vZQWZQhlyyFX6V8aasIqnoinwP86oB8nlBRfkM%2Btxx%2BIaZWpNGf03zkCH4xYk0r7PiuTljALz6R0wQqya%2FI6ZrTHy78acS%2FCSd5hB8dmdNGdlyDCQfiGmz7dVhtkddWWZvWU0D72CGv3Qf84O%2BFP40Wl8irLOAHBXtaDLQDoq0fgnPk9gTaHrnt4Qcz5Bba8T2OcBPwLUGnWXAnmGbILfP5Lm%2BELLX3WSp9v3q0IC0GytcDuT1O8K2TBWlLq58kEJfhOfJbACVEfhN7z20IlDPy2xM3WIymQBkiv57i%2ByZM6ANlh%2FymAr6hpshvB5QVoqW3q%2BKK%2FO5AkchvmMM38iHyk0ApkV%2Ffg294feRXugPoDiCr0n0GtiPdVbid%2BwvfB4op8svcN5F2%2Bu67cDvTV34aM0F%2B4Ss%2FDzzYcW4JSwse%2Byav%2FETa4t9ERhakBS%2F9q5wFaRH%2F6kDaNbf3d2EPXuAyvLd30UQItCdyO9i7bOf5EquzYnvTgpdeH8iflvlAUz3kZf8KVcs%2FBJ%2F2rl1cQxWFvUvhR8xpBVThDfnvAu28SR16UMkEOS3sfdQxgGri0tp%2Fk0Lac39l6T%2FKLbd2AfLVg4rW9t7rPy24BtOiFXJZRda%2BTL%2F6A1Wp0N7BBHu2tFBBZUGJPGRs7QPfMrB9cBExnIV7pM1ZQA0nrvFA9qYlUEc%2B5R9QZddYrymdxn%2Bey5O9g%2BUSqEf0rB3SJ7YMaT0BNRUMEywLa9NkDHWpdzRtYO9413cFtaUXw6NyL76VA4abj%2BL%2BMjys%2BcvaEdePJTQhxmhSKGqkhWjSWEAj0cXagfWpybRdBA0lpbktExJrN5oo36ApNUFTJqpm2gJNGShozOuhGT3P2rSzBy1EfSMbF%2FVTqC01lBZBK%2FHK2q2zisxA2iqGlhKpf%2FO2pGHaXXuafOPfGZKMLJeMO0MSaXNoTz1LvRt
YPhXftqlE2lpBB9SayOQ6fgDqqTXtk07jzKSPH00dpL60tbJ9h%2Bb2%2BzODWt7tSKM34tZhlUBrSaYn7Q06Ffc1bKXfj6EDhQ1ptOhcP5OI7EXQibTXedo5gs55gxK7VE68ztImstu0gQcaqGSH%2BOjqHF8S1WXapcO03ZsCPaLxA7tRhhF0Kg1L7MZjHIE24os%2B05X%2B%2FL6ErWm7pQCd0ndJdxKN93cfNPDf763T5CwFzVTcK%2BnOXxrLXqE0pRXbtmmxAv3EaUp3%2Ftg4PQlL0x7TRIAZeXIusYnyfMo1p50apyU5mCOCcIV1rcJA2J9mivqzvpZYXXldR8pQWlQ77Y8CBnk8GFYLlcNBnJtNmwwlVlH%2Bl%2BYBG69Yn7Py98Ksty48lrQemXY2kEZRfvAMr5l84P97yOwaPgNfWZq2NpZG86JgPhlP%2B9ldlo9S3rP%2BdDyZB5FnRdqygzTHcRzHcRzHcRzHcRzHcZz%2FAbyvLkVmYcs9AAAAAElFTkSuQmCC&link=https%3A%2F%2Fwww.together.ai"" style=""display: inline-block; vertical-align: middle;""/>
         
| 450 | +   </a>
| 451 | + </div>
| 452 | + </div>
| 453 | + </div>
| 454 | +
| 455 | + ## DeepCoder Overview
| 456 | + DeepCoder-14B-Preview is a code reasoning LLM fine-tuned from DeepSeek-R1-Distill-Qwen-14B using distributed reinforcement learning (RL) to scale up to long context lengths. The model achieves 60.6% Pass@1 accuracy on LiveCodeBench v5 (8/1/24-2/1/25), representing an 8% improvement over the base model (53%) and achieving similar performance to OpenAI's o3-mini with just 14B parameters.
| 457 | +
| 458 | + <div style=""margin: 0 auto;"">
| 459 | +   <img src=""https://cdn-uploads.huggingface.co/production/uploads/654037be97949fd2304aab7f/r3-vzkItOCrMf1qldW0Mj.png"" style=""width: 100%;"" />
| 460 | + </div>
| 461 | +
| 462 | + ## Data
| 463 | + Our training dataset consists of approximately 24K unique problem-test pairs compiled from:
| 464 | + - Taco-Verified
| 465 | + - PrimeIntellect SYNTHETIC-1
| 466 | + - LiveCodeBench v5 (5/1/23-7/31/24)
| 467 | +
| 468 | + ## Training Recipe
| 469 | +
| 470 | + Our training recipe relies on an improved version of GRPO (GRPO+) and iterative context lengthening, introduced in DeepScaleR.
| 471 | +
| 472 | + ### GRPO+
| 473 | +
| 474 | + We enhance the original GRPO algorithm with insights from DAPO to enable more stable training:
| 475 | +
| 476 | + - **Offline Difficulty Filtering:** DAPO employs online dynamic sampling, discarding both entirely correct and entirely incorrect samples on the fly. While this helps maintain a more stable effective batch size, it introduces significant runtime overhead due to rejection sampling. Instead, we perform offline difficulty filtering on a subset of coding problems to ensure the training dataset remains within a suitable difficulty range.
| 477 | + - **No Entropy Loss:** We observed that including an entropy loss term often led to instability, with entropy growing exponentially and ultimately collapsing training. To mitigate this, we eliminate the entropy loss entirely.
| 478 | + - **No KL Loss:** Eliminating the KL loss means the LLM is no longer constrained to the trust region of the original SFT model. This removal also obviates the need to compute log probabilities for the reference policy, thereby accelerating training.
| 479 | + - **Overlong Filtering (from DAPO):** To preserve long-context reasoning, we mask the loss for truncated sequences. This technique enables DeepCoder to generalize to 64K-context inference despite being trained with a 32K context.
| 480 | + - **Clip High (from DAPO):** By increasing the upper bound in GRPO/PPO’s surrogate loss, we encourage more exploration and more stable entropy; a minimal sketch of this asymmetric clipping follows the list.
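The sketch below is a hedged illustration of that asymmetric ("clip high") surrogate only; the clip values (0.2 low / 0.28 high, borrowed from DAPO's reported defaults) and the token-level averaging are assumptions, not details taken from this card:

```python
import torch

def clip_high_surrogate(logp_new, logp_old, advantages,
                        eps_low=0.2, eps_high=0.28):
    """PPO/GRPO-style surrogate with a larger upper clip bound ("clip high")."""
    # Importance ratio between the current policy and the behavior (old) policy.
    ratio = torch.exp(logp_new - logp_old)
    # Asymmetric clipping: raising the upper bound lets positively-advantaged,
    # low-probability tokens be reinforced more aggressively (more exploration).
    clipped_ratio = torch.clamp(ratio, 1.0 - eps_low, 1.0 + eps_high)
    # Pessimistic (min) objective, averaged over tokens; this value is maximized.
    return torch.min(ratio * advantages, clipped_ratio * advantages).mean()
```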
         
| 481 | +
| 482 | + ### Iterative Context Lengthening
| 483 | +
| 484 | + Our original `DeepScaleR-1.5B-Preview` scaled long-context training from 8K→16K→24K, achieving 33→38→43% on AIME respectively. Similarly, `DeepCoder-14B-Preview` is trained on 16K→32K, achieving 54→58% on LiveCodeBench (v5). `DeepCoder-14B-Preview` successfully generalizes to longer contexts when evaluated at 64K context, reaching 60.6%.
| 485 | +
| 486 | + DeepCoder generalizes better to long contexts than the base distilled model, due to DAPO's overlong filtering. However, its longer responses are often truncated when the max length is capped at 16K, which can lower its scores.
| 487 | +
| 488 | + | **Model** | **16K** | **32K** | **64K** |
| 489 | + | --- | --- | --- | --- |
| 490 | + | **DeepCoder-14B-Preview** | 45.6 | 57.9 | 60.6 |
| 491 | + | **DeepSeek-R1-Distill-Qwen-14B** | 50.2 | 53.0 | 53.0 |
| 492 | +
| 493 | + A more detailed description of the training recipe can be found in our [blog post](https://pretty-radio-b75.notion.site/DeepCoder-A-Fully-Open-Source-14B-Coder-at-O3-mini-Level-1cf81902c14680b3bee5eb349a512a51).
| 494 | +
| 495 | + ## Evaluation
| 496 | +
| 497 | + We evaluate `DeepCoder-14B-Preview` on various coding benchmarks, including LiveCodeBench (LCBv5), Codeforces, and HumanEval+.
| 498 | +
| 499 | + | **Model** | LCB (v5) (8/1/24-2/1/25) | Codeforces Rating | Codeforces Percentile | HumanEval+ |
| 500 | + | --- | --- | --- | --- | --- |
| 501 | + | **DeepCoder-14B-Preview (ours)** | ***60.6*** | ***1936*** | ***95.3*** | ***92.6*** |
| 502 | + | **DeepSeek-R1-Distill-Qwen-14B** | 53.0 | 1791 | 92.7 | 92.0 |
| 503 | + | **O1-2024-12-17 (Low)** | 59.5 | **1991** | **96.1** | 90.8 |
| 504 | + | **O3-Mini-2025-1-31 (Low)** | **60.9** | 1918 | 94.9 | 92.6 |
| 505 | + | **O1-Preview** | 42.7 | 1658 | 88.5 | 89 |
| 506 | + | **Deepseek-R1** | 62.8 | 1948 | 95.4 | 92.6 |
| 507 | + | **Llama-4-Behemoth** | 49.4 | - | - | - |
| 508 | +
| 509 | + ## Serving DeepCoder
| 510 | + Our model can be served using popular high-performance inference systems:
| 511 | + - vLLM
| 512 | + - Hugging Face Text Generation Inference (TGI)
| 513 | + - SGLang
| 514 | + - TensorRT-LLM
| 515 | +
| 516 | + All these systems support the OpenAI Chat Completions API format.
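As a hedged, minimal sketch of that interface (the serve command, port, and sampling parameters below are assumptions, not taken from the card), a vLLM deployment can be queried with the standard OpenAI client:

```bash
# Start an OpenAI-compatible server for the base model (vLLM's `serve` CLI assumed).
vllm serve agentica-org/DeepCoder-14B-Preview --max-model-len 65536
```

```python
# Query the locally running server through the OpenAI Chat Completions API.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")
resp = client.chat.completions.create(
    model="agentica-org/DeepCoder-14B-Preview",
    messages=[{"role": "user",
               "content": "Write a Python function that checks if a string is a palindrome."}],
    temperature=0.6,
    max_tokens=2048,
)
print(resp.choices[0].message.content)
```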
         
| 517 | +
| 518 | + ## License
| 519 | + This project is released under the MIT License, reflecting our commitment to open and accessible AI development.
| 520 | + We believe in democratizing AI technology by making our work freely available for anyone to use, modify, and build upon.
| 521 | + This permissive license ensures that researchers, developers, and enthusiasts worldwide can leverage and extend our work without restrictions, fostering innovation and collaboration in the AI community.
| 522 | +
| 523 | + ## Acknowledgement
| 524 | + - Our training experiments are powered by our heavily modified fork of [Verl](https://github.com/agentica-project/verl), an open-source post-training library.
| 525 | + - Our model is trained on top of [`DeepSeek-R1-Distill-Qwen-14B`](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-14B).
| 526 | + - Our work is done as part of [Berkeley Sky Computing Lab](https://skycomputing.berkeley.edu/) and [Berkeley AI Research](https://bair.berkeley.edu/).
| 527 | +
| 528 | + ## Citation
| 529 | + ```bibtex
| 530 | + @misc{deepcoder2025,
| 531 | +   title={DeepCoder: A Fully Open-Source 14B Coder at O3-mini Level},
| 532 | +   author={Michael Luo, Sijun Tan, Roy Huang, Ameen Patel, Alpay Ariyak, Qingyang Wu, Xiaoxiang Shi, Rachel Xin, Colin Cai, Maurice Weber, Ce Zhang, Li Erran Li, Raluca Ada Popa, Ion Stoica, Tianjun Zhang},
| 533 | +   howpublished={\url{https://pretty-radio-b75.notion.site/DeepCoder-A-Fully-Open-Source-14B-Coder-at-O3-mini-Level-1cf81902c14680b3bee5eb349a512a51}},
| 534 | +   note={Notion Blog},
| 535 | +   year={2025}
| 536 | + }
| 537 | +
            ```","{""id"": ""Apel-sin/deepcoder-14B-preview-exl2"", ""author"": ""Apel-sin"", ""sha"": ""95b6fb26de354f6f87bc6d121ce6649bbe6bf405"", ""last_modified"": ""2025-04-16 12:52:30+00:00"", ""created_at"": ""2025-04-16 12:51:45+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""text-generation"", ""en"", ""dataset:PrimeIntellect/verifiable-coding-problems"", ""dataset:likaixin/TACO-verified"", ""dataset:livecodebench/code_generation_lite"", ""base_model:agentica-org/DeepCoder-14B-Preview"", ""base_model:finetune:agentica-org/DeepCoder-14B-Preview"", ""license:mit"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- agentica-org/DeepCoder-14B-Preview\ndatasets:\n- PrimeIntellect/verifiable-coding-problems\n- likaixin/TACO-verified\n- livecodebench/code_generation_lite\nlanguage:\n- en\nlibrary_name: transformers\nlicense: mit\npipeline_tag: text-generation"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": null, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='measurement.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-16 12:52:30+00:00"", ""cardData"": ""base_model:\n- agentica-org/DeepCoder-14B-Preview\ndatasets:\n- PrimeIntellect/verifiable-coding-problems\n- likaixin/TACO-verified\n- livecodebench/code_generation_lite\nlanguage:\n- en\nlibrary_name: transformers\nlicense: mit\npipeline_tag: text-generation"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""67ffa7e1f71379c75f70b5da"", ""modelId"": ""Apel-sin/deepcoder-14B-preview-exl2"", ""usedStorage"": 15550291930}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=Apel-sin/deepcoder-14B-preview-exl2&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BApel-sin%2Fdeepcoder-14B-preview-exl2%5D(%2FApel-sin%2Fdeepcoder-14B-preview-exl2)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
| 538 | + wasim845/dfgh,"---
| 539 | + language:
| 540 | + - af
| 541 | + metrics:
| 542 | + - cer
| 543 | + base_model:
| 544 | + - agentica-org/DeepCoder-14B-Preview
| 545 | +
            ---","{""id"": ""wasim845/dfgh"", ""author"": ""wasim845"", ""sha"": ""71e0c07e2c4e331839754d53d274984eb53945a8"", ""last_modified"": ""2025-04-21 18:38:51+00:00"", ""created_at"": ""2025-04-21 18:38:31+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""af"", ""base_model:agentica-org/DeepCoder-14B-Preview"", ""base_model:finetune:agentica-org/DeepCoder-14B-Preview"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- agentica-org/DeepCoder-14B-Preview\nlanguage:\n- af\nmetrics:\n- cer"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-21 18:38:51+00:00"", ""cardData"": ""base_model:\n- agentica-org/DeepCoder-14B-Preview\nlanguage:\n- af\nmetrics:\n- cer"", ""transformersInfo"": null, ""_id"": ""680690a771c70b05daf501cf"", ""modelId"": ""wasim845/dfgh"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=wasim845/dfgh&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bwasim845%2Fdfgh%5D(%2Fwasim845%2Fdfgh)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
     | 
| 546 | 
         
            +
            rieon/DeepCoder-14B-Preview-Suger,"---
         
     | 
| 547 | 
         
            +
            license: apache-2.0
         
     | 
| 548 | 
         
            +
            pipeline_tag: text-generation
         
     | 
| 549 | 
         
            +
            base_model:
         
     | 
| 550 | 
         
            +
            - agentica-org/DeepCoder-14B-Preview
         
     | 
| 551 | 
         
            +
            ---","{""id"": ""rieon/DeepCoder-14B-Preview-Suger"", ""author"": ""rieon"", ""sha"": ""dec3e9f412ae94cd14d1cbfc88a289d7b2a88593"", ""last_modified"": ""2025-04-24 23:08:31+00:00"", ""created_at"": ""2025-04-24 21:43:18+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 9, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""safetensors"", ""qwen2"", ""text-generation"", ""conversational"", ""base_model:agentica-org/DeepCoder-14B-Preview"", ""base_model:finetune:agentica-org/DeepCoder-14B-Preview"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- agentica-org/DeepCoder-14B-Preview\nlicense: apache-2.0\npipeline_tag: text-generation"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""Qwen2ForCausalLM""], ""model_type"": ""qwen2"", ""tokenizer_config"": {""bos_token"": ""<\uff5cbegin\u2581of\u2581sentence\uff5c>"", ""chat_template"": ""{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<\uff5cUser\uff5c>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<\uff5cAssistant\uff5c><\uff5ctool\u2581calls\u2581begin\uff5c><\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{{'<\uff5ctool\u2581calls\u2581end\uff5c><\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>' + message['content'] + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<\uff5cAssistant\uff5c>' + content + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<\uff5ctool\u2581outputs\u2581begin\uff5c><\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<\uff5ctool\u2581output\u2581begin\uff5c>' + 
message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<\uff5cAssistant\uff5c><think>\\n'}}{% endif %}"", ""eos_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""pad_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""unk_token"": null, ""use_default_system_prompt"": false}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 14770033664}, ""total"": 14770033664}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-24 23:08:31+00:00"", ""cardData"": ""base_model:\n- agentica-org/DeepCoder-14B-Preview\nlicense: apache-2.0\npipeline_tag: text-generation"", ""transformersInfo"": null, ""_id"": ""680ab07645884469f943b21d"", ""modelId"": ""rieon/DeepCoder-14B-Preview-Suger"", ""usedStorage"": 29551556738}",1,,0,,0,https://huggingface.co/mradermacher/DeepCoder-14B-Preview-Suger-GGUF,1,,0,huggingface/InferenceSupport/discussions/new?title=rieon/DeepCoder-14B-Preview-Suger&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Brieon%2FDeepCoder-14B-Preview-Suger%5D(%2Frieon%2FDeepCoder-14B-Preview-Suger)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
     | 
    	
        DeepSeek-Coder-V2-Lite-Instruct_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv
    ADDED
    
    | 
         @@ -0,0 +1,328 @@ 
| 1 | 
         
            +
            model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
         
     | 
| 2 | 
         
            +
            deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct,"---
         
     | 
| 3 | 
         
            +
            license: other
         
     | 
| 4 | 
         
            +
            license_name: deepseek-license
         
     | 
| 5 | 
         
            +
            license_link: LICENSE
         
     | 
| 6 | 
         
            +
            ---
         
     | 
| 7 | 
         
            +
            <!-- markdownlint-disable first-line-h1 -->
         
     | 
| 8 | 
         
            +
            <!-- markdownlint-disable html -->
         
     | 
| 9 | 
         
            +
            <!-- markdownlint-disable no-duplicate-header -->
         
     | 
| 10 | 
         
            +
             
     | 
| 11 | 
         
            +
            <div align=""center"">
         
     | 
| 12 | 
         
            +
              <img src=""https://github.com/deepseek-ai/DeepSeek-V2/blob/main/figures/logo.svg?raw=true"" width=""60%"" alt=""DeepSeek-V2"" />
         
     | 
| 13 | 
         
            +
            </div>
         
     | 
| 14 | 
         
            +
            <hr>
         
     | 
| 15 | 
         
            +
            <div align=""center"" style=""line-height: 1;"">
         
     | 
| 16 | 
         
            +
              <a href=""https://www.deepseek.com/"" target=""_blank"" style=""margin: 2px;"">
         
     | 
| 17 | 
         
            +
                <img alt=""Homepage"" src=""https://github.com/deepseek-ai/DeepSeek-V2/blob/main/figures/badge.svg?raw=true"" style=""display: inline-block; vertical-align: middle;""/>
         
     | 
| 18 | 
         
            +
              </a>
         
     | 
| 19 | 
         
            +
              <a href=""https://chat.deepseek.com/"" target=""_blank"" style=""margin: 2px;"">
         
     | 
| 20 | 
         
            +
                <img alt=""Chat"" src=""https://img.shields.io/badge/🤖%20Chat-DeepSeek%20V2-536af5?color=536af5&logoColor=white"" style=""display: inline-block; vertical-align: middle;""/>
         
     | 
| 21 | 
         
            +
              </a>
         
     | 
| 22 | 
         
            +
              <a href=""https://huggingface.co/deepseek-ai"" target=""_blank"" style=""margin: 2px;"">
         
     | 
| 23 | 
         
            +
                <img alt=""Hugging Face"" src=""https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-DeepSeek%20AI-ffc107?color=ffc107&logoColor=white"" style=""display: inline-block; vertical-align: middle;""/>
         
     | 
| 24 | 
         
            +
              </a>
         
     | 
| 25 | 
         
            +
            </div>
         
     | 
| 26 | 
         
            +
             
     | 
| 27 | 
         
            +
            <div align=""center"" style=""line-height: 1;"">
         
     | 
| 28 | 
         
            +
              <a href=""https://discord.gg/Tc7c45Zzu5"" target=""_blank"" style=""margin: 2px;"">
         
     | 
| 29 | 
         
            +
                <img alt=""Discord"" src=""https://img.shields.io/badge/Discord-DeepSeek%20AI-7289da?logo=discord&logoColor=white&color=7289da"" style=""display: inline-block; vertical-align: middle;""/>
         
     | 
| 30 | 
         
            +
              </a>
         
     | 
| 31 | 
         
            +
              <a href=""https://github.com/deepseek-ai/DeepSeek-V2/blob/main/figures/qr.jpeg?raw=true"" target=""_blank"" style=""margin: 2px;"">
         
     | 
| 32 | 
         
            +
                <img alt=""Wechat"" src=""https://img.shields.io/badge/WeChat-DeepSeek%20AI-brightgreen?logo=wechat&logoColor=white"" style=""display: inline-block; vertical-align: middle;""/>
         
     | 
| 33 | 
         
            +
              </a>
         
     | 
| 34 | 
         
            +
              <a href=""https://twitter.com/deepseek_ai"" target=""_blank"" style=""margin: 2px;"">
         
     | 
| 35 | 
         
            +
                <img alt=""Twitter Follow"" src=""https://img.shields.io/badge/Twitter-deepseek_ai-white?logo=x&logoColor=white"" style=""display: inline-block; vertical-align: middle;""/>
         
     | 
| 36 | 
         
            +
              </a>
         
     | 
| 37 | 
         
            +
            </div>
         
     | 
| 38 | 
         
            +
             
     | 
| 39 | 
         
            +
            <div align=""center"" style=""line-height: 1;"">
         
     | 
| 40 | 
         
            +
              <a href=""https://github.com/deepseek-ai/DeepSeek-V2/blob/main/LICENSE-CODE"" style=""margin: 2px;"">
         
     | 
| 41 | 
         
            +
                <img alt=""Code License"" src=""https://img.shields.io/badge/Code_License-MIT-f5de53?&color=f5de53"" style=""display: inline-block; vertical-align: middle;""/>
         
     | 
| 42 | 
         
            +
              </a>
         
     | 
| 43 | 
         
            +
              <a href=""https://github.com/deepseek-ai/DeepSeek-V2/blob/main/LICENSE-MODEL"" style=""margin: 2px;"">
         
     | 
| 44 | 
         
            +
                <img alt=""Model License"" src=""https://img.shields.io/badge/Model_License-Model_Agreement-f5de53?&color=f5de53"" style=""display: inline-block; vertical-align: middle;""/>
         
     | 
| 45 | 
         
            +
              </a>
         
     | 
| 46 | 
         
            +
            </div>
         
     | 
| 47 | 
         
            +
            <p align=""center"">
         
     | 
| 48 | 
         
            +
              <a href=""#4-api-platform"">API Platform</a> |
         
     | 
| 49 | 
         
            +
              <a href=""#5-how-to-run-locally"">How to Use</a> |
         
     | 
| 50 | 
         
            +
              <a href=""#6-license"">License</a> |
         
     | 
| 51 | 
         
            +
            </p>
         
     | 
| 52 | 
         
            +
             
     | 
| 53 | 
         
            +
             
     | 
| 54 | 
         
            +
            <p align=""center"">
         
     | 
| 55 | 
         
            +
              <a href=""https://github.com/deepseek-ai/DeepSeek-Coder-V2/blob/main/paper.pdf""><b>Paper Link</b>👁️</a>
         
     | 
| 56 | 
         
            +
            </p>
         
     | 
| 57 | 
         
            +
             
     | 
| 58 | 
         
            +
            # DeepSeek-Coder-V2: Breaking the Barrier of Closed-Source Models in Code Intelligence
         
     | 
| 59 | 
         
            +
             
     | 
| 60 | 
         
            +
            ## 1. Introduction
         
     | 
| 61 | 
         
            +
             We present DeepSeek-Coder-V2, an open-source Mixture-of-Experts (MoE) code language model that achieves performance comparable to GPT4-Turbo in code-specific tasks. Specifically, DeepSeek-Coder-V2 is further pre-trained from an intermediate checkpoint of DeepSeek-V2 with an additional 6 trillion tokens. Through this continued pre-training, DeepSeek-Coder-V2 substantially enhances the coding and mathematical reasoning capabilities of DeepSeek-V2, while maintaining comparable performance in general language tasks. Compared to DeepSeek-Coder-33B, DeepSeek-Coder-V2 demonstrates significant advancements in various aspects of code-related tasks, as well as reasoning and general capabilities. Additionally, DeepSeek-Coder-V2 expands its support for programming languages from 86 to 338, while extending the context length from 16K to 128K.
         
     | 
| 62 | 
         
            +
             
     | 
| 63 | 
         
            +
            <p align=""center"">
         
     | 
| 64 | 
         
            +
              <img width=""100%"" src=""https://github.com/deepseek-ai/DeepSeek-Coder-V2/blob/main/figures/performance.png?raw=true"">
         
     | 
| 65 | 
         
            +
            </p>
         
     | 
| 66 | 
         
            +
             
     | 
| 67 | 
         
            +
             
     | 
| 68 | 
         
            +
            In standard benchmark evaluations, DeepSeek-Coder-V2 achieves superior performance compared to closed-source models such as GPT4-Turbo, Claude 3 Opus, and Gemini 1.5 Pro in coding and math benchmarks.  The list of supported programming languages can be found [here](https://github.com/deepseek-ai/DeepSeek-Coder-V2/blob/main/supported_langs.txt).
         
     | 
| 69 | 
         
            +
             
     | 
| 70 | 
         
            +
            ## 2. Model Downloads
         
     | 
| 71 | 
         
            +
             
     | 
| 72 | 
         
            +
             We release DeepSeek-Coder-V2 to the public with 16B and 236B total parameters, built on the [DeepSeekMoE](https://arxiv.org/pdf/2401.06066) framework, with only 2.4B and 21B activated parameters respectively, in both base and instruct variants.
         
     | 
| 73 | 
         
            +
             
     | 
| 74 | 
         
            +
            <div align=""center"">
         
     | 
| 75 | 
         
            +
             
     | 
| 76 | 
         
            +
            |            **Model**            | **#Total Params** | **#Active Params** | **Context Length** |                         **Download**                         |
         
     | 
| 77 | 
         
            +
            | :-----------------------------: | :---------------: | :----------------: | :----------------: | :----------------------------------------------------------: |
         
     | 
| 78 | 
         
            +
            |   DeepSeek-Coder-V2-Lite-Base   |        16B        |        2.4B        |        128k        | [🤗 HuggingFace](https://huggingface.co/deepseek-ai/DeepSeek-Coder-V2-Lite-Base) |
         
     | 
| 79 | 
         
            +
            | DeepSeek-Coder-V2-Lite-Instruct |        16B        |        2.4B        |        128k        | [🤗 HuggingFace](https://huggingface.co/deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct) |
         
     | 
| 80 | 
         
            +
            |     DeepSeek-Coder-V2-Base      |       236B        |        21B         |        128k        | [🤗 HuggingFace](https://huggingface.co/deepseek-ai/DeepSeek-Coder-V2-Base) |
         
     | 
| 81 | 
         
            +
            |   DeepSeek-Coder-V2-Instruct    |       236B        |        21B         |        128k        | [🤗 HuggingFace](https://huggingface.co/deepseek-ai/DeepSeek-Coder-V2-Instruct) |
         
     | 
| 82 | 
         
            +
             
     | 
| 83 | 
         
            +
            </div>
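If you prefer to pre-fetch the weights rather than rely on lazy download at load time, the following is a minimal sketch using `huggingface_hub` (this snippet is our addition, not part of the original card; the repo id comes from the table above and the local directory is arbitrary):

```python
# Minimal sketch: pre-download the Lite-Instruct checkpoint listed above.
# Assumes `pip install huggingface_hub`; adjust repo_id / local_dir as needed.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(
    repo_id="deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct",
    local_dir="./DeepSeek-Coder-V2-Lite-Instruct",
)
print(f"Model files downloaded to: {local_dir}")
```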
         
     | 
| 84 | 
         
            +
             
     | 
| 85 | 
         
            +
             
     | 
| 86 | 
         
            +
            ## 3. Chat Website
         
     | 
| 87 | 
         
            +
             
     | 
| 88 | 
         
            +
             You can chat with DeepSeek-Coder-V2 on DeepSeek's official website: [coder.deepseek.com](https://coder.deepseek.com/sign_in)
         
     | 
| 89 | 
         
            +
             
     | 
| 90 | 
         
            +
            ## 4. API Platform
         
     | 
| 91 | 
         
            +
             We also provide an OpenAI-compatible API at the DeepSeek Platform: [platform.deepseek.com](https://platform.deepseek.com/), where you can pay as you go at an unbeatable price.
         
     | 
| 92 | 
         
            +
            <p align=""center"">
         
     | 
| 93 | 
         
            +
              <img width=""40%"" src=""https://github.com/deepseek-ai/DeepSeek-Coder-V2/blob/main/figures/model_price.jpg?raw=true"">
         
     | 
| 94 | 
         
            +
            </p>
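For readers who want to try the API directly, here is a hedged sketch using the OpenAI Python client; the `base_url` and model identifier below are assumptions, so check [platform.deepseek.com](https://platform.deepseek.com/) for the current values:

```python
# Hedged sketch of calling the OpenAI-compatible endpoint described above.
# base_url and model name are assumptions; supply your own API key.
from openai import OpenAI

client = OpenAI(base_url="https://api.deepseek.com", api_key="YOUR_API_KEY")
response = client.chat.completions.create(
    model="deepseek-coder",  # assumed model identifier
    messages=[{"role": "user", "content": "write a quick sort algorithm in python."}],
)
print(response.choices[0].message.content)
```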
         
     | 
| 95 | 
         
            +
             
     | 
| 96 | 
         
            +
             
     | 
| 97 | 
         
            +
            ## 5. How to run locally
         
     | 
| 98 | 
         
            +
             **Here, we provide some examples of how to use the DeepSeek-Coder-V2-Lite model. If you want to run DeepSeek-Coder-V2 in BF16 format for inference, 8×80GB GPUs are required.**
         
     | 
| 99 | 
         
            +
             
     | 
| 100 | 
         
            +
            ### Inference with Huggingface's Transformers
         
     | 
| 101 | 
         
            +
            You can directly employ [Huggingface's Transformers](https://github.com/huggingface/transformers) for model inference.
         
     | 
| 102 | 
         
            +
             
     | 
| 103 | 
         
            +
            #### Code Completion
         
     | 
| 104 | 
         
            +
            ```python
         
     | 
| 105 | 
         
            +
            from transformers import AutoTokenizer, AutoModelForCausalLM
         
     | 
| 106 | 
         
            +
            import torch
         
     | 
| 107 | 
         
            +
            tokenizer = AutoTokenizer.from_pretrained(""deepseek-ai/DeepSeek-Coder-V2-Lite-Base"", trust_remote_code=True)
         
     | 
| 108 | 
         
            +
            model = AutoModelForCausalLM.from_pretrained(""deepseek-ai/DeepSeek-Coder-V2-Lite-Base"", trust_remote_code=True, torch_dtype=torch.bfloat16).cuda()
         
     | 
| 109 | 
         
            +
            input_text = ""#write a quick sort algorithm""
         
     | 
| 110 | 
         
            +
            inputs = tokenizer(input_text, return_tensors=""pt"").to(model.device)
         
     | 
| 111 | 
         
            +
            outputs = model.generate(**inputs, max_length=128)
         
     | 
| 112 | 
         
            +
            print(tokenizer.decode(outputs[0], skip_special_tokens=True))
         
     | 
| 113 | 
         
            +
            ```
         
     | 
| 114 | 
         
            +
             
     | 
| 115 | 
         
            +
            #### Code Insertion
         
     | 
| 116 | 
         
            +
            ```python
         
     | 
| 117 | 
         
            +
            from transformers import AutoTokenizer, AutoModelForCausalLM
         
     | 
| 118 | 
         
            +
            import torch
         
     | 
| 119 | 
         
            +
            tokenizer = AutoTokenizer.from_pretrained(""deepseek-ai/DeepSeek-Coder-V2-Lite-Base"", trust_remote_code=True)
         
     | 
| 120 | 
         
            +
            model = AutoModelForCausalLM.from_pretrained(""deepseek-ai/DeepSeek-Coder-V2-Lite-Base"", trust_remote_code=True, torch_dtype=torch.bfloat16).cuda()
         
     | 
| 121 | 
         
            +
            input_text = """"""<|fim▁begin|>def quick_sort(arr):
         
     | 
| 122 | 
         
            +
                if len(arr) <= 1:
         
     | 
| 123 | 
         
            +
                    return arr
         
     | 
| 124 | 
         
            +
                pivot = arr[0]
         
     | 
| 125 | 
         
            +
                left = []
         
     | 
| 126 | 
         
            +
                right = []
         
     | 
| 127 | 
         
            +
            <|fim▁hole|>
         
     | 
| 128 | 
         
            +
                    if arr[i] < pivot:
         
     | 
| 129 | 
         
            +
                        left.append(arr[i])
         
     | 
| 130 | 
         
            +
                    else:
         
     | 
| 131 | 
         
            +
                        right.append(arr[i])
         
     | 
| 132 | 
         
            +
                return quick_sort(left) + [pivot] + quick_sort(right)<|fim▁end|>""""""
         
     | 
| 133 | 
         
            +
            inputs = tokenizer(input_text, return_tensors=""pt"").to(model.device)
         
     | 
| 134 | 
         
            +
            outputs = model.generate(**inputs, max_length=128)
         
     | 
| 135 | 
         
            +
            print(tokenizer.decode(outputs[0], skip_special_tokens=True)[len(input_text):])
         
     | 
| 136 | 
         
            +
            ```
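In this fill-in-the-middle prompt, `<|fim▁begin|>` marks the start of the prefix, `<|fim▁hole|>` marks the position whose contents the model should generate, and `<|fim▁end|>` closes the suffix; the model returns only the missing middle span.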
         
     | 
| 137 | 
         
            +
             
     | 
| 138 | 
         
            +
            #### Chat Completion
         
     | 
| 139 | 
         
            +
             
     | 
| 140 | 
         
            +
            ```python
         
     | 
| 141 | 
         
            +
            from transformers import AutoTokenizer, AutoModelForCausalLM
         
     | 
| 142 | 
         
            +
            import torch
         
     | 
| 143 | 
         
            +
            tokenizer = AutoTokenizer.from_pretrained(""deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct"", trust_remote_code=True)
         
     | 
| 144 | 
         
            +
            model = AutoModelForCausalLM.from_pretrained(""deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct"", trust_remote_code=True, torch_dtype=torch.bfloat16).cuda()
         
     | 
| 145 | 
         
            +
            messages=[
         
     | 
| 146 | 
         
            +
                { 'role': 'user', 'content': ""write a quick sort algorithm in python.""}
         
     | 
| 147 | 
         
            +
            ]
         
     | 
| 148 | 
         
            +
            inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors=""pt"").to(model.device)
         
     | 
| 149 | 
         
            +
             # tokenizer.eos_token_id is the id of the <|end▁of▁sentence|> token
         
     | 
| 150 | 
         
            +
            outputs = model.generate(inputs, max_new_tokens=512, do_sample=False, top_k=50, top_p=0.95, num_return_sequences=1, eos_token_id=tokenizer.eos_token_id)
         
     | 
| 151 | 
         
            +
            print(tokenizer.decode(outputs[0][len(inputs[0]):], skip_special_tokens=True))
         
     | 
| 152 | 
         
            +
            ```
         
     | 
| 153 | 
         
            +
             
     | 
| 154 | 
         
            +
             
     | 
| 155 | 
         
            +
             
     | 
| 156 | 
         
            +
             The complete chat template can be found in `tokenizer_config.json` in the Hugging Face model repository.
         
     | 
| 157 | 
         
            +
             
     | 
| 158 | 
         
            +
             An example of the chat template is shown below:
         
     | 
| 159 | 
         
            +
             
     | 
| 160 | 
         
            +
            ```bash
         
     | 
| 161 | 
         
            +
            <|begin▁of▁sentence|>User: {user_message_1}
         
     | 
| 162 | 
         
            +
             
     | 
| 163 | 
         
            +
            Assistant: {assistant_message_1}<|end▁of▁sentence|>User: {user_message_2}
         
     | 
| 164 | 
         
            +
             
     | 
| 165 | 
         
            +
            Assistant:
         
     | 
| 166 | 
         
            +
            ```
         
     | 
| 167 | 
         
            +
             
     | 
| 168 | 
         
            +
            You can also add an optional system message:
         
     | 
| 169 | 
         
            +
             
     | 
| 170 | 
         
            +
            ```bash
         
     | 
| 171 | 
         
            +
            <|begin▁of▁sentence|>{system_message}
         
     | 
| 172 | 
         
            +
             
     | 
| 173 | 
         
            +
            User: {user_message_1}
         
     | 
| 174 | 
         
            +
             
     | 
| 175 | 
         
            +
            Assistant: {assistant_message_1}<|end▁of▁sentence|>User: {user_message_2}
         
     | 
| 176 | 
         
            +
             
     | 
| 177 | 
         
            +
            Assistant:
         
     | 
| 178 | 
         
            +
            ```
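As a cross-check, the same prompt format can be produced programmatically; here is a minimal sketch (our addition, assuming the tokenizer ships the template shown above):

```python
# Render the chat template as a plain string to inspect the prompt format above.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct", trust_remote_code=True
)
messages = [
    {"role": "system", "content": "You are a helpful coding assistant."},
    {"role": "user", "content": "write a quick sort algorithm in python."},
]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)  # begins with <|begin▁of▁sentence|>, then the system message, then "User: ..."
```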
         
     | 
| 179 | 
         
            +
             
     | 
| 180 | 
         
            +
            ### Inference with vLLM (recommended)
         
     | 
| 181 | 
         
            +
            To utilize [vLLM](https://github.com/vllm-project/vllm) for model inference, please merge this Pull Request into your vLLM codebase: https://github.com/vllm-project/vllm/pull/4650.
         
     | 
| 182 | 
         
            +
             
     | 
| 183 | 
         
            +
            ```python
         
     | 
| 184 | 
         
            +
            from transformers import AutoTokenizer
         
     | 
| 185 | 
         
            +
            from vllm import LLM, SamplingParams
         
     | 
| 186 | 
         
            +
             
     | 
| 187 | 
         
            +
            max_model_len, tp_size = 8192, 1
         
     | 
| 188 | 
         
            +
            model_name = ""deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct""
         
     | 
| 189 | 
         
            +
            tokenizer = AutoTokenizer.from_pretrained(model_name)
         
     | 
| 190 | 
         
            +
            llm = LLM(model=model_name, tensor_parallel_size=tp_size, max_model_len=max_model_len, trust_remote_code=True, enforce_eager=True)
         
     | 
| 191 | 
         
            +
            sampling_params = SamplingParams(temperature=0.3, max_tokens=256, stop_token_ids=[tokenizer.eos_token_id])
         
     | 
| 192 | 
         
            +
             
     | 
| 193 | 
         
            +
            messages_list = [
         
     | 
| 194 | 
         
            +
                [{""role"": ""user"", ""content"": ""Who are you?""}],
         
     | 
| 195 | 
         
            +
                [{""role"": ""user"", ""content"": ""write a quick sort algorithm in python.""}],
         
     | 
| 196 | 
         
            +
                [{""role"": ""user"", ""content"": ""Write a piece of quicksort code in C++.""}],
         
     | 
| 197 | 
         
            +
            ]
         
     | 
| 198 | 
         
            +
             
     | 
| 199 | 
         
            +
            prompt_token_ids = [tokenizer.apply_chat_template(messages, add_generation_prompt=True) for messages in messages_list]
         
     | 
| 200 | 
         
            +
             
     | 
| 201 | 
         
            +
            outputs = llm.generate(prompt_token_ids=prompt_token_ids, sampling_params=sampling_params)
         
     | 
| 202 | 
         
            +
             
     | 
| 203 | 
         
            +
            generated_text = [output.outputs[0].text for output in outputs]
         
     | 
| 204 | 
         
            +
            print(generated_text)
         
     | 
| 205 | 
         
            +
            ```
         
     | 
| 206 | 
         
            +
             
     | 
| 207 | 
         
            +
             
     | 
| 208 | 
         
            +
             
     | 
| 209 | 
         
            +
            ## 6. License
         
     | 
| 210 | 
         
            +
             
     | 
| 211 | 
         
            +
            This code repository is licensed under [the MIT License](https://github.com/deepseek-ai/DeepSeek-Coder-V2/blob/main/LICENSE-CODE). The use of DeepSeek-Coder-V2 Base/Instruct models is subject to [the Model License](https://github.com/deepseek-ai/DeepSeek-Coder-V2/blob/main/LICENSE-MODEL). DeepSeek-Coder-V2 series (including Base and Instruct) supports commercial use.
         
     | 
| 212 | 
         
            +
             
     | 
| 213 | 
         
            +
             
     | 
| 214 | 
         
            +
            ## 7. Contact
         
     | 
| 215 | 
         
            +
             If you have any questions, please raise an issue or contact us at [service@deepseek.com](mailto:service@deepseek.com).
         
     | 
| 216 | 
         
            +
            ","{""id"": ""deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct"", ""author"": ""deepseek-ai"", ""sha"": ""e434a23f91ba5b4923cf6c9d9a238eb4a08e3a11"", ""last_modified"": ""2024-07-03 05:16:11+00:00"", ""created_at"": ""2024-06-14 06:23:33+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 157276, ""downloads_all_time"": null, ""likes"": 426, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": ""warm"", ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""deepseek_v2"", ""text-generation"", ""conversational"", ""custom_code"", ""arxiv:2401.06066"", ""license:other"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: other\nlicense_name: deepseek-license\nlicense_link: LICENSE"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""DeepseekV2ForCausalLM""], ""auto_map"": {""AutoConfig"": ""configuration_deepseek.DeepseekV2Config"", ""AutoModel"": ""modeling_deepseek.DeepseekV2Model"", ""AutoModelForCausalLM"": ""modeling_deepseek.DeepseekV2ForCausalLM""}, ""model_type"": ""deepseek_v2"", ""tokenizer_config"": {""bos_token"": {""__type"": ""AddedToken"", ""content"": ""<\uff5cbegin\u2581of\u2581sentence\uff5c>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""eos_token"": {""__type"": ""AddedToken"", ""content"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""pad_token"": {""__type"": ""AddedToken"", ""content"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""unk_token"": null, ""chat_template"": ""{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{% if message['role'] == 'user' %}{{ 'User: ' + message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}{{ 'Assistant: ' + message['content'] + eos_token }}{% elif message['role'] == 'system' %}{{ message['content'] + '\n\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_deepseek.DeepseekV2ForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_deepseek.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-000004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-000004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-000004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-000004.safetensors', 
size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_deepseek.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_deepseek_fast.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""KBaba7/Quant"", ""bhaskartripathi/LLM_Quantization"", ""AlyxTeam/DeepSeek-Coder-V2-Lite-Instruct"", ""totolook/Quant"", ""FallnAI/Quantize-HF-Models"", ""patched-codes/patched-chat"", ""ruslanmv/convert_to_gguf"", ""learnmlf/MGDebugger"", ""lhilhix/deepseek-ai-DeepSeek-Coder-V2-Lite-Instruct"", ""ad4r5hgs/flan-small-text-gen"", ""msun415/Llamole"", ""kiled/pixel_odyssey"", ""luguog/deepseek-ai-DeepSeek-Coder-V2-Lite-Instruct"", ""eyosi1/deepseek-ai-DeepSeek-Coder-V2-Lite-Instruct"", ""K00B404/LLM_Quantization"", ""Dekhdemo/deepseek-rag-test"", ""minh12313132/LeetAI"", ""brukecam/deepseek-ai-DeepSeek-Coder-V2-Lite-Instruct"", ""lolololidk/deepseek-ai-DeepSeek-Coder-V2-Lite-Instruct"", ""davidizzle/LIA_CodiceDaVinci"", ""1488rid/deepseek-ai-DeepSeek-Coder-V2-Lite-Instruct"", ""muthunagarajan1/deepseek-ai-DeepSeek-Coder-V2-Lite-Instruct"", ""sam522/Coding_bot""], ""safetensors"": {""parameters"": {""BF16"": 15706484224}, ""total"": 15706484224}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-07-03 05:16:11+00:00"", ""cardData"": ""license: other\nlicense_name: deepseek-license\nlicense_link: LICENSE"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_deepseek.DeepseekV2ForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""666be1e54e6657b7d1a78f80"", ""modelId"": ""deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct"", ""usedStorage"": 31413626609}",0,https://huggingface.co/mfirth/agi-ds,1,"https://huggingface.co/johnnychang4/deepseek_coder_v2, https://huggingface.co/cfli/deepseek-direct",2,"https://huggingface.co/lmstudio-community/DeepSeek-Coder-V2-Lite-Instruct-GGUF, https://huggingface.co/CISCai/DeepSeek-Coder-V2-Lite-Instruct-SOTA-GGUF, https://huggingface.co/gaianet/DeepSeek-Coder-V2-Lite-Instruct-GGUF, https://huggingface.co/LLukas22/DeepSeek-Coder-V2-Lite-Instruct-Q4_0-GGUF, https://huggingface.co/mlx-community/DeepSeek-Coder-V2-Lite-Instruct-6bit, https://huggingface.co/gsxrchris/DeepSeek-Coder-V2-Lite-Instruct-Q4_K_M-GGUF, https://huggingface.co/legraphista/DeepSeek-Coder-V2-Lite-Instruct-IMat-GGUF, https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Lite-Instruct-GGUF, https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Lite-Instruct-i1-GGUF, https://huggingface.co/QuantFactory/DeepSeek-Coder-V2-Lite-Instruct-GGUF, https://huggingface.co/sugatoray/DeepSeek-Coder-V2-Lite-Instruct-Q4_K_M-GGUF, https://huggingface.co/qwp4w3hyb/DeepSeek-Coder-V2-Lite-Instruct-iMat-GGUF, https://huggingface.co/second-state/DeepSeek-Coder-V2-Lite-Instruct-GGUF, https://huggingface.co/Jianping746/DeepSeek-Coder-V2-Lite-Instruct-Q4_K_M-GGUF, https://huggingface.co/win10/DeepSeek-Coder-V2-Lite-Instruct-Q6_K-GGUF, https://huggingface.co/win10/DeepSeek-Coder-V2-Lite-Instruct-Q8_0-GGUF, https://huggingface.co/skratos115/DeepSeek-Coder-V2-Lite-Instruct-Q4_K_M-GGUF, https://huggingface.co/nm-testing/DeepSeek-Coder-V2-Lite-Instruct-FP8, https://huggingface.co/Aldaris/DeepSeek-Coder-V2-Lite-Instruct-Q4_K_M-GGUF, 
https://huggingface.co/spachava/DeepSeek-Coder-V2-Lite-Instruct-Q8_0-GGUF, https://huggingface.co/Theta-Lev/DeepSeek-Coder-V2-Lite-Instruct-Q8_0-GGUF, https://huggingface.co/double-em/DeepSeek-Coder-V2-Lite-Instruct-Q4_K_M-GGUF, https://huggingface.co/double-em/DeepSeek-Coder-V2-Lite-Instruct-Q2_K-GGUF, https://huggingface.co/brandonchen/DeepSeek-Coder-V2-Lite-Instruct-Q8_0-GGUF, https://huggingface.co/5fp/DeepSeek-Coder-V2-Lite-Instruct-Q4_K_S-GGUF, https://huggingface.co/phi0112358/DeepSeek-Coder-V2-Lite-Instruct-Q4_K_M-GGUF, https://huggingface.co/phi0112358/DeepSeek-Coder-V2-Lite-Instruct-Q8_0-GGUF, https://huggingface.co/legionarius/DeepSeek-Coder-V2-Lite-Instruct-Q4_K_M-GGUF, https://huggingface.co/venkatsriram/DeepSeek-Coder-V2-Lite-Instruct-Q4_0-GGUF, https://huggingface.co/jacobcarajo/DeepSeek-Coder-V2-Lite-Instruct-Q5_K_M-GGUF, https://huggingface.co/Ffftdtd5dtft/DeepSeek-Coder-V2-Lite-Instruct-Q2_K-GGUF, https://huggingface.co/RetroSteve0/DeepSeek-Coder-V2-Lite-Instruct-Q4_K_M-GGUF, https://huggingface.co/ijohn07/DeepSeek-Coder-V2-Lite-Instruct-Q6_K-GGUF, https://huggingface.co/pipilok/DeepSeek-Coder-V2-Lite-Instruct-Q4_0_4_8-GGUF, https://huggingface.co/Trisert/DeepSeek-Coder-V2-Lite-Instruct-Q4_0-GGUF, https://huggingface.co/ijohn07/DeepSeek-Coder-V2-Lite-Instruct-Q8_0-GGUF, https://huggingface.co/tensorblock/DeepSeek-Coder-V2-Lite-Instruct-GGUF, https://huggingface.co/AcidBomb/DeepSeek-Coder-V2-Lite-Instruct-Q8_0-GGUF, https://huggingface.co/AcidBomb/DeepSeek-Coder-V2-Lite-Instruct-Q5_K_M-GGUF, https://huggingface.co/itlwas/DeepSeek-Coder-V2-Lite-Instruct-Q4_K_M-GGUF, https://huggingface.co/paramedik/DeepSeek-Coder-V2-Lite-Instruct-Q8_0-GGUF, https://huggingface.co/brunopio/DeepSeek-Coder-V2-Lite-Instruct-nbits4-GS0-Axis0-HQQ-T, https://huggingface.co/FullnameNameUser/DeepSeek-Coder-V2-Lite-Instruct-Q4_K_M-GGUF, https://huggingface.co/gregbarbosa/DeepSeek-Coder-V2-Lite-Instruct-Q4-mlx, https://huggingface.co/huggingkot/DeepSeek-Coder-V2-Lite-Instruct-q4f16_1-MLC, https://huggingface.co/alphaduriendur/DeepSeek-Coder-V2-Lite-Instruct-Q4_K_M-GGUF, https://huggingface.co/ijohn07/DeepSeek-Coder-V2-Lite-Instruct-Q5_K_M-GGUF, https://huggingface.co/ijohn07/DeepSeek-Coder-V2-Lite-Instruct-Q4_K_M-GGUF, https://huggingface.co/rohith-yarramala/asyncapi-assistant-model-merged, https://huggingface.co/nm-testing/DeepSeek-Coder-V2-Lite-Instruct-quantized.w8a8",50,,0,"1488rid/deepseek-ai-DeepSeek-Coder-V2-Lite-Instruct, AlyxTeam/DeepSeek-Coder-V2-Lite-Instruct, FallnAI/Quantize-HF-Models, K00B404/LLM_Quantization, KBaba7/Quant, bhaskartripathi/LLM_Quantization, davidizzle/LIA_CodiceDaVinci, lhilhix/deepseek-ai-DeepSeek-Coder-V2-Lite-Instruct, lolololidk/deepseek-ai-DeepSeek-Coder-V2-Lite-Instruct, patched-codes/patched-chat, sam522/Coding_bot, totolook/Quant",12
         
     | 
| 217 | 
         
            +
            mfirth/agi-ds,"---
         
     | 
| 218 | 
         
            +
            library_name: transformers
         
     | 
| 219 | 
         
            +
            license: other
         
     | 
| 220 | 
         
            +
            base_model: deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct
         
     | 
| 221 | 
         
            +
            tags:
         
     | 
| 222 | 
         
            +
            - generated_from_trainer
         
     | 
| 223 | 
         
            +
            datasets:
         
     | 
| 224 | 
         
            +
            - axolotl_format_deepseek_combined_wm.json
         
     | 
| 225 | 
         
            +
            model-index:
         
     | 
| 226 | 
         
            +
            - name: models/deepseek_wm
         
     | 
| 227 | 
         
            +
              results: []
         
     | 
| 228 | 
         
            +
            ---
         
     | 
| 229 | 
         
            +
             
     | 
| 230 | 
         
            +
            <!-- This model card has been generated automatically according to the information the Trainer had access to. You
         
     | 
| 231 | 
         
            +
            should probably proofread and complete it, then remove this comment. -->
         
     | 
| 232 | 
         
            +
             
     | 
| 233 | 
         
            +
            [<img src=""https://raw.githubusercontent.com/axolotl-ai-cloud/axolotl/main/image/axolotl-badge-web.png"" alt=""Built with Axolotl"" width=""200"" height=""32""/>](https://github.com/axolotl-ai-cloud/axolotl)
         
     | 
| 234 | 
         
            +
            <details><summary>See axolotl config</summary>
         
     | 
| 235 | 
         
            +
             
     | 
| 236 | 
         
            +
            axolotl version: `0.5.3.dev44+g5bef1906`
         
     | 
| 237 | 
         
            +
            ```yaml
         
     | 
| 238 | 
         
            +
            base_model: deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct
         
     | 
| 239 | 
         
            +
            trust_remote_code: true
         
     | 
| 240 | 
         
            +
             
     | 
| 241 | 
         
            +
            plugins:
         
     | 
| 242 | 
         
            +
              - axolotl.integrations.liger.LigerPlugin
         
     | 
| 243 | 
         
            +
            liger_rope: true
         
     | 
| 244 | 
         
            +
            liger_rms_norm: true
         
     | 
| 245 | 
         
            +
            liger_glu_activation: true
         
     | 
| 246 | 
         
            +
            liger_layer_norm: true
         
     | 
| 247 | 
         
            +
            liger_fused_linear_cross_entropy: true
         
     | 
| 248 | 
         
            +
             
     | 
| 249 | 
         
            +
            datasets:
         
     | 
| 250 | 
         
            +
              - path: axolotl_format_deepseek_combined_wm.json
         
     | 
| 251 | 
         
            +
                type: input_output
         
     | 
| 252 | 
         
            +
            dataset_prepared_path: last_run_prepared_deepseek
         
     | 
| 253 | 
         
            +
                
         
     | 
| 254 | 
         
            +
            output_dir: ./models/deepseek_wm
         
     | 
| 255 | 
         
            +
            sequence_len: 4096
         
     | 
| 256 | 
         
            +
             
     | 
| 257 | 
         
            +
            wandb_project: agent-v0
         
     | 
| 258 | 
         
            +
            wandb_name: deepseek_wm
         
     | 
| 259 | 
         
            +
             
     | 
| 260 | 
         
            +
            train_on_inputs: false
         
     | 
| 261 | 
         
            +
            gradient_checkpointing: true
         
     | 
| 262 | 
         
            +
            gradient_checkpointing_kwargs:
         
     | 
| 263 | 
         
            +
              use_reentrant: false
         
     | 
| 264 | 
         
            +
            gradient_accumulation_steps: 1
         
     | 
| 265 | 
         
            +
            micro_batch_size: 4
         
     | 
| 266 | 
         
            +
            num_epochs: 3
         
     | 
| 267 | 
         
            +
            optimizer: adamw_torch
         
     | 
| 268 | 
         
            +
            learning_rate: 2e-5
         
     | 
| 269 | 
         
            +
            xformers_attention:
         
     | 
| 270 | 
         
            +
            flash_attention: true
         
     | 
| 271 | 
         
            +
             
     | 
| 272 | 
         
            +
            logging_steps: 5
         
     | 
| 273 | 
         
            +
             
     | 
| 274 | 
         
            +
            warmup_steps: 5
         
     | 
| 275 | 
         
            +
            saves_per_epoch: 1
         
     | 
| 276 | 
         
            +
            weight_decay: 0.0
         
     | 
| 277 | 
         
            +
             
     | 
| 278 | 
         
            +
            deepspeed: axolotl/deepspeed_configs/zero3_bf16_cpuoffload_all.json
         
     | 
| 279 | 
         
            +
             
     | 
| 280 | 
         
            +
            ```
         
     | 
| 281 | 
         
            +
             
     | 
| 282 | 
         
            +
            </details><br>
         
     | 
| 283 | 
         
            +
             
     | 
| 284 | 
         
            +
            # models/deepseek_wm
         
     | 
| 285 | 
         
            +
             
     | 
| 286 | 
         
            +
            This model is a fine-tuned version of [deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct](https://huggingface.co/deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct) on the axolotl_format_deepseek_combined_wm.json dataset.
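Since the rest of this card is auto-generated, a hedged usage sketch may help: the fine-tune keeps the DeepSeek-V2 architecture, so (assuming it also keeps the base model's chat template) loading mirrors the base model.

```python
# Hedged inference sketch for the fine-tune; mirrors the base model's usage.
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("mfirth/agi-ds", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    "mfirth/agi-ds", trust_remote_code=True, torch_dtype=torch.bfloat16
).cuda()
messages = [{"role": "user", "content": "write a quick sort algorithm in python."}]
inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
outputs = model.generate(inputs, max_new_tokens=256, eos_token_id=tokenizer.eos_token_id)
print(tokenizer.decode(outputs[0][len(inputs[0]):], skip_special_tokens=True))
```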
         
     | 
| 287 | 
         
            +
             
     | 
| 288 | 
         
            +
            ## Model description
         
     | 
| 289 | 
         
            +
             
     | 
| 290 | 
         
            +
            More information needed
         
     | 
| 291 | 
         
            +
             
     | 
| 292 | 
         
            +
            ## Intended uses & limitations
         
     | 
| 293 | 
         
            +
             
     | 
| 294 | 
         
            +
            More information needed
         
     | 
| 295 | 
         
            +
             
     | 
| 296 | 
         
            +
            ## Training and evaluation data
         
     | 
| 297 | 
         
            +
             
     | 
| 298 | 
         
            +
            More information needed
         
     | 
| 299 | 
         
            +
             
     | 
| 300 | 
         
            +
            ## Training procedure
         
     | 
| 301 | 
         
            +
             
     | 
| 302 | 
         
            +
            ### Training hyperparameters
         
     | 
| 303 | 
         
            +
             
     | 
| 304 | 
         
            +
            The following hyperparameters were used during training:
         
     | 
| 305 | 
         
            +
            - learning_rate: 2e-05
         
     | 
| 306 | 
         
            +
            - train_batch_size: 4
         
     | 
| 307 | 
         
            +
            - eval_batch_size: 4
         
     | 
| 308 | 
         
            +
            - seed: 42
         
     | 
| 309 | 
         
            +
            - distributed_type: multi-GPU
         
     | 
| 310 | 
         
            +
            - num_devices: 8
         
     | 
| 311 | 
         
            +
            - total_train_batch_size: 32
         
     | 
| 312 | 
         
            +
            - total_eval_batch_size: 32
         
     | 
| 313 | 
         
            +
             - optimizer: adamw_torch (betas=(0.9, 0.999), epsilon=1e-08, no additional optimizer arguments)
         
     | 
| 314 | 
         
            +
            - lr_scheduler_type: cosine
         
     | 
| 315 | 
         
            +
            - lr_scheduler_warmup_steps: 5
         
     | 
| 316 | 
         
            +
            - num_epochs: 3
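             (The total train batch size above follows from micro_batch_size × gradient_accumulation_steps × num_devices = 4 × 1 × 8 = 32.)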
         
     | 
| 317 | 
         
            +
             
     | 
| 318 | 
         
            +
            ### Training results
         
     | 
| 319 | 
         
            +
             
     | 
| 320 | 
         
            +
             
     | 
| 321 | 
         
            +
             
     | 
| 322 | 
         
            +
            ### Framework versions
         
     | 
| 323 | 
         
            +
             
     | 
| 324 | 
         
            +
            - Transformers 4.47.0
         
     | 
| 325 | 
         
            +
            - Pytorch 2.5.1+cu124
         
     | 
| 326 | 
         
            +
            - Datasets 3.1.0
         
     | 
| 327 | 
         
            +
            - Tokenizers 0.21.0
         
     | 
| 328 | 
         
            +
            ","{""id"": ""mfirth/agi-ds"", ""author"": ""mfirth"", ""sha"": ""2cffdbc88b5bae172b4fbbdaac20b276971a7156"", ""last_modified"": ""2024-12-08 18:04:16+00:00"", ""created_at"": ""2024-12-08 18:00:22+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 6, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""deepseek_v2"", ""text-generation"", ""generated_from_trainer"", ""conversational"", ""custom_code"", ""dataset:axolotl_format_deepseek_combined_wm.json"", ""base_model:deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct"", ""base_model:finetune:deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct"", ""license:other"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct\ndatasets:\n- axolotl_format_deepseek_combined_wm.json\nlibrary_name: transformers\nlicense: other\ntags:\n- generated_from_trainer\nmodel-index:\n- name: models/deepseek_wm\n  results: []"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": [{""name"": ""models/deepseek_wm"", ""results"": []}], ""config"": {""architectures"": [""DeepseekV2ForCausalLM""], ""auto_map"": {""AutoConfig"": ""deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct--configuration_deepseek.DeepseekV2Config"", ""AutoModel"": ""deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct--modeling_deepseek.DeepseekV2Model"", ""AutoModelForCausalLM"": ""deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct--modeling_deepseek.DeepseekV2ForCausalLM""}, ""model_type"": ""deepseek_v2"", ""tokenizer_config"": {""bos_token"": ""<\uff5cbegin\u2581of\u2581sentence\uff5c>"", ""chat_template"": ""{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{% if message['role'] == 'user' %}{{ 'User: ' + message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}{{ 'Assistant: ' + message['content'] + eos_token }}{% elif message['role'] == 'system' %}{{ message['content'] + '\n\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}"", ""eos_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""pad_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""unk_token"": null, ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct--modeling_deepseek.DeepseekV2ForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00003-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 15706484224}, ""total"": 15706484224}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-12-08 18:04:16+00:00"", ""cardData"": ""base_model: deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct\ndatasets:\n- axolotl_format_deepseek_combined_wm.json\nlibrary_name: transformers\nlicense: other\ntags:\n- generated_from_trainer\nmodel-index:\n- name: models/deepseek_wm\n  results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct--modeling_deepseek.DeepseekV2ForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""6755deb6784ff7ea9db513a0"", ""modelId"": ""mfirth/agi-ds"", ""usedStorage"": 31413625552}",1,,0,,0,https://huggingface.co/mradermacher/agi-ds-GGUF,1,,0,huggingface/InferenceSupport/discussions/new?title=mfirth/agi-ds&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bmfirth%2Fagi-ds%5D(%2Fmfirth%2Fagi-ds)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
     | 
    	
        DeepSeek-R1-Distill-Qwen-32B_finetunes_20250424_193500.csv_finetunes_20250424_193500.csv
    ADDED
    
    | 
         The diff for this file is too large to render. 
		See raw diff 
     | 
| 
         | 
    	
        DeepSeek-V2-Chat_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv
    ADDED
    
    | 
         @@ -0,0 +1,376 @@ 
     | 
| 1 | 
         
            +
            model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
         
     | 
| 2 | 
         
            +
            deepseek-ai/DeepSeek-V2-Chat,"---
         
     | 
| 3 | 
         
            +
            license: other
         
     | 
| 4 | 
         
            +
            license_name: deepseek
         
     | 
| 5 | 
         
            +
            license_link: https://github.com/deepseek-ai/DeepSeek-V2/blob/main/LICENSE-MODEL
         
     | 
| 6 | 
         
            +
            ---
         
     | 
| 7 | 
         
            +
             
     | 
| 8 | 
         
            +
            <!-- markdownlint-disable first-line-h1 -->
         
     | 
| 9 | 
         
            +
            <!-- markdownlint-disable html -->
         
     | 
| 10 | 
         
            +
            <!-- markdownlint-disable no-duplicate-header -->
         
     | 
| 11 | 
         
            +
             
     | 
| 12 | 
         
            +
            <div align=""center"">
         
     | 
| 13 | 
         
            +
              <img src=""https://github.com/deepseek-ai/DeepSeek-V2/blob/main/figures/logo.svg?raw=true"" width=""60%"" alt=""DeepSeek-V2"" />
         
     | 
| 14 | 
         
            +
            </div>
         
     | 
| 15 | 
         
            +
            <hr>
         
     | 
| 16 | 
         
            +
            <div align=""center"" style=""line-height: 1;"">
         
     | 
| 17 | 
         
            +
              <a href=""https://www.deepseek.com/"" target=""_blank"" style=""margin: 2px;"">
         
     | 
| 18 | 
         
            +
                <img alt=""Homepage"" src=""https://github.com/deepseek-ai/DeepSeek-V2/blob/main/figures/badge.svg?raw=true"" style=""display: inline-block; vertical-align: middle;""/>
         
     | 
| 19 | 
         
            +
              </a>
         
     | 
| 20 | 
         
            +
              <a href=""https://chat.deepseek.com/"" target=""_blank"" style=""margin: 2px;"">
         
     | 
| 21 | 
         
            +
                <img alt=""Chat"" src=""https://img.shields.io/badge/🤖%20Chat-DeepSeek%20V2-536af5?color=536af5&logoColor=white"" style=""display: inline-block; vertical-align: middle;""/>
         
     | 
| 22 | 
         
            +
              </a>
         
     | 
| 23 | 
         
            +
              <a href=""https://huggingface.co/deepseek-ai"" target=""_blank"" style=""margin: 2px;"">
         
     | 
| 24 | 
         
            +
                <img alt=""Hugging Face"" src=""https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-DeepSeek%20AI-ffc107?color=ffc107&logoColor=white"" style=""display: inline-block; vertical-align: middle;""/>
         
     | 
| 25 | 
         
            +
              </a>
         
     | 
| 26 | 
         
            +
            </div>
         
     | 
| 27 | 
         
            +
             
     | 
| 28 | 
         
            +
            <div align=""center"" style=""line-height: 1;"">
         
     | 
| 29 | 
         
            +
              <a href=""https://discord.gg/Tc7c45Zzu5"" target=""_blank"" style=""margin: 2px;"">
         
     | 
| 30 | 
         
            +
                <img alt=""Discord"" src=""https://img.shields.io/badge/Discord-DeepSeek%20AI-7289da?logo=discord&logoColor=white&color=7289da"" style=""display: inline-block; vertical-align: middle;""/>
         
     | 
| 31 | 
         
            +
              </a>
         
     | 
| 32 | 
         
            +
              <a href=""https://github.com/deepseek-ai/DeepSeek-V2/blob/main/figures/qr.jpeg?raw=true"" target=""_blank"" style=""margin: 2px;"">
         
     | 
| 33 | 
         
            +
                <img alt=""Wechat"" src=""https://img.shields.io/badge/WeChat-DeepSeek%20AI-brightgreen?logo=wechat&logoColor=white"" style=""display: inline-block; vertical-align: middle;""/>
         
     | 
| 34 | 
         
            +
              </a>
         
     | 
| 35 | 
         
            +
              <a href=""https://twitter.com/deepseek_ai"" target=""_blank"" style=""margin: 2px;"">
         
     | 
| 36 | 
         
            +
                <img alt=""Twitter Follow"" src=""https://img.shields.io/badge/Twitter-deepseek_ai-white?logo=x&logoColor=white"" style=""display: inline-block; vertical-align: middle;""/>
         
     | 
| 37 | 
         
            +
              </a>
         
     | 
| 38 | 
         
            +
            </div>
         
     | 
| 39 | 
         
            +
             
     | 
| 40 | 
         
            +
            <div align=""center"" style=""line-height: 1;"">
         
     | 
| 41 | 
         
            +
              <a href=""https://github.com/deepseek-ai/DeepSeek-V2/blob/main/LICENSE-CODE"" style=""margin: 2px;"">
         
     | 
| 42 | 
         
            +
                <img alt=""Code License"" src=""https://img.shields.io/badge/Code_License-MIT-f5de53?&color=f5de53"" style=""display: inline-block; vertical-align: middle;""/>
         
     | 
| 43 | 
         
            +
              </a>
         
     | 
| 44 | 
         
            +
              <a href=""https://github.com/deepseek-ai/DeepSeek-V2/blob/main/LICENSE-MODEL"" style=""margin: 2px;"">
         
     | 
| 45 | 
         
            +
                <img alt=""Model License"" src=""https://img.shields.io/badge/Model_License-Model_Agreement-f5de53?&color=f5de53"" style=""display: inline-block; vertical-align: middle;""/>
         
     | 
| 46 | 
         
            +
              </a>
         
     | 
| 47 | 
         
            +
            </div>
         
     | 
| 48 | 
         
            +
             
     | 
| 49 | 
         
            +
            <p align=""center"">
         
     | 
| 50 | 
         
            +
              <a href=""#2-model-downloads"">Model Download</a> |
         
     | 
| 51 | 
         
            +
              <a href=""#3-evaluation-results"">Evaluation Results</a> |
         
     | 
| 52 | 
         
            +
              <a href=""#4-model-architecture"">Model Architecture</a> |
         
     | 
| 53 | 
         
            +
              <a href=""#6-api-platform"">API Platform</a> |
         
     | 
| 54 | 
         
            +
              <a href=""#8-license"">License</a> |
         
     | 
| 55 | 
         
            +
              <a href=""#9-citation"">Citation</a>
         
     | 
| 56 | 
         
            +
            </p>
         
     | 
| 57 | 
         
            +
             
     | 
| 58 | 
         
            +
            <p align=""center"">
         
     | 
| 59 | 
         
            +
              <a href=""https://arxiv.org/abs/2405.04434""><b>Paper Link</b>👁️</a>
         
     | 
| 60 | 
         
            +
            </p>
         
     | 
| 61 | 
         
            +
             
     | 
| 62 | 
         
            +
            # DeepSeek-V2:  A Strong, Economical, and Efficient Mixture-of-Experts Language Model
         
     | 
| 63 | 
         
            +
             
     | 
| 64 | 
         
            +
            ## 1. Introduction
         
     | 
| 65 | 
         
            +
            Today, we’re introducing DeepSeek-V2, a strong Mixture-of-Experts (MoE) language model characterized by economical training and efficient inference. It comprises 236B total parameters, of which 21B are activated for each token. Compared with DeepSeek 67B, DeepSeek-V2 achieves stronger performance while saving 42.5% of training costs, reducing the KV cache by 93.3%, and boosting the maximum generation throughput 5.76-fold.
         
     | 
| 66 | 
         
            +
             
     | 
| 67 | 
         
            +
            <p align=""center"">
         
     | 
| 68 | 
         
            +
             
     | 
| 69 | 
         
            +
            <div style=""display: flex; justify-content: center;"">
         
     | 
| 70 | 
         
            +
                <img src=""https://github.com/deepseek-ai/DeepSeek-V2/blob/main/figures/activationparameters.png?raw=true"" style=""height:300px; width:auto; margin-right:10px"">
         
     | 
| 71 | 
         
            +
                <img src=""https://github.com/deepseek-ai/DeepSeek-V2/blob/main/figures/trainingcost.png?raw=true"" style=""height:300px; width:auto; margin-left:10px"">
         
     | 
| 72 | 
         
            +
            </div>
         
     | 
| 73 | 
         
            +
            </p>
         
     | 
| 74 | 
         
            +
            We pretrained DeepSeek-V2 on a diverse and high-quality corpus comprising 8.1 trillion tokens. This comprehensive pretraining was followed by a process of Supervised Fine-Tuning (SFT) and Reinforcement Learning (RL) to fully unleash the model's capabilities. The evaluation results validate the effectiveness of our approach as DeepSeek-V2 achieves remarkable performance on both standard benchmarks and open-ended generation evaluation.
         
     | 
| 75 | 
         
            +
             
     | 
| 76 | 
         
            +
            ## 2. Model Downloads
         
     | 
| 77 | 
         
            +
             
     | 
| 78 | 
         
            +
            <div align=""center"">
         
     | 
| 79 | 
         
            +
             
     | 
| 80 | 
         
            +
            | **Model** | **Context Length** | **Download** |
         
     | 
| 81 | 
         
            +
            | :------------: | :------------: | :------------: |
         
     | 
| 82 | 
         
            +
            | DeepSeek-V2   | 128k   | [🤗 HuggingFace](https://huggingface.co/deepseek-ai/DeepSeek-V2)   |
         
     | 
| 83 | 
         
            +
            | DeepSeek-V2-Chat (RL)   | 128k   | [🤗 HuggingFace](https://huggingface.co/deepseek-ai/DeepSeek-V2-Chat)   |
         
     | 
| 84 | 
         
            +
             
     | 
| 85 | 
         
            +
            </div>
         
     | 
| 86 | 
         
            +
             
     | 
| 87 | 
         
            +
            Due to constraints in Hugging Face's implementation, the open-source code currently runs more slowly on GPUs than our internal codebase. To facilitate efficient execution of the model, we offer a dedicated vLLM solution that optimizes inference performance.
         
     | 
| 88 | 
         
            +
             
     | 
| 89 | 
         
            +
            ## 3. Evaluation Results
         
     | 
| 90 | 
         
            +
            ### Base Model
         
     | 
| 91 | 
         
            +
            #### Standard Benchmark 
         
     | 
| 92 | 
         
            +
             
     | 
| 93 | 
         
            +
            <div align=""center"">
         
     | 
| 94 | 
         
            +
             
     | 
| 95 | 
         
            +
            | **Benchmark** | **Domain** | **LLaMA3 70B** | **Mixtral 8x22B** | **DeepSeek-V1 (Dense-67B)** | **DeepSeek-V2 (MoE-236B)** |
         
     | 
| 96 | 
         
            +
            |:-----------:|:--------:|:------------:|:---------------:|:-------------------------:|:------------------------:|
         
     | 
| 97 | 
         
            +
            | **MMLU** | English | 78.9 | 77.6 | 71.3 | 78.5 |
         
     | 
| 98 | 
         
            +
            | **BBH** | English | 81.0 | 78.9 | 68.7 | 78.9 |
         
     | 
| 99 | 
         
            +
            | **C-Eval** | Chinese | 67.5 | 58.6 | 66.1 | 81.7 |
         
     | 
| 100 | 
         
            +
            | **CMMLU** | Chinese | 69.3 | 60.0 | 70.8 | 84.0 |
         
     | 
| 101 | 
         
            +
            | **HumanEval** | Code | 48.2 | 53.1 | 45.1 | 48.8 |
         
     | 
| 102 | 
         
            +
            | **MBPP** | Code | 68.6 | 64.2 | 57.4 | 66.6 |
         
     | 
| 103 | 
         
            +
            | **GSM8K** | Math | 83.0 | 80.3 | 63.4 | 79.2 |
         
     | 
| 104 | 
         
            +
            | **Math** | Math | 42.2 | 42.5 | 18.7 | 43.6 |
         
     | 
| 105 | 
         
            +
             
     | 
| 106 | 
         
            +
            </div>
         
     | 
| 107 | 
         
            +
            For more evaluation details, such as few-shot settings and prompts, please check our paper. 
         
     | 
| 108 | 
         
            +
             
     | 
| 109 | 
         
            +
            #### Context Window
         
     | 
| 110 | 
         
            +
            <p align=""center"">
         
     | 
| 111 | 
         
            +
              <img width=""80%"" src=""https://github.com/deepseek-ai/DeepSeek-V2/blob/main/figures/niah.png?raw=true"">
         
     | 
| 112 | 
         
            +
            </p>
         
     | 
| 113 | 
         
            +
             
     | 
| 114 | 
         
            +
            Evaluation results on the ``Needle In A Haystack`` (NIAH) tests.  DeepSeek-V2 performs well across all context window lengths up to **128K**. 
         
     | 
| 115 | 
         
            +
             
     | 
| 116 | 
         
            +
            ### Chat Model
         
     | 
| 117 | 
         
            +
            #### Standard Benchmark 
         
     | 
| 118 | 
         
            +
            <div align=""center"">
         
     | 
| 119 | 
         
            +
             
     | 
| 120 | 
         
            +
            | Benchmark | Domain         | QWen1.5 72B Chat | Mixtral 8x22B | LLaMA3 70B Instruct | DeepSeek-V1 Chat (SFT) | DeepSeek-V2 Chat (SFT) | DeepSeek-V2 Chat (RL) |
         
     | 
| 121 | 
         
            +
            |:-----------:|:----------------:|:------------------:|:---------------:|:---------------------:|:-------------:|:-----------------------:|:----------------------:|
         
     | 
| 122 | 
         
            +
            | **MMLU**      | English        | 76.2             | 77.8          | 80.3                | 71.1        | 78.4                 | 77.8                 |
         
     | 
| 123 | 
         
            +
            | **BBH**       | English        | 65.9             | 78.4          | 80.1                | 71.7        | 81.3                 | 79.7                 |
         
     | 
| 124 | 
         
            +
            | **C-Eval**    | Chinese        | 82.2             | 60.0          | 67.9                | 65.2        | 80.9                 | 78.0                 |
         
     | 
| 125 | 
         
            +
            | **CMMLU**     | Chinese        | 82.9             | 61.0          | 70.7                | 67.8        | 82.4                 | 81.6                 |
         
     | 
| 126 | 
         
            +
            | **HumanEval** | Code           | 68.9             | 75.0          | 76.2                | 73.8        | 76.8                 | 81.1                 |
         
     | 
| 127 | 
         
            +
            | **MBPP**      | Code           | 52.2             | 64.4          | 69.8                | 61.4        | 70.4                 | 72.0                 |
         
     | 
| 128 | 
         
            +
            |   **LiveCodeBench  (0901-0401)**     | Code           | 18.8             | 25.0          | 30.5                | 18.3        | 28.7                 | 32.5                 |
         
     | 
| 129 | 
         
            +
            | **GSM8K**     | Math           | 81.9             | 87.9          | 93.2                | 84.1        | 90.8                 | 92.2                 |
         
     | 
| 130 | 
         
            +
            | **Math**      | Math           | 40.6             | 49.8          | 48.5                | 32.6        | 52.7                 | 53.9                 |
         
     | 
| 131 | 
         
            +
             
     | 
| 132 | 
         
            +
            </div>
         
     | 
| 133 | 
         
            +
             
     | 
| 134 | 
         
            +
            #### English Open Ended Generation Evaluation
         
     | 
| 135 | 
         
            +
            We evaluate our model on AlpacaEval 2.0 and MTBench, showing the competitive performance of DeepSeek-V2-Chat-RL on English conversation generation. 
         
     | 
| 136 | 
         
            +
            <p align=""center"">
         
     | 
| 137 | 
         
            +
              <img width=""50%"" src=""https://github.com/deepseek-ai/DeepSeek-V2/blob/main/figures/mtbench.png?raw=true"" />
         
     | 
| 138 | 
         
            +
            </p>
         
     | 
| 139 | 
         
            +
             
     | 
| 140 | 
         
            +
            #### Chinese Open Ended Generation Evaluation
         
     | 
| 141 | 
         
            +
            **AlignBench** (https://arxiv.org/abs/2311.18743)
         
     | 
| 142 | 
         
            +
            <div align=""center"">
         
     | 
| 143 | 
         
            +
             
     | 
| 144 | 
         
            +
            | **Model** | **Open/Closed Source** | **Overall Score** | **Chinese Reasoning** | **Chinese Language** |
         
     | 
| 145 | 
         
            +
            | :---: | :---: | :---: | :---: | :---: |
         
     | 
| 146 | 
         
            +
            | gpt-4-1106-preview | Closed-source | 8.01 | 7.73 | 8.29 |
         
     | 
| 147 | 
         
            +
            | DeepSeek-V2 Chat (RL) | Open-source | 7.91 | 7.45 | 8.35 |
         
     | 
| 148 | 
         
            +
            | erniebot-4.0-202404 (文心一言) | Closed-source | 7.89 | 7.61 | 8.17 |
         
     | 
| 149 | 
         
            +
            | DeepSeek-V2 Chat (SFT) | Open-source | 7.74 | 7.30 | 8.17 |
         
     | 
| 150 | 
         
            +
            | gpt-4-0613 | Closed-source | 7.53 | 7.47 | 7.59 |
         
     | 
| 151 | 
         
            +
            | erniebot-4.0-202312 (文心一言) | Closed-source | 7.36 | 6.84 | 7.88 |
         
     | 
| 152 | 
         
            +
            | moonshot-v1-32k-202404 (月之暗面) | Closed-source | 7.22 | 6.42 | 8.02 |
         
     | 
| 153 | 
         
            +
            | Qwen1.5-72B-Chat (通义千问) | Open-source | 7.19 | 6.45 | 7.93 |
         
     | 
| 154 | 
         
            +
            | DeepSeek-67B-Chat | Open-source | 6.43 | 5.75 | 7.11 |
         
     | 
| 155 | 
         
            +
            | Yi-34B-Chat (零一万物) | Open-source | 6.12 | 4.86 | 7.38 |
         
     | 
| 156 | 
         
            +
            | gpt-3.5-turbo-0613 | Closed-source | 6.08 | 5.35 | 6.71 |
         
     | 
| 157 | 
         
            +
             
     | 
| 158 | 
         
            +
            </div>
         
     | 
| 159 | 
         
            +
             
     | 
| 160 | 
         
            +
            #### Coding Benchmarks
         
     | 
| 161 | 
         
            +
            We evaluate our model on LiveCodeBench (0901-0401), a benchmark designed for live coding challenges. As illustrated, DeepSeek-V2 demonstrates considerable proficiency in LiveCodeBench, achieving a Pass@1 score that surpasses several other sophisticated models. This performance highlights the model's effectiveness in tackling live coding tasks.
         
     | 
| 162 | 
         
            +
             
     | 
| 163 | 
         
            +
            <p align=""center"">
         
     | 
| 164 | 
         
            +
              <img width=""50%"" src=""https://github.com/deepseek-ai/DeepSeek-V2/blob/main/figures/code_benchmarks.png?raw=true"">
         
     | 
| 165 | 
         
            +
            </p>
         
     | 
| 166 | 
         
            +
             
     | 
| 167 | 
         
            +
            ## 4. Model Architecture
         
     | 
| 168 | 
         
            +
            DeepSeek-V2 adopts innovative architectures to guarantee economical training and efficient inference: 
         
     | 
| 169 | 
         
            +
            - For attention, we design MLA (Multi-head Latent Attention), which utilizes low-rank key-value union compression to eliminate the bottleneck of inference-time key-value cache, thus supporting efficient inference. 
         
     | 
| 170 | 
         
            +
            - For Feed-Forward Networks (FFNs), we adopt DeepSeekMoE architecture, a high-performance MoE architecture that enables training stronger models at lower costs. 
         
     | 
| 171 | 
         
            +
             
     | 
| 172 | 
         
            +
            <p align=""center"">
         
     | 
| 173 | 
         
            +
              <img width=""90%"" src=""https://github.com/deepseek-ai/DeepSeek-V2/blob/main/figures/architecture.png?raw=true"" />
         
     | 
| 174 | 
         
            +
            </p>
         
     | 
| 175 | 
         
            +
             
     | 
| 176 | 
         
            +
            ## 5. Chat Website
         
     | 
| 177 | 
         
            +
            You can chat with DeepSeek-V2 on DeepSeek's official website: [chat.deepseek.com](https://chat.deepseek.com/sign_in)
         
     | 
| 178 | 
         
            +
             
     | 
| 179 | 
         
            +
            ## 6. API Platform
         
     | 
| 180 | 
         
            +
            We also provide an OpenAI-compatible API on the DeepSeek Platform: [platform.deepseek.com](https://platform.deepseek.com/). Sign up to receive millions of free tokens, or use pay-as-you-go pricing.
         
     | 
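As a minimal sketch of calling the OpenAI-compatible endpoint (the base URL and model identifier below are assumptions based on DeepSeek's public platform documentation, not values taken from this card):

```python
# Hypothetical sketch: query the DeepSeek platform via the official `openai` client.
# The base_url and model name are assumptions, not taken from this model card.
from openai import OpenAI

client = OpenAI(
    api_key="YOUR_DEEPSEEK_API_KEY",      # key issued by platform.deepseek.com
    base_url="https://api.deepseek.com",  # assumed OpenAI-compatible endpoint
)

response = client.chat.completions.create(
    model="deepseek-chat",                # assumed model identifier
    messages=[{"role": "user", "content": "Write a piece of quicksort code in C++"}],
    temperature=0.3,
    max_tokens=256,
)
print(response.choices[0].message.content)
```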
| 181 | 
         
            +
             
     | 
| 182 | 
         
            +
             
     | 
| 183 | 
         
            +
            <p align=""center"">
         
     | 
| 184 | 
         
            +
              <img width=""40%"" src=""https://github.com/deepseek-ai/DeepSeek-V2/blob/main/figures/model_price.png?raw=true"">
         
     | 
| 185 | 
         
            +
            </p>
         
     | 
| 186 | 
         
            +
             
     | 
| 187 | 
         
            +
             
     | 
| 188 | 
         
            +
            ## 7. How to run locally
         
     | 
| 189 | 
         
            +
            **To run DeepSeek-V2 in BF16 for inference, 8 GPUs with 80 GB of memory each are required.**
         
     | 
| 190 | 
         
            +
            ### Inference with Huggingface's Transformers
         
     | 
| 191 | 
         
            +
            You can directly employ [Huggingface's Transformers](https://github.com/huggingface/transformers) for model inference.
         
     | 
| 192 | 
         
            +
             
     | 
| 193 | 
         
            +
            #### Text Completion
         
     | 
| 194 | 
         
            +
            ```python
         
     | 
| 195 | 
         
            +
            import torch
         
     | 
| 196 | 
         
            +
            from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
         
     | 
| 197 | 
         
            +
             
     | 
| 198 | 
         
            +
            model_name = ""deepseek-ai/DeepSeek-V2""
         
     | 
| 199 | 
         
            +
            tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
         
     | 
| 200 | 
         
            +
            # `max_memory` should be set based on your devices
         
     | 
| 201 | 
         
            +
            max_memory = {i: ""75GB"" for i in range(8)}
         
     | 
| 202 | 
         
            +
            # `device_map` cannot be set to `auto`
         
     | 
| 203 | 
         
            +
            model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True, device_map=""sequential"", torch_dtype=torch.bfloat16, max_memory=max_memory, attn_implementation=""eager"")
         
     | 
| 204 | 
         
            +
            model.generation_config = GenerationConfig.from_pretrained(model_name)
         
     | 
| 205 | 
         
            +
            model.generation_config.pad_token_id = model.generation_config.eos_token_id
         
     | 
| 206 | 
         
            +
             
     | 
| 207 | 
         
            +
            text = ""An attention function can be described as mapping a query and a set of key-value pairs to an output, where the query, keys, values, and output are all vectors. The output is""
         
     | 
| 208 | 
         
            +
            inputs = tokenizer(text, return_tensors=""pt"")
         
     | 
| 209 | 
         
            +
            outputs = model.generate(**inputs.to(model.device), max_new_tokens=100)
         
     | 
| 210 | 
         
            +
             
     | 
| 211 | 
         
            +
            result = tokenizer.decode(outputs[0], skip_special_tokens=True)
         
     | 
| 212 | 
         
            +
            print(result)
         
     | 
| 213 | 
         
            +
            ```
         
     | 
| 214 | 
         
            +
             
     | 
| 215 | 
         
            +
            #### Chat Completion
         
     | 
| 216 | 
         
            +
            ```python
         
     | 
| 217 | 
         
            +
            import torch
         
     | 
| 218 | 
         
            +
            from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
         
     | 
| 219 | 
         
            +
             
     | 
| 220 | 
         
            +
            model_name = ""deepseek-ai/DeepSeek-V2-Chat""
         
     | 
| 221 | 
         
            +
            tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
         
     | 
| 222 | 
         
            +
            # `max_memory` should be set based on your devices
         
     | 
| 223 | 
         
            +
            max_memory = {i: ""75GB"" for i in range(8)}
         
     | 
| 224 | 
         
            +
            # `device_map` cannot be set to `auto`
         
     | 
| 225 | 
         
            +
            model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True, device_map=""sequential"", torch_dtype=torch.bfloat16, max_memory=max_memory, attn_implementation=""eager"")
         
     | 
| 226 | 
         
            +
            model.generation_config = GenerationConfig.from_pretrained(model_name)
         
     | 
| 227 | 
         
            +
            model.generation_config.pad_token_id = model.generation_config.eos_token_id
         
     | 
| 228 | 
         
            +
             
     | 
| 229 | 
         
            +
            messages = [
         
     | 
| 230 | 
         
            +
                {""role"": ""user"", ""content"": ""Write a piece of quicksort code in C++""}
         
     | 
| 231 | 
         
            +
            ]
         
     | 
| 232 | 
         
            +
            input_tensor = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors=""pt"")
         
     | 
| 233 | 
         
            +
            outputs = model.generate(input_tensor.to(model.device), max_new_tokens=100)
         
     | 
| 234 | 
         
            +
             
     | 
| 235 | 
         
            +
            result = tokenizer.decode(outputs[0][input_tensor.shape[1]:], skip_special_tokens=True)
         
     | 
| 236 | 
         
            +
            print(result)
         
     | 
| 237 | 
         
            +
            ```
         
     | 
| 238 | 
         
            +
             
     | 
| 239 | 
         
            +
            The complete chat template can be found in `tokenizer_config.json` in the Hugging Face model repository.
         
     | 
| 240 | 
         
            +
             
     | 
| 241 | 
         
            +
            An example of the chat template is shown below:
         
     | 
| 242 | 
         
            +
             
     | 
| 243 | 
         
            +
            ```bash
         
     | 
| 244 | 
         
            +
            <|begin▁of▁sentence|>User: {user_message_1}
         
     | 
| 245 | 
         
            +
             
     | 
| 246 | 
         
            +
            Assistant: {assistant_message_1}<|end▁of▁sentence|>User: {user_message_2}
         
     | 
| 247 | 
         
            +
             
     | 
| 248 | 
         
            +
            Assistant:
         
     | 
| 249 | 
         
            +
            ```
         
     | 
| 250 | 
         
            +
             
     | 
| 251 | 
         
            +
            You can also add an optional system message:
         
     | 
| 252 | 
         
            +
             
     | 
| 253 | 
         
            +
            ```bash
         
     | 
| 254 | 
         
            +
            <|begin▁of▁sentence|>{system_message}
         
     | 
| 255 | 
         
            +
             
     | 
| 256 | 
         
            +
            User: {user_message_1}
         
     | 
| 257 | 
         
            +
             
     | 
| 258 | 
         
            +
            Assistant: {assistant_message_1}<|end▁of▁sentence|>User: {user_message_2}
         
     | 
| 259 | 
         
            +
             
     | 
| 260 | 
         
            +
            Assistant:
         
     | 
| 261 | 
         
            +
            ```
         
     | 
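The same prompts can also be produced programmatically; a small sketch (assuming the tokenizer shipped with this repository) uses `apply_chat_template` with `tokenize=False` to render the template shown above:

```python
# Sketch: render the chat template to a plain string instead of token ids.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/DeepSeek-V2-Chat", trust_remote_code=True)

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Write a piece of quicksort code in C++"},
]

# add_generation_prompt=True appends the trailing "Assistant:" turn.
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
```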
| 262 | 
         
            +
             
     | 
| 263 | 
         
            +
            ### Inference with vLLM (recommended)
         
     | 
| 264 | 
         
            +
            To utilize [vLLM](https://github.com/vllm-project/vllm) for model inference, please merge this Pull Request into your vLLM codebase: https://github.com/vllm-project/vllm/pull/4650.
         
     | 
| 265 | 
         
            +
             
     | 
| 266 | 
         
            +
            ```python
         
     | 
| 267 | 
         
            +
            from transformers import AutoTokenizer
         
     | 
| 268 | 
         
            +
            from vllm import LLM, SamplingParams
         
     | 
| 269 | 
         
            +
             
     | 
| 270 | 
         
            +
            max_model_len, tp_size = 8192, 8
         
     | 
| 271 | 
         
            +
            model_name = ""deepseek-ai/DeepSeek-V2-Chat""
         
     | 
| 272 | 
         
            +
            tokenizer = AutoTokenizer.from_pretrained(model_name)
         
     | 
| 273 | 
         
            +
            llm = LLM(model=model_name, tensor_parallel_size=tp_size, max_model_len=max_model_len, trust_remote_code=True, enforce_eager=True)
         
     | 
| 274 | 
         
            +
            sampling_params = SamplingParams(temperature=0.3, max_tokens=256, stop_token_ids=[tokenizer.eos_token_id])
         
     | 
| 275 | 
         
            +
             
     | 
| 276 | 
         
            +
            messages_list = [
         
     | 
| 277 | 
         
            +
                [{""role"": ""user"", ""content"": ""Who are you?""}],
         
     | 
| 278 | 
         
            +
                [{""role"": ""user"", ""content"": ""Translate the following content into Chinese directly: DeepSeek-V2 adopts innovative architectures to guarantee economical training and efficient inference.""}],
         
     | 
| 279 | 
         
            +
                [{""role"": ""user"", ""content"": ""Write a piece of quicksort code in C++.""}],
         
     | 
| 280 | 
         
            +
            ]
         
     | 
| 281 | 
         
            +
             
     | 
| 282 | 
         
            +
            prompt_token_ids = [tokenizer.apply_chat_template(messages, add_generation_prompt=True) for messages in messages_list]
         
     | 
| 283 | 
         
            +
             
     | 
| 284 | 
         
            +
            outputs = llm.generate(prompt_token_ids=prompt_token_ids, sampling_params=sampling_params)
         
     | 
| 285 | 
         
            +
             
     | 
| 286 | 
         
            +
            generated_text = [output.outputs[0].text for output in outputs]
         
     | 
| 287 | 
         
            +
            print(generated_text)
         
     | 
| 288 | 
         
            +
            ```
         
     | 
| 289 | 
         
            +
             
     | 
| 290 | 
         
            +
            ## 8. License
         
     | 
| 291 | 
         
            +
            This code repository is licensed under [the MIT License](LICENSE-CODE). The use of DeepSeek-V2 Base/Chat models is subject to [the Model License](LICENSE-MODEL). DeepSeek-V2 series (including Base and Chat) supports commercial use.
         
     | 
| 292 | 
         
            +
             
     | 
| 293 | 
         
            +
            ## 9. Citation
         
     | 
| 294 | 
         
            +
            ```
         
     | 
| 295 | 
         
            +
            @misc{deepseekv2,
         
     | 
| 296 | 
         
            +
                  title={DeepSeek-V2: A Strong, Economical, and Efficient Mixture-of-Experts Language Model}, 
         
     | 
| 297 | 
         
            +
                  author={DeepSeek-AI},
         
     | 
| 298 | 
         
            +
                  year={2024},
         
     | 
| 299 | 
         
            +
                  eprint={2405.04434},
         
     | 
| 300 | 
         
            +
                  archivePrefix={arXiv},
         
     | 
| 301 | 
         
            +
                  primaryClass={cs.CL}
         
     | 
| 302 | 
         
            +
            }
         
     | 
| 303 | 
         
            +
            ```
         
     | 
| 304 | 
         
            +
             
     | 
| 305 | 
         
            +
            ## 10. Contact
         
     | 
| 306 | 
         
            +
            If you have any questions, please raise an issue or contact us at [service@deepseek.com](mailto:service@deepseek.com).
         
     | 
| 307 | 
         
            +
            ","{""id"": ""deepseek-ai/DeepSeek-V2-Chat"", ""author"": ""deepseek-ai"", ""sha"": ""8e3f5f6c2226787e41ba3e9283a06389d178c926"", ""last_modified"": ""2024-06-08 09:14:11+00:00"", ""created_at"": ""2024-04-28 09:53:56+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1422, ""downloads_all_time"": null, ""likes"": 461, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""deepseek_v2"", ""text-generation"", ""conversational"", ""custom_code"", ""arxiv:2405.04434"", ""arxiv:2311.18743"", ""license:other"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: other\nlicense_name: deepseek\nlicense_link: https://github.com/deepseek-ai/DeepSeek-V2/blob/main/LICENSE-MODEL"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""DeepseekV2ForCausalLM""], ""auto_map"": {""AutoConfig"": ""configuration_deepseek.DeepseekV2Config"", ""AutoModel"": ""modeling_deepseek.DeepseekV2Model"", ""AutoModelForCausalLM"": ""modeling_deepseek.DeepseekV2ForCausalLM""}, ""model_type"": ""deepseek_v2"", ""tokenizer_config"": {""bos_token"": {""__type"": ""AddedToken"", ""content"": ""<\uff5cbegin\u2581of\u2581sentence\uff5c>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""eos_token"": {""__type"": ""AddedToken"", ""content"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""pad_token"": {""__type"": ""AddedToken"", ""content"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""unk_token"": null, ""chat_template"": ""{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{% if message['role'] == 'user' %}{{ 'User: ' + message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}{{ 'Assistant: ' + message['content'] + eos_token }}{% elif message['role'] == 'system' %}{{ message['content'] + '\n\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_deepseek.DeepseekV2ForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_deepseek.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00003-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00012-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00013-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00014-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00015-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00016-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00017-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00018-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00019-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00020-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00021-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00022-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00023-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00024-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00025-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00026-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00027-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00028-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00029-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00030-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00031-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00032-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00033-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00034-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00035-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00036-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00037-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00038-of-000055.safetensors', size=None, blob_id=None, 
lfs=None)"", ""RepoSibling(rfilename='model-00039-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00040-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00041-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00042-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00043-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00044-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00045-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00046-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00047-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00048-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00049-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00050-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00051-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00052-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00053-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00054-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00055-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_deepseek.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_deepseek_fast.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""allenai/WildBench"", ""allenai/ZebraLogic"", ""KBaba7/Quant"", ""Justinrune/LLaMA-Factory"", ""kenken999/fastapi_django_main_live"", ""WildEval/ZebraLogic"", ""bhaskartripathi/LLM_Quantization"", ""totolook/Quant"", ""FallnAI/Quantize-HF-Models"", ""ruslanmv/convert_to_gguf"", ""msun415/Llamole"", ""K00B404/LLM_Quantization"", ""SamiKLN/DocImageAI-Explorer""], ""safetensors"": {""parameters"": {""BF16"": 235741434880}, ""total"": 235741434880}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-06-08 09:14:11+00:00"", ""cardData"": ""license: other\nlicense_name: deepseek\nlicense_link: https://github.com/deepseek-ai/DeepSeek-V2/blob/main/LICENSE-MODEL"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_deepseek.DeepseekV2ForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""662e1cb4ed1ec29a593bcc36"", ""modelId"": ""deepseek-ai/DeepSeek-V2-Chat"", ""usedStorage"": 471486512376}",0,https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF,1,,0,https://huggingface.co/mradermacher/DeepSeek-V2-Chat-i1-GGUF,1,,0,"FallnAI/Quantize-HF-Models, Justinrune/LLaMA-Factory, K00B404/LLM_Quantization, KBaba7/Quant, WildEval/ZebraLogic, allenai/WildBench, allenai/ZebraLogic, bhaskartripathi/LLM_Quantization, 
huggingface/InferenceSupport/discussions/new?title=deepseek-ai/DeepSeek-V2-Chat&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bdeepseek-ai%2FDeepSeek-V2-Chat%5D(%2Fdeepseek-ai%2FDeepSeek-V2-Chat)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kenken999/fastapi_django_main_live, msun415/Llamole, ruslanmv/convert_to_gguf, totolook/Quant",13
         
mradermacher/DeepSeek-V2-Chat-GGUF,"---
base_model: deepseek-ai/DeepSeek-V2-Chat
language:
- en
library_name: transformers
license: other
license_link: https://github.com/deepseek-ai/DeepSeek-V2/blob/main/LICENSE-MODEL
license_name: deepseek
quantized_by: mradermacher
---
## About

<!-- ### quantize_version: 2 -->
<!-- ### output_tensor_quantised: 1 -->
<!-- ### convert_type: hf -->
<!-- ### vocab_type:  -->
<!-- ### tags:  -->
static quants of https://huggingface.co/deepseek-ai/DeepSeek-V2-Chat

<!-- provided-files -->
weighted/imatrix quants are available at https://huggingface.co/mradermacher/DeepSeek-V2-Chat-i1-GGUF
## Usage

If you are unsure how to use GGUF files, refer to one of [TheBloke's
READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for
more details, including on how to concatenate multi-part files.
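For instance, a split quant such as the Q2_K entry in the table below can be joined by plain byte concatenation before loading — a minimal sketch, not taken from this card:

```python
# Sketch: join a split GGUF quant into a single file before loading it.
# The part filenames follow the Q2_K entry listed in the table further down.
import shutil

parts = [
    'DeepSeek-V2-Chat.Q2_K.gguf.part1of2',
    'DeepSeek-V2-Chat.Q2_K.gguf.part2of2',
]
with open('DeepSeek-V2-Chat.Q2_K.gguf', 'wb') as out:
    for part in parts:
        with open(part, 'rb') as src:
            shutil.copyfileobj(src, out)  # byte-for-byte concatenation, same as `cat`
```

On the command line, `cat part1of2 part2of2 > DeepSeek-V2-Chat.Q2_K.gguf` achieves the same result.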
         
## Provided Quants

(sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants)

| Link | Type | Size/GB | Notes |
|:-----|:-----|--------:|:------|
            | [PART 1](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.Q2_K.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.Q2_K.gguf.part2of2) | Q2_K | 86.0 |  |
         
            | [PART 1](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.IQ3_XS.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.IQ3_XS.gguf.part2of2) | IQ3_XS | 96.4 |  |
         
            | [PART 1](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.IQ3_S.gguf.part1of3) [PART 2](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.IQ3_S.gguf.part2of3) [PART 3](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.IQ3_S.gguf.part3of3) | IQ3_S | 101.8 | beats Q3_K* |
         
            | [PART 1](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.Q3_K_S.gguf.part1of3) [PART 2](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.Q3_K_S.gguf.part2of3) [PART 3](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.Q3_K_S.gguf.part3of3) | Q3_K_S | 101.8 |  |
         
            | [PART 1](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.IQ3_M.gguf.part1of3) [PART 2](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.IQ3_M.gguf.part2of3) [PART 3](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.IQ3_M.gguf.part3of3) | IQ3_M | 103.5 |  |
         
            | [PART 1](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.Q3_K_M.gguf.part1of3) [PART 2](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.Q3_K_M.gguf.part2of3) [PART 3](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.Q3_K_M.gguf.part3of3) | Q3_K_M | 112.8 | lower quality |
         
            | [PART 1](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.Q3_K_L.gguf.part1of3) [PART 2](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.Q3_K_L.gguf.part2of3) [PART 3](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.Q3_K_L.gguf.part3of3) | Q3_K_L | 122.5 |  |
         
            | [PART 1](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.IQ4_XS.gguf.part1of3) [PART 2](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.IQ4_XS.gguf.part2of3) [PART 3](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.IQ4_XS.gguf.part3of3) | IQ4_XS | 126.9 |  |
         
            | [PART 1](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.Q4_K_S.gguf.part1of3) [PART 2](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.Q4_K_S.gguf.part2of3) [PART 3](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.Q4_K_S.gguf.part3of3) | Q4_K_S | 134.0 | fast, recommended |
         
            | [PART 1](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.Q4_K_M.gguf.part1of3) [PART 2](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.Q4_K_M.gguf.part2of3) [PART 3](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.Q4_K_M.gguf.part3of3) | Q4_K_M | 142.6 | fast, recommended |
         
            | [PART 1](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.Q5_K_S.gguf.part1of4) [PART 2](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.Q5_K_S.gguf.part2of4) [PART 3](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.Q5_K_S.gguf.part3of4) [PART 4](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.Q5_K_S.gguf.part4of4) | Q5_K_S | 162.4 |  |
         
            | [PART 1](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.Q5_K_M.gguf.part1of4) [PART 2](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.Q5_K_M.gguf.part2of4) [PART 3](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.Q5_K_M.gguf.part3of4) [PART 4](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.Q5_K_M.gguf.part4of4) | Q5_K_M | 167.3 |  |
         
            | [PART 1](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.Q6_K.gguf.part1of4) [PART 2](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.Q6_K.gguf.part2of4) [PART 3](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.Q6_K.gguf.part3of4) [PART 4](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.Q6_K.gguf.part4of4) | Q6_K | 193.6 | very good quality |
         
            | [P1](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.Q8_0.gguf.part1of6) [P2](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.Q8_0.gguf.part2of6) [P3](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.Q8_0.gguf.part3of6) [P4](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.Q8_0.gguf.part4of6) [P5](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.Q8_0.gguf.part5of6) [P6](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-GGUF/resolve/main/DeepSeek-V2-Chat.Q8_0.gguf.part6of6) | Q8_0 | 250.7 | fast, best quality |
         
Here is a handy graph by ikawrakow comparing some lower-quality quant
types (lower is better):

And here are Artefact2's thoughts on the matter:
https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9

## FAQ / Model Request

See https://huggingface.co/mradermacher/model_requests for some answers to
questions you might have and/or if you want some other model quantized.

## Thanks

I thank my company, [nethype GmbH](https://www.nethype.de/), for letting
me use its servers and providing upgrades to my workstation to enable
this work in my free time.

<!-- end -->
            ","{""id"": ""mradermacher/DeepSeek-V2-Chat-GGUF"", ""author"": ""mradermacher"", ""sha"": ""102154eb4ab4b0e9854f7716bac4cbdf77add2a4"", ""last_modified"": ""2024-07-04 17:36:15+00:00"", ""created_at"": ""2024-07-03 02:49:51+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 1, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""en"", ""base_model:deepseek-ai/DeepSeek-V2-Chat"", ""base_model:finetune:deepseek-ai/DeepSeek-V2-Chat"", ""license:other"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: deepseek-ai/DeepSeek-V2-Chat\nlanguage:\n- en\nlibrary_name: transformers\nlicense: other\nlicense_name: deepseek\nlicense_link: https://github.com/deepseek-ai/DeepSeek-V2/blob/main/LICENSE-MODEL\nquantized_by: mradermacher"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.IQ3_M.gguf.part1of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.IQ3_M.gguf.part2of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.IQ3_M.gguf.part3of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.IQ3_S.gguf.part1of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.IQ3_S.gguf.part2of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.IQ3_S.gguf.part3of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.IQ3_XS.gguf.part1of2', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.IQ3_XS.gguf.part2of2', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.IQ4_XS.gguf.part1of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.IQ4_XS.gguf.part2of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.IQ4_XS.gguf.part3of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.Q2_K.gguf.part1of2', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.Q2_K.gguf.part2of2', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.Q3_K_L.gguf.part1of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.Q3_K_L.gguf.part2of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.Q3_K_L.gguf.part3of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.Q3_K_M.gguf.part1of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.Q3_K_M.gguf.part2of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.Q3_K_M.gguf.part3of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.Q3_K_S.gguf.part1of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.Q3_K_S.gguf.part2of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.Q3_K_S.gguf.part3of3', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='DeepSeek-V2-Chat.Q4_K_M.gguf.part1of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.Q4_K_M.gguf.part2of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.Q4_K_M.gguf.part3of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.Q4_K_S.gguf.part1of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.Q4_K_S.gguf.part2of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.Q4_K_S.gguf.part3of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.Q5_K_M.gguf.part1of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.Q5_K_M.gguf.part2of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.Q5_K_M.gguf.part3of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.Q5_K_M.gguf.part4of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.Q5_K_S.gguf.part1of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.Q5_K_S.gguf.part2of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.Q5_K_S.gguf.part3of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.Q5_K_S.gguf.part4of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.Q6_K.gguf.part1of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.Q6_K.gguf.part2of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.Q6_K.gguf.part3of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.Q6_K.gguf.part4of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.Q8_0.gguf.part1of6', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.Q8_0.gguf.part2of6', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.Q8_0.gguf.part3of6', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.Q8_0.gguf.part4of6', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.Q8_0.gguf.part5of6', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat.Q8_0.gguf.part6of6', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-07-04 17:36:15+00:00"", ""cardData"": ""base_model: deepseek-ai/DeepSeek-V2-Chat\nlanguage:\n- en\nlibrary_name: transformers\nlicense: other\nlicense_name: deepseek\nlicense_link: https://github.com/deepseek-ai/DeepSeek-V2/blob/main/LICENSE-MODEL\nquantized_by: mradermacher"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""6684bc4f6b4de82b9e3244ed"", ""modelId"": ""mradermacher/DeepSeek-V2-Chat-GGUF"", ""usedStorage"": 
1900813253184}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=mradermacher/DeepSeek-V2-Chat-GGUF&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bmradermacher%2FDeepSeek-V2-Chat-GGUF%5D(%2Fmradermacher%2FDeepSeek-V2-Chat-GGUF)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
DeepSeek-V3-Base_finetunes_20250424_193500.csv_finetunes_20250424_193500.csv
ADDED
The diff for this file is too large to render. See raw diff.

DeepSeek-V3_finetunes_20250422_220003.csv
ADDED
The diff for this file is too large to render. See raw diff.

DepthPro_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv
ADDED
@@ -0,0 +1,199 @@
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
apple/DepthPro,"---
license: apple-amlr
pipeline_tag: depth-estimation
library_name: depth-pro
---

# Depth Pro: Sharp Monocular Metric Depth in Less Than a Second


We present a foundation model for zero-shot metric monocular depth estimation. Our model, Depth Pro, synthesizes high-resolution depth maps with unparalleled sharpness and high-frequency details. The predictions are metric, with absolute scale, without relying on the availability of metadata such as camera intrinsics. And the model is fast, producing a 2.25-megapixel depth map in 0.3 seconds on a standard GPU. These characteristics are enabled by a number of technical contributions, including an efficient multi-scale vision transformer for dense prediction, a training protocol that combines real and synthetic datasets to achieve high metric accuracy alongside fine boundary tracing, dedicated evaluation metrics for boundary accuracy in estimated depth maps, and state-of-the-art focal length estimation from a single image.

Depth Pro was introduced in **[Depth Pro: Sharp Monocular Metric Depth in Less Than a Second](https://arxiv.org/abs/2410.02073)**, by *Aleksei Bochkovskii, Amaël Delaunoy, Hugo Germain, Marcel Santos, Yichao Zhou, Stephan R. Richter, and Vladlen Koltun*.

The checkpoint in this repository is a reference implementation, which has been re-trained. Its performance is close to the model reported in the paper but does not match it exactly.

## How to Use

Please follow the steps in the [code repository](https://github.com/apple/ml-depth-pro) to set up your environment. Then you can download the checkpoint from the _Files and versions_ tab above, or use the `huggingface-hub` CLI:

```bash
pip install huggingface-hub
huggingface-cli download --local-dir checkpoints apple/DepthPro
```

### Running from commandline

The code repo provides a helper script to run the model on a single image:

```bash
# Run prediction on a single image:
depth-pro-run -i ./data/example.jpg
# Run `depth-pro-run -h` for available options.
```

### Running from Python

```python
from PIL import Image
import depth_pro

# Load model and preprocessing transform
model, transform = depth_pro.create_model_and_transforms()
model.eval()

# Load and preprocess an image.
image, _, f_px = depth_pro.load_rgb(image_path)
image = transform(image)

# Run inference.
prediction = model.infer(image, f_px=f_px)
depth = prediction[""depth""]  # Depth in [m].
focallength_px = prediction[""focallength_px""]  # Focal length in pixels.
```

### Evaluation (boundary metrics)

Boundary metrics are implemented in `eval/boundary_metrics.py` and can be used as follows:

```python
# for a depth-based dataset
boundary_f1 = SI_boundary_F1(predicted_depth, target_depth)

# for a mask-based dataset (image matting / segmentation)
boundary_recall = SI_boundary_Recall(predicted_depth, target_mask)
```


## Citation

If you find our work useful, please cite the following paper:

```bibtex
@article{Bochkovskii2024:arxiv,
  author     = {Aleksei Bochkovskii and Ama\""{e}l Delaunoy and Hugo Germain and Marcel Santos and
                Yichao Zhou and Stephan R. Richter and Vladlen Koltun},
  title      = {Depth Pro: Sharp Monocular Metric Depth in Less Than a Second},
  journal    = {arXiv},
  year       = {2024},
}
```

## Acknowledgements

Our codebase is built using multiple opensource contributions, please see [Acknowledgements](https://github.com/apple/ml-depth-pro/blob/main/ACKNOWLEDGEMENTS.md) for more details.

Please check the paper for a complete list of references and datasets used in this work.
            ","{""id"": ""apple/DepthPro"", ""author"": ""apple"", ""sha"": ""ccd1350a774eb2248bcdfb3be430e38f1d3087ef"", ""last_modified"": ""2025-02-28 18:31:41+00:00"", ""created_at"": ""2024-10-03 14:45:37+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 2204, ""downloads_all_time"": null, ""likes"": 424, ""library_name"": ""depth-pro"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""depth-pro"", ""depth-estimation"", ""arxiv:2410.02073"", ""license:apple-amlr"", ""region:us""], ""pipeline_tag"": ""depth-estimation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""library_name: depth-pro\nlicense: apple-amlr\npipeline_tag: depth-estimation"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='depth_pro.pt', size=None, blob_id=None, lfs=None)""], ""spaces"": [""akhaliq/depth-pro"", ""A19grey/Depth-Pro-in-Meters"", ""cyun9286/Align3R"", ""oxkitsune/rerun-ml-depth-pro"", ""lightly-ai/ml-depth-pro"", ""ra-led/depth-pro-normalized"", ""huashenhuajia/depth-pro"", ""hamdouch/ml-depth-pro""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-02-28 18:31:41+00:00"", ""cardData"": ""library_name: depth-pro\nlicense: apple-amlr\npipeline_tag: depth-estimation"", ""transformersInfo"": null, ""_id"": ""66feae111e0b212adcd8809d"", ""modelId"": ""apple/DepthPro"", ""usedStorage"": 2180144485}",0,"https://huggingface.co/julienkay/sentis-DepthPro-uint8, https://huggingface.co/takara-ai/DepthPro-Safetensors, https://huggingface.co/julienkay/sentis-DepthPro, https://huggingface.co/julienkay/sentis-DepthPro-float16",4,,0,"https://huggingface.co/onnx-community/DepthPro-ONNX, https://huggingface.co/KeighBee/coreml-DepthPro, https://huggingface.co/coreml-projects/DepthPro-coreml, https://huggingface.co/coreml-projects/DepthPro-coreml-normalized-inverse-depth, https://huggingface.co/coreml-projects/DepthPro-coreml-pruned-10-quantized-linear, https://huggingface.co/coreml-projects/DepthPro-coreml-normalized-inverse-depth-pruned-10-quantized-linear",6,,0,"A19grey/Depth-Pro-in-Meters, akhaliq/depth-pro, cyun9286/Align3R, hamdouch/ml-depth-pro, huashenhuajia/depth-pro, huggingface/InferenceSupport/discussions/1019, lightly-ai/ml-depth-pro, oxkitsune/rerun-ml-depth-pro, ra-led/depth-pro-normalized",9
         
julienkay/sentis-DepthPro-uint8,"---
license: apple-ascl
base_model:
- apple/DepthPro
library_name: unity-sentis
pipeline_tag: depth-estimation
---
The [DepthPro](https://huggingface.co/apple/DepthPro) model converted to [Unity Sentis](https://unity.com/products/sentis)

            The model uses a static input with shape (1, 3, 1536, 1536). The image is expected to be in the [-1.0, 1.0] range. Models were converted and quantized to uint8 format using Sentis v2.1.0","{""id"": ""julienkay/sentis-DepthPro-uint8"", ""author"": ""julienkay"", ""sha"": ""14beb53a00c402619a135d0f05c3885a17c83d60"", ""last_modified"": ""2024-10-10 16:48:53+00:00"", ""created_at"": ""2024-10-10 16:18:23+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 5, ""downloads_all_time"": null, ""likes"": 2, ""library_name"": ""unity-sentis"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""unity-sentis"", ""depth-estimation"", ""base_model:apple/DepthPro"", ""base_model:finetune:apple/DepthPro"", ""license:apple-ascl"", ""region:us""], ""pipeline_tag"": ""depth-estimation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- apple/DepthPro\nlibrary_name: unity-sentis\nlicense: apple-ascl\npipeline_tag: depth-estimation"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='depth_pro.sentis', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-10 16:48:53+00:00"", ""cardData"": ""base_model:\n- apple/DepthPro\nlibrary_name: unity-sentis\nlicense: apple-ascl\npipeline_tag: depth-estimation"", ""transformersInfo"": null, ""_id"": ""6707fe4f360c1af0b4e8063c"", ""modelId"": ""julienkay/sentis-DepthPro-uint8"", ""usedStorage"": 958249776}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=julienkay/sentis-DepthPro-uint8&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bjulienkay%2Fsentis-DepthPro-uint8%5D(%2Fjulienkay%2Fsentis-DepthPro-uint8)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
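The static (1, 3, 1536, 1536) input and [-1.0, 1.0] value range described for these Sentis conversions can be prepared with a few lines of preprocessing — a sketch for illustration only; NumPy, Pillow, and the file name are assumptions, not part of the card:

```python
# Sketch: build the (1, 3, 1536, 1536) input in the [-1.0, 1.0] range described above.
import numpy as np
from PIL import Image

img = Image.open('example.jpg').convert('RGB').resize((1536, 1536))
x = np.asarray(img, dtype=np.float32) / 127.5 - 1.0   # [0, 255] -> [-1.0, 1.0]
x = np.transpose(x, (2, 0, 1))[None, ...]              # HWC -> (1, 3, 1536, 1536)
```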
         
takara-ai/DepthPro-Safetensors,"---
license: apple-ascl
pipeline_tag: depth-estimation
library_name: pytorch
base_model:
- apple/DepthPro
tags:
- safetensors
---

<img src=""https://takara.ai/images/logo-24/TakaraAi.svg"" width=""200"" alt=""Takara.ai Logo"" />

From the Frontier Research Team at **Takara.ai** we present **DepthPro-Safetensors**, a memory-efficient and optimized implementation of Apple's high-precision depth estimation model.

---

# DepthPro-Safetensors

This repository contains Apple's [DepthPro](https://huggingface.co/apple/DepthPro) depth estimation model converted to the SafeTensors format for improved memory efficiency, security, and faster loading times.

## Model Overview

DepthPro is a state-of-the-art monocular depth estimation model developed by Apple that produces sharp and accurate metric depth maps from a single image in less than a second. This converted version preserves all the capabilities of the original model while providing the benefits of the SafeTensors format.

## Technical Specifications

- **Total Parameters**: 951,991,330
- **Memory Usage**: 1815.78 MB
- **Precision**: torch.float16
- **Estimated FLOPs**: 3,501,896,768

_Details calculated with [TensorKIKO](https://github.com/takara-ai/TensorKiko)_
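The parameter count and precision listed above can be sanity-checked against the downloaded `depth_pro.safetensors` file — a sketch, not part of the original card:

```python
# Sketch: count parameters in the downloaded depth_pro.safetensors file and
# compare with the 951,991,330 total stated above.
from safetensors import safe_open

total = 0
with safe_open('depth_pro.safetensors', framework='pt') as f:
    for name in f.keys():
        total += f.get_tensor(name).numel()
print(total)
```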
         
## Usage

```python
from transformers import AutoModelForDepthEstimation, AutoImageProcessor
import torch
from PIL import Image

# Load model and processor
model = AutoModelForDepthEstimation.from_pretrained(""takara-ai/DepthPro-Safetensors"")
processor = AutoImageProcessor.from_pretrained(""takara-ai/DepthPro-Safetensors"")

# Prepare image
image = Image.open(""your_image.jpg"")
inputs = processor(images=image, return_tensors=""pt"")

# Inference
with torch.no_grad():
    outputs = model(**inputs)
    predicted_depth = outputs.predicted_depth

# Post-process for visualization
depth_map = processor.post_process_depth_estimation(outputs, target_size=image.size[::-1])
```

## Benefits of SafeTensors Format

- **Improved Security**: Resistant to code execution vulnerabilities
- **Faster Loading Times**: Optimized memory mapping for quicker model initialization
- **Memory Efficiency**: Better handling of tensor storage for reduced memory footprint
- **Parallel Loading**: Support for efficient parallel tensor loading

## Citation

```bibtex
@article{Bochkovskii2024:arxiv,
  author     = {Aleksei Bochkovskii and Ama\""{e}l Delaunoy and Hugo Germain and Marcel Santos and
                Yichao Zhou and Stephan R. Richter and Vladlen Koltun},
  title      = {Depth Pro: Sharp Monocular Metric Depth in Less Than a Second},
  journal    = {arXiv},
  year       = {2024},
}
```

---

For research inquiries and press, please reach out to research@takara.ai

            > 人類を変革する","{""id"": ""takara-ai/DepthPro-Safetensors"", ""author"": ""takara-ai"", ""sha"": ""5fd1d4e7308abb5b0cd5bcd55a84db5bf2ac71a2"", ""last_modified"": ""2025-02-25 14:33:29+00:00"", ""created_at"": ""2024-10-09 23:57:38+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 1, ""library_name"": ""pytorch"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""pytorch"", ""safetensors"", ""depth-estimation"", ""base_model:apple/DepthPro"", ""base_model:finetune:apple/DepthPro"", ""license:apple-ascl"", ""region:us""], ""pipeline_tag"": ""depth-estimation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- apple/DepthPro\nlibrary_name: pytorch\nlicense: apple-ascl\npipeline_tag: depth-estimation\ntags:\n- safetensors"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='depth_pro.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-02-25 14:33:29+00:00"", ""cardData"": ""base_model:\n- apple/DepthPro\nlibrary_name: pytorch\nlicense: apple-ascl\npipeline_tag: depth-estimation\ntags:\n- safetensors"", ""transformersInfo"": null, ""_id"": ""670718726d79c979623fac2d"", ""modelId"": ""takara-ai/DepthPro-Safetensors"", ""usedStorage"": 1904109940}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=takara-ai/DepthPro-Safetensors&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Btakara-ai%2FDepthPro-Safetensors%5D(%2Ftakara-ai%2FDepthPro-Safetensors)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
julienkay/sentis-DepthPro,"---
license: apple-ascl
base_model:
- apple/DepthPro
library_name: unity-sentis
pipeline_tag: depth-estimation
---
The [DepthPro](https://huggingface.co/apple/DepthPro) model converted to [Unity Sentis](https://unity.com/products/sentis)

            The model uses a static input with shape (1, 3, 1536, 1536). The image is expected to be in the [-1.0, 1.0] range. Models were converted using Sentis v2.1.0","{""id"": ""julienkay/sentis-DepthPro"", ""author"": ""julienkay"", ""sha"": ""ed577cf18e322f9e1d2c9e0bd315bda8fcddd234"", ""last_modified"": ""2024-10-10 16:32:50+00:00"", ""created_at"": ""2024-10-10 15:53:28+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 10, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""unity-sentis"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""unity-sentis"", ""depth-estimation"", ""base_model:apple/DepthPro"", ""base_model:finetune:apple/DepthPro"", ""license:apple-ascl"", ""region:us""], ""pipeline_tag"": ""depth-estimation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- apple/DepthPro\nlibrary_name: unity-sentis\nlicense: apple-ascl\npipeline_tag: depth-estimation"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='depth_pro.sentis', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-10 16:32:50+00:00"", ""cardData"": ""base_model:\n- apple/DepthPro\nlibrary_name: unity-sentis\nlicense: apple-ascl\npipeline_tag: depth-estimation"", ""transformersInfo"": null, ""_id"": ""6707f878caf70ddea8a5a925"", ""modelId"": ""julienkay/sentis-DepthPro"", ""usedStorage"": 3803767096}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=julienkay/sentis-DepthPro&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bjulienkay%2Fsentis-DepthPro%5D(%2Fjulienkay%2Fsentis-DepthPro)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
julienkay/sentis-DepthPro-float16,"---
license: apple-ascl
base_model:
- apple/DepthPro
library_name: unity-sentis
pipeline_tag: depth-estimation
---
The [DepthPro](https://huggingface.co/apple/DepthPro) model converted to [Unity Sentis](https://unity.com/products/sentis)

            The model uses a static input with shape (1, 3, 1536, 1536). The image is expected to be in the [-1.0, 1.0] range. Models were converted and quantized to float16 format using Sentis v2.1.0","{""id"": ""julienkay/sentis-DepthPro-float16"", ""author"": ""julienkay"", ""sha"": ""f4ee5d683d5a0ca83184c64c4f242b8ae0314b82"", ""last_modified"": ""2024-10-10 16:40:51+00:00"", ""created_at"": ""2024-10-10 16:17:03+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1, ""downloads_all_time"": null, ""likes"": 1, ""library_name"": ""unity-sentis"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""unity-sentis"", ""depth-estimation"", ""base_model:apple/DepthPro"", ""base_model:finetune:apple/DepthPro"", ""license:apple-ascl"", ""region:us""], ""pipeline_tag"": ""depth-estimation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- apple/DepthPro\nlibrary_name: unity-sentis\nlicense: apple-ascl\npipeline_tag: depth-estimation"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='depth_pro.sentis', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-10 16:40:51+00:00"", ""cardData"": ""base_model:\n- apple/DepthPro\nlibrary_name: unity-sentis\nlicense: apple-ascl\npipeline_tag: depth-estimation"", ""transformersInfo"": null, ""_id"": ""6707fdffffe347c4579e1c93"", ""modelId"": ""julienkay/sentis-DepthPro-float16"", ""usedStorage"": 1906815544}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=julienkay/sentis-DepthPro-float16&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bjulienkay%2Fsentis-DepthPro-float16%5D(%2Fjulienkay%2Fsentis-DepthPro-float16)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
Double-Exposure-Diffusion_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv
ADDED
@@ -0,0 +1,67 @@
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
joachimsallstrom/Double-Exposure-Diffusion,"---
license: creativeml-openrail-m
tags:
- stable-diffusion
- text-to-image
---

[*Click here to download the latest Double Exposure embedding for SD 2.x in higher resolution*](https://huggingface.co/joachimsallstrom/Double-Exposure-Embedding)!

**Double Exposure Diffusion**

This is version 2 of the <i>Double Exposure Diffusion</i> model, trained specifically on images of people and a few animals.
The model file (Double_Exposure_v2.ckpt) can be downloaded on the **Files** page. You trigger double exposure style images using token: **_dublex style_** or just **_dublex_**.

**Example 1:**

#### Example prompts and settings

<i>Galaxy man (image 1):</i><br>
**dublex man galaxy**<br>
_Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 3273014177, Size: 512x512_

<i>Emma Stone (image 2):</i><br>
**dublex style Emma Stone, galaxy**<br>
_Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 250257155, Size: 512x512_

<i>Frodo (image 6):</i><br>
**dublex style young Elijah Wood as (Frodo), portrait, dark nature**<br>
_Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 3717002975, Size: 512x512_
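These settings map directly onto a diffusers call, since the repository also ships diffusers-format weights alongside the .ckpt files — a hedged sketch reproducing the Galaxy man example above; `EulerAncestralDiscreteScheduler` stands in for the 'Euler a' sampler, and exact seed reproduction is not guaranteed across hardware:

```python
# Sketch: reproduce the 'Galaxy man' settings above with diffusers.
import torch
from diffusers import StableDiffusionPipeline, EulerAncestralDiscreteScheduler

pipe = StableDiffusionPipeline.from_pretrained(
    'joachimsallstrom/Double-Exposure-Diffusion', torch_dtype=torch.float16
).to('cuda')
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)

image = pipe(
    'dublex man galaxy',
    num_inference_steps=20,
    guidance_scale=7,
    height=512,
    width=512,
    generator=torch.Generator('cuda').manual_seed(3273014177),
).images[0]
image.save('dublex_galaxy_man.png')
```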
         
     | 
| 32 | 
         
            +
             
     | 
| 33 | 
         
            +
            <br>
         
     | 
| 34 | 
         
            +
             
     | 
| 35 | 
         
            +
            **Example 2:**
         
     | 
| 36 | 
         
            +
            
         
     | 
| 37 | 
         
            +
             
     | 
| 38 | 
         
            +
            #### Example prompts and settings
         
     | 
| 39 | 
         
            +
             
     | 
| 40 | 
         
            +
            <i>Scarlett Johansson (image 1):</i><br>
         
     | 
| 41 | 
         
            +
            **dublex Scarlett Johansson, (haunted house), black background**<br>
         
     | 
| 42 | 
         
            +
            _Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 3059560186, Size: 512x512_
         
     | 
| 43 | 
         
            +
             
     | 
| 44 | 
         
            +
            <i>Frozen Elsa (image 3):</i><br>
         
     | 
| 45 | 
         
            +
            **dublex style Elsa, ice castle**<br>
         
     | 
| 46 | 
         
            +
            _Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 2867934627, Size: 512x512_
         
     | 
| 47 | 
         
            +
             
     | 
| 48 | 
         
            +
            <i>Wolf (image 4):</i><br>
         
     | 
| 49 | 
         
            +
            **dublex style wolf closeup, moon**<br>
         
     | 
| 50 | 
         
            +
            _Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 312924946, Size: 512x512_
         
     | 
| 51 | 
         
            +
             
     | 
| 52 | 
         
            +
            <br>
         
     | 
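The settings above can also be reproduced with `diffusers`, since this repository ships diffusers-format weights. A minimal sketch, assuming a CUDA GPU and mapping the Euler a sampler to `EulerAncestralDiscreteScheduler`:

```python
import torch
from diffusers import StableDiffusionPipeline, EulerAncestralDiscreteScheduler

# Load the diffusers-format weights shipped in this repository.
pipe = StableDiffusionPipeline.from_pretrained(
    'joachimsallstrom/Double-Exposure-Diffusion', torch_dtype=torch.float16
).to('cuda')

# Euler a in the settings above corresponds to the ancestral Euler scheduler.
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)

# Galaxy man (image 1): trigger the style with the dublex token.
image = pipe(
    'dublex man galaxy',
    num_inference_steps=20,
    guidance_scale=7,
    height=512,
    width=512,
    generator=torch.Generator('cuda').manual_seed(3273014177),
).images[0]
image.save('dublex_galaxy_man.png')
```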
| 53 | 
         
            +
            <p>
         
     | 
| 54 | 
         
            +
This model was trained with Shivam's DreamBooth implementation on Google Colab for 2000 steps.
         
     | 
| 55 | 
         
            +
            </p>
         
     | 
| 56 | 
         
            +
             
     | 
| 57 | 
         
            +
            The previous version 1 of Double Exposure Diffusion is also available in the **Files** section.
         
     | 
| 58 | 
         
            +
             
     | 
| 59 | 
         
            +
            ## License
         
     | 
| 60 | 
         
            +
             
     | 
| 61 | 
         
            +
            This model is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage.
         
     | 
| 62 | 
         
            +
            The CreativeML OpenRAIL License specifies: 
         
     | 
| 63 | 
         
            +
             
     | 
| 64 | 
         
            +
1. You can't use the model to deliberately produce or share illegal or harmful outputs or content
         
     | 
| 65 | 
         
            +
2. The authors claim no rights over the outputs you generate; you are free to use them and are accountable for their use, which must not go against the provisions set in the license
         
     | 
| 66 | 
         
            +
3. You may redistribute the weights and use the model commercially and/or as a service. If you do, please be aware that you have to include the same use restrictions as in the license and share a copy of the CreativeML OpenRAIL-M with all your users (please read the license entirely and carefully)
         
     | 
| 67 | 
         
            +
            [Please read the full license here](https://huggingface.co/spaces/CompVis/stable-diffusion-license)","{""id"": ""joachimsallstrom/Double-Exposure-Diffusion"", ""author"": ""joachimsallstrom"", ""sha"": ""09345645f60a85a46cc554e6c2fe5d49fd704966"", ""last_modified"": ""2023-02-13 09:50:10+00:00"", ""created_at"": ""2022-11-14 21:59:58+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 226, ""downloads_all_time"": null, ""likes"": 167, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""stable-diffusion"", ""text-to-image"", ""license:creativeml-openrail-m"", ""autotrain_compatible"", ""endpoints_compatible"", ""diffusers:StableDiffusionPipeline"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: creativeml-openrail-m\ntags:\n- stable-diffusion\n- text-to-image"", ""widget_data"": null, ""model_index"": null, ""config"": {""diffusers"": {""_class_name"": ""StableDiffusionPipeline""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Double_Exposure_v1.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Double_Exposure_v1.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Double_Exposure_v2.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Double_Exposure_v2.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='feature_extractor/preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model_index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='scheduler/scheduler_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/vocab.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='v1_sample_images.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='v2_sample_images_1.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='v2_sample_images_2.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Yntec/ToyWorld"", ""Yntec/PrintingPress"", ""ennov8ion/3dart-Models"", ""phenixrhyder/NSFW-ToyWorld"", ""Yntec/blitz_diffusion"", ""sanaweb/text-to-image"", ""Vedits/6x_Image_diffusion"", 
""John6666/Diffusion80XX4sg"", ""ennov8ion/comicbook-models"", ""John6666/PrintingPress4"", ""SUPERSHANKY/Finetuned_Diffusion_Max"", ""PeepDaSlan9/B2BMGMT_Diffusion60XX"", ""Daniela-C/6x_Image_diffusion"", ""phenixrhyder/PrintingPress"", ""John6666/hfd_test_nostopbutton"", ""mindtube/Diffusion50XX"", ""TheKitten/Fast-Images-Creature"", ""Nymbo/Diffusion80XX4sg"", ""kaleidoskop-hug/PrintingPress"", ""ennov8ion/stablediffusion-models"", ""John6666/ToyWorld4"", ""grzegorz2047/fast_diffusion"", ""Alfasign/dIFFU"", ""Nymbo/PrintingPress"", ""Rifd/Sdallmodels"", ""John6666/Diffusion80XX4g"", ""NativeAngels/HuggingfaceDiffusion"", ""ennov8ion/Scifi-Models"", ""ennov8ion/semirealistic-models"", ""ennov8ion/FantasyArt-Models"", ""ennov8ion/dreamlike-models"", ""noes14155/img_All_models"", ""ennov8ion/500models"", ""AnimeStudio/anime-models"", ""John6666/Diffusion80XX4"", ""K00B404/HuggingfaceDiffusion_custom"", ""John6666/blitz_diffusion4"", ""John6666/blitz_diffusion_builtin"", ""RhythmRemix14/PrintingPressDx"", ""sohoso/PrintingPress"", ""NativeAngels/ToyWorld"", ""Harshveer/Finetuned_Diffusion_Max"", ""Daniton/joachimsallstrom-Double-Exposure-Diffusion3"", ""mindtube/maximum_multiplier_places"", ""animeartstudio/ArtModels"", ""Binettebob22/fast_diffusion2"", ""pikto/Elite-Scifi-Models"", ""PixelistStudio/3dart-Models"", ""devmiles/zexxiai"", ""Nymbo/Diffusion60XX"", ""TheKitten/Images"", ""ennov8ion/anime-models"", ""jordonpeter01/Diffusion70"", ""ygtrfed/pp-web-ui"", ""ivanmeyer/Finetuned_Diffusion_Max"", ""ennov8ion/Landscapes-models"", ""ucmisanddisinfo/thisApp"", ""johann22/chat-diffusion"", ""manivannan7gp/Words2Image"", ""ennov8ion/art-models"", ""ennov8ion/photo-models"", ""ennov8ion/art-multi"", ""vih-v/x_mod"", ""NativeAngels/blitz_diffusion"", ""NativeAngels/PrintingPress4"", ""NativeAngels/PrintingPress"", ""dehua68/ToyWorld"", ""burman-ai/Printing-Press"", ""sk16er/ghibli_creator"", ""Daniton/joachimsallstrom-Double-Exposure-Diffusion"", ""ennov8ion/abstractart-models"", ""ennov8ion/Scifiart-Models"", ""ennov8ion/interior-models"", ""ennov8ion/room-interior-models"", ""krashan22/joachimsallstrom-Double-Exposure-Diffusion"", ""Yntec/top_100_diffusion"", ""AIlexDev/Diffusion60XX"", ""flatindo/all-models"", ""flatindo/img_All_models"", ""flatindo/all-models-v1"", ""johann22/chat-diffusion-describe"", ""wideprism/Ultimate-Model-Collection"", ""ReiPlush64/Finetuned_Diffusion_Max"", ""GAIneZis/FantasyArt-Models"", ""TheMaisk/Einfach.ImageAI"", ""ennov8ion/picasso-diffusion"", ""vih-v/models_x"", ""K00B404/stablediffusion-portal"", ""ennov8ion/anime-new-models"", ""ennov8ion/anime-multi-new-models"", ""ennov8ion/anime-multi"", ""ennov8ion/photo-multi"", ""StanislavMichalov/Magic_Levitan_v1_4_Stanislav"", ""vih-v/models_d2"", ""StiveDudov/Magic_Levitan_v1_4_Stanislav"", ""ElenaVasileva/Magic_Levitan_v1_4_Stanislav"", ""Ashrafb/comicbook-models"", ""sohoso/architecture"", ""GhadaSaylami/text-to-image"", ""Geek7/mdztxi""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-02-13 09:50:10+00:00"", ""cardData"": ""license: creativeml-openrail-m\ntags:\n- stable-diffusion\n- text-to-image"", ""transformersInfo"": null, ""_id"": ""6372ba5ebd81fae2b3a75bdf"", ""modelId"": ""joachimsallstrom/Double-Exposure-Diffusion"", ""usedStorage"": 14016066979}",0,,0,,0,,0,,0,"Alfasign/dIFFU, CompVis/stable-diffusion-license, Daniela-C/6x_Image_diffusion, John6666/Diffusion80XX4sg, John6666/PrintingPress4, John6666/ToyWorld4, John6666/hfd_test_nostopbutton, 
PeepDaSlan9/B2BMGMT_Diffusion60XX, Yntec/PrintingPress, Yntec/ToyWorld, Yntec/blitz_diffusion, huggingface/InferenceSupport/discussions/new?title=joachimsallstrom/Double-Exposure-Diffusion&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bjoachimsallstrom%2FDouble-Exposure-Diffusion%5D(%2Fjoachimsallstrom%2FDouble-Exposure-Diffusion)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kaleidoskop-hug/PrintingPress, phenixrhyder/NSFW-ToyWorld",14
         
     | 
    	
        F5-Spanish_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv
    ADDED
    
    | 
         @@ -0,0 +1,81 @@ 
     | 
| 1 | 
         
            +
            model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
         
     | 
| 2 | 
         
            +
            jpgallegoar/F5-Spanish,"---
         
     | 
| 3 | 
         
            +
            license: cc-by-nc-4.0
         
     | 
| 4 | 
         
            +
            library_name: f5-tts
         
     | 
| 5 | 
         
            +
            language:
         
     | 
| 6 | 
         
            +
            - es
         
     | 
| 7 | 
         
            +
            base_model:
         
     | 
| 8 | 
         
            +
            - SWivid/F5-TTS
         
     | 
| 9 | 
         
            +
            ---
         
     | 
| 10 | 
         
            +
             
     | 
| 11 | 
         
            +
            # [GitHub](https://github.com/jpgallegoar/Spanish-F5)
         
     | 
| 12 | 
         
            +
             
     | 
| 13 | 
         
            +
            # F5-TTS Spanish Language Model
         
     | 
| 14 | 
         
            +
             
     | 
| 15 | 
         
            +
            ## Overview
         
     | 
| 16 | 
         
            +
This F5-TTS model is fine-tuned specifically for Spanish-language speech synthesis. The project aims to deliver high-quality, regionally diverse speech synthesis for Spanish speakers.
         
     | 
| 17 | 
         
            +
             
     | 
| 18 | 
         
            +
            ## License
         
     | 
| 19 | 
         
            +
            This model is released under the CC0-1.0 license, which allows for free usage, modification, and distribution.
         
     | 
| 20 | 
         
            +
             
     | 
| 21 | 
         
            +
            ## Datasets
         
     | 
| 22 | 
         
            +
            The following datasets were used for training:
         
     | 
| 23 | 
         
            +
             
     | 
| 24 | 
         
            +
            - [Voxpopuli Dataset](https://huggingface.co/datasets/facebook/voxpopuli), with mainly Peninsular Spain accents
         
     | 
| 25 | 
         
            +
            - Crowdsourced high-quality Spanish speech data:
         
     | 
| 26 | 
         
            +
              - Argentinian Spanish
         
     | 
| 27 | 
         
            +
              - Chilean Spanish
         
     | 
| 28 | 
         
            +
              - Colombian Spanish
         
     | 
| 29 | 
         
            +
              - Peruvian Spanish
         
     | 
| 30 | 
         
            +
              - Puerto Rican Spanish
         
     | 
| 31 | 
         
            +
              - Venezuelan Spanish
         
     | 
| 32 | 
         
            +
            - TEDx Spanish Corpus
         
     | 
| 33 | 
         
            +
             
     | 
| 34 | 
         
            +
             
     | 
| 35 | 
         
            +
            Additional sources:
         
     | 
| 36 | 
         
            +
            - [Crowdsourced high-quality Argentinian Spanish speech data set](https://www.openslr.org/61/)
         
     | 
| 37 | 
         
            +
            - [Crowdsourced high-quality Chilean Spanish speech data set](https://www.openslr.org/71/)
         
     | 
| 38 | 
         
            +
            - [Crowdsourced high-quality Colombian Spanish speech data set](https://www.openslr.org/72/)
         
     | 
| 39 | 
         
            +
            - [Crowdsourced high-quality Peruvian Spanish speech data set](https://www.openslr.org/73/)
         
     | 
| 40 | 
         
            +
            - [Crowdsourced high-quality Puerto Rico Spanish speech data set](https://www.openslr.org/74/)
         
     | 
| 41 | 
         
            +
            - [Crowdsourced high-quality Venezuelan Spanish speech data set](https://www.openslr.org/75/)
         
     | 
| 42 | 
         
            +
- [TEDx Spanish Corpus](https://www.openslr.org/67/)
         
     | 
| 43 | 
         
            +
             
     | 
| 44 | 
         
            +
             
     | 
| 45 | 
         
            +
            ## Model Information
         
     | 
| 46 | 
         
            +
            **Base Model:** SWivid/F5-TTS  
         
     | 
| 47 | 
         
            +
            **Total Training Duration:** 218 hours of audio  
         
     | 
| 48 | 
         
            +
            **Training Configuration:**
         
     | 
| 49 | 
         
            +
            - Batch Size: 3200
         
     | 
| 50 | 
         
            +
            - Max Samples: 64
         
     | 
| 51 | 
         
            +
            - Training Steps: 1,200,000
         
     | 
| 52 | 
         
            +
             
     | 
| 53 | 
         
            +
            ## Usage Instructions
         
     | 
| 54 | 
         
            +
             
     | 
| 55 | 
         
            +
            ### Method 0: HuggingFace space (https://huggingface.co/spaces/jpgallegoar/Spanish-F5)
         
     | 
| 56 | 
         
            +
             
     | 
| 57 | 
         
            +
            ### Method 1: Manual Model Replacement
         
     | 
| 58 | 
         
            +
             
     | 
| 59 | 
         
            +
            1. **Run the F5-TTS Application:** Start the F5-TTS application and observe the terminal for output indicating the model file path. It should appear similar to:
         
     | 
| 60 | 
         
            +
               ```
         
     | 
| 61 | 
         
            +
               model : C:\Users\thega\.cache\huggingface\hub\models--SWivid--F5-TTS\snapshots\995ff41929c08ff968786b448a384330438b5cb6\F5TTS_Base\model_1200000.safetensors
         
     | 
| 62 | 
         
            +
               ```
         
     | 
| 63 | 
         
            +
            2. **Replace the Model File:**
         
     | 
| 64 | 
         
            +
               - Navigate to the displayed file location.
         
     | 
| 65 | 
         
            +
               - Rename the existing model file to `model_1200000.safetensors.bak`.
         
     | 
| 66 | 
         
            +
               - Download `model_1200000.safetensors` from this repository and save it to the same location.
         
     | 
| 67 | 
         
            +
             
     | 
| 68 | 
         
            +
            3. **Restart the Application:** Relaunch the F5-TTS application to load the updated model.
         
     | 
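As a sketch of step 2, the checkpoint can also be fetched programmatically with `huggingface_hub`; the destination path below is only an example placeholder, so use the exact path printed by your own F5-TTS install:

```python
from pathlib import Path
import shutil

from huggingface_hub import hf_hub_download

# Download the Spanish fine-tuned checkpoint from this repository.
spanish_ckpt = hf_hub_download(
    repo_id='jpgallegoar/F5-Spanish',
    filename='model_1200000.safetensors',
)

# Example destination: replace <snapshot> with the path shown in your F5-TTS terminal output.
target = Path.home() / '.cache/huggingface/hub/models--SWivid--F5-TTS/snapshots/<snapshot>/F5TTS_Base/model_1200000.safetensors'

if target.exists():
    # Keep a backup of the original base model, as described above.
    target.rename(target.with_name(target.name + '.bak'))
shutil.copy(spanish_ckpt, target)
```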
| 69 | 
         
            +
             
     | 
| 70 | 
         
            +
            ### Alternative Methods
         
     | 
| 71 | 
         
            +
             
     | 
| 72 | 
         
            +
            - **GitHub Repository:** Clone the [Spanish-F5 repository](https://github.com/jpgallegoar/Spanish-F5/) and follow the provided installation instructions.
         
     | 
| 73 | 
         
            +
            - **Google Colab:** Use the model via [Google Colab](https://colab.research.google.com/drive/1mm4NAlZVZq2_oL6ftijY64-PeEYwnqG1?usp=sharing).
         
     | 
| 74 | 
         
            +
              - Runtime -> Change Runtime Type -> T4 GPU
         
     | 
| 75 | 
         
            +
              - Runtime -> Run all
         
     | 
| 76 | 
         
            +
              - Click on the link shown in ""Running on public URL: https://link.gradio.live"" when it loads
         
     | 
| 77 | 
         
            +
            - **Jupyter Notebook:** Run the model through the `Spanish_F5.ipynb` notebook.
         
     | 
| 78 | 
         
            +
             
     | 
| 79 | 
         
            +
            ## Contributions and Recommendations
         
     | 
| 80 | 
         
            +
            This model may benefit from further fine-tuning to enhance its performance across different Spanish dialects. Contributions from the community are encouraged. For optimal output quality, preprocess the reference audio by removing background noise, balancing audio levels, and enhancing clarity.
         
     | 
| 81 | 
         
            +
            ","{""id"": ""jpgallegoar/F5-Spanish"", ""author"": ""jpgallegoar"", ""sha"": ""4765c14ffd01075479c2fde8615831acc0adca9a"", ""last_modified"": ""2025-03-26 20:10:23+00:00"", ""created_at"": ""2024-11-04 14:27:24+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 7244, ""downloads_all_time"": null, ""likes"": 185, ""library_name"": ""f5-tts"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""f5-tts"", ""es"", ""base_model:SWivid/F5-TTS"", ""base_model:finetune:SWivid/F5-TTS"", ""license:cc-by-nc-4.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- SWivid/F5-TTS\nlanguage:\n- es\nlibrary_name: f5-tts\nlicense: cc-by-nc-4.0"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Spanish_F5.ipynb', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model_1200000.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model_1250000.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model_last.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformer_config.yaml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.txt', size=None, blob_id=None, lfs=None)""], ""spaces"": [""marcosegura34/Spanish-F5"", ""alex16052G/abi""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-26 20:10:23+00:00"", ""cardData"": ""base_model:\n- SWivid/F5-TTS\nlanguage:\n- es\nlibrary_name: f5-tts\nlicense: cc-by-nc-4.0"", ""transformersInfo"": null, ""_id"": ""6728d9cc3687c4111437240a"", ""modelId"": ""jpgallegoar/F5-Spanish"", ""usedStorage"": 6742625853}",0,,0,,0,,0,,0,"alex16052G/abi, huggingface/InferenceSupport/discussions/134, jpgallegoar/Spanish-F5, marcosegura34/Spanish-F5",4
         
     | 
    	
        Flux-Midjourney-Mix2-LoRA_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv
    ADDED
    
    | 
         @@ -0,0 +1,104 @@ 
     | 
| 1 | 
         
            +
            model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
         
     | 
| 2 | 
         
            +
            strangerzonehf/Flux-Midjourney-Mix2-LoRA,"---
         
     | 
| 3 | 
         
            +
            tags:
         
     | 
| 4 | 
         
            +
            - text-to-image
         
     | 
| 5 | 
         
            +
            - lora
         
     | 
| 6 | 
         
            +
            - diffusers
         
     | 
| 7 | 
         
            +
            - template:diffusion-lora
         
     | 
| 8 | 
         
            +
            widget:
         
     | 
| 9 | 
         
            +
            - text: 'MJ v6, Portrait photography of a woman in a red dress, in the style of unsplash photography, street photography, dark green background --ar 47:64 --v 6.0 --style raw'
         
     | 
| 10 | 
         
            +
              output:
         
     | 
| 11 | 
         
            +
                url: images/1.png
         
     | 
| 12 | 
         
            +
            - text: 'MJ v6, A portrait of a Bird in the dark, illuminated by an intense yellow light from above, with a soft blue gradient background. This scene evokes a sense of mystery or contemplation, highlighting the beauty of the subjects features against the contrasting backdrop, lens glossy effect, high contrast, star bokeh '
         
     | 
| 13 | 
         
            +
              output:
         
     | 
| 14 | 
         
            +
                url: images/2.png
         
     | 
| 15 | 
         
            +
            - text: 'MJ v6, A photo of an attractive man in his thirties, wearing a black coat and yellow scarf with a brown pattern inside a building talking on a phone standing near a modern glass skyscraper in London, shot from below looking up at him in the style of street photography, cinematic.  --ar 85:128 --v 6.0 --style raw'
         
     | 
| 16 | 
         
            +
              output:
         
     | 
| 17 | 
         
            +
                url: images/3.png
         
     | 
| 18 | 
         
            +
            - text: 'MJ v6, banana bread with chocolate chips and pecans, in the style of tabletop photography, y2k aesthetic, spiky mounds, flawless line work, schlieren photography, 8k, natural fibers, minimal  --ar 123:185 --v 5 '
         
     | 
| 19 | 
         
            +
              output:
         
     | 
| 20 | 
         
            +
                url: images/4.png
         
     | 
| 21 | 
         
            +
            - text: 'MJ v6, A portrait of Woman, fashion photography, big shapes in the background, on top of colorful squares with stars, in the style of retro vintage photography, pastel colors, soft purple and yellow '
         
     | 
| 22 | 
         
            +
              output:
         
     | 
| 23 | 
         
            +
                url: images/6.png
         
     | 
| 24 | 
         
            +
            - text: 'MJ v6, delicious dipped chocolate pastry japo gallery, white background, in the style of dark brown, close-up intensity, duckcore, rounded, high resolution --ar 2:3 --v 5'
         
     | 
| 25 | 
         
            +
              output:
         
     | 
| 26 | 
         
            +
                url: images/5.png
         
     | 
| 27 | 
         
            +
            base_model: black-forest-labs/FLUX.1-dev
         
     | 
| 28 | 
         
            +
            instance_prompt: MJ v6
         
     | 
| 29 | 
         
            +
            license: other
         
     | 
| 30 | 
         
            +
            license_name: flux-1-dev-non-commercial-license
         
     | 
| 31 | 
         
            +
            license_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md
         
     | 
| 32 | 
         
            +
            ---
         
     | 
| 33 | 
         
            +
            
         
     | 
| 34 | 
         
            +
             
     | 
| 35 | 
         
            +
            <Gallery />
         
     | 
| 36 | 
         
            +
             
     | 
| 37 | 
         
            +
            ## Model description for MJv6 Mix2 LoRA
         
     | 
| 38 | 
         
            +
             
     | 
| 39 | 
         
            +
            [ Best for Realism, Modeling, Demonstration, Close-Up Shots ]
         
     | 
| 40 | 
         
            +
             
     | 
| 41 | 
         
            +
            Image Processing Parameters 
         
     | 
| 42 | 
         
            +
             
     | 
| 43 | 
         
            +
            | Parameter                 | Value  | Parameter                 | Value  |
         
     | 
| 44 | 
         
            +
            |---------------------------|--------|---------------------------|--------|
         
     | 
| 45 | 
         
            +
            | LR Scheduler              | constant | Noise Offset              | 0.03   |
         
     | 
| 46 | 
         
            +
            | Optimizer                 | AdamW8 | Multires Noise Discount   | 0.1    |
         
     | 
| 47 | 
         
            +
            | Network Dim               | 64     | Multires Noise Iterations | 10     |
         
     | 
| 48 | 
         
            +
            | Network Alpha             | 32     | Repeat & Steps           | 25 & 3660 |
         
     | 
| 49 | 
         
            +
            | Epoch                     | 28   | Save Every N Epochs       | 1     |
         
     | 
| 50 | 
         
            +
             
     | 
| 51 | 
         
            +
                Labeling: florence2-en(natural language & English)
         
     | 
| 52 | 
         
            +
                
         
     | 
| 53 | 
         
            +
                Total Images Used for Training : 36
         
     | 
| 54 | 
         
            +
             
     | 
| 55 | 
         
            +
            ## Best Dimensions
         
     | 
| 56 | 
         
            +
             
     | 
| 57 | 
         
            +
            - 768 x 1024 (Best)
         
     | 
| 58 | 
         
            +
            - 1024 x 1024 (Default)
         
     | 
| 59 | 
         
            +
                
         
     | 
| 60 | 
         
            +
            ## Setting Up
         
     | 
| 61 | 
         
            +
            ```python
         
     | 
| 62 | 
         
            +
            import torch
         
     | 
| 63 | 
         
            +
from diffusers import DiffusionPipeline
         
     | 
| 64 | 
         
            +
             
     | 
| 65 | 
         
            +
            base_model = ""black-forest-labs/FLUX.1-dev""
         
     | 
| 66 | 
         
            +
            pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=torch.bfloat16)
         
     | 
| 67 | 
         
            +
             
     | 
| 68 | 
         
            +
            lora_repo = ""strangerzonehf/Flux-Midjourney-Mix2-LoRA""
         
     | 
| 69 | 
         
            +
            trigger_word = ""MJ v6""  
         
     | 
| 70 | 
         
            +
            pipe.load_lora_weights(lora_repo)
         
     | 
| 71 | 
         
            +
             
     | 
| 72 | 
         
            +
            device = torch.device(""cuda"")
         
     | 
| 73 | 
         
            +
            pipe.to(device)
         
     | 
| 74 | 
         
            +
            ```
         
     | 
| 75 | 
         
            +
            ## Trigger words
         
     | 
| 76 | 
         
            +
             
     | 
| 77 | 
         
            +
            You should use `MJ v6` to trigger the image generation.
         
     | 
| 78 | 
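Continuing the setup snippet above, a short sketch of generating an image with the trigger word; the step count and guidance value are illustrative assumptions, not values taken from this card:

```python
import torch
from diffusers import DiffusionPipeline

# Same setup as in the Setting Up section.
pipe = DiffusionPipeline.from_pretrained(
    'black-forest-labs/FLUX.1-dev', torch_dtype=torch.bfloat16
).to('cuda')
pipe.load_lora_weights('strangerzonehf/Flux-Midjourney-Mix2-LoRA')

# Prompts should start with the trigger word MJ v6.
image = pipe(
    'MJ v6, Portrait photography of a woman in a red dress, dark green background',
    num_inference_steps=28,   # illustrative value
    guidance_scale=3.5,       # illustrative value
    width=768,
    height=1024,              # the Best dimensions listed above
).images[0]
image.save('mj_v6_sample.png')
```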
         
            +
             
     | 
| 79 | 
         
            +
            ## Sample Prompts for MJv6 Mix2 LoRA
         
     | 
| 80 | 
         
            +
             
     | 
| 81 | 
         
            +
            | **Prompt**                                                                                                                                                               | **Description**                                                                                                                                                                                                                  |
         
     | 
| 82 | 
         
            +
            |-------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
         
     | 
| 83 | 
         
            +
            | MJ v6, Portrait photography of a woman in a red dress, in the style of unsplash photography, street photography, dark green background --ar 47:64 --v 6.0 --style raw | A portrait of a woman in a red dress, photographed in the street photography style with a dark green background, capturing the raw and natural aesthetics of Unsplash-style imagery.                                             |
         
     | 
| 84 | 
         
            +
            | MJ v6, A portrait of a Bird in the dark, illuminated by an intense yellow light from above, with a soft blue gradient background. Lens glossy effect, high contrast, star bokeh | A mysterious and contemplative bird portrait illuminated by yellow light with a blue gradient background. Features include high contrast and a star bokeh effect to enhance the atmosphere.                                      |
         
     | 
| 85 | 
         
            +
            | MJ v6, banana bread with chocolate chips and pecans, in the style of tabletop photography, y2k aesthetic, spiky mounds, flawless line work, schlieren photography, 8k, natural fibers, minimal  --ar 123:185 --v 5 | A close-up image of banana bread with chocolate chips and pecans, styled with a Y2K aesthetic. The photography emphasizes texture, line work, and high resolution, with natural materials enhancing the minimalistic approach.   |
         
     | 
| 86 | 
         
            +
            | MJ v6, delicious dipped chocolate pastry japo gallery, white background, in the style of dark brown, close-up intensity, duckcore, rounded, high resolution --ar 2:3 --v 5 | A close-up of a chocolate-dipped pastry on a white background, featuring a rich brown color palette and soft, rounded forms. High-resolution imagery enhances the details and texture of the subject.                             |
         
     | 
| 87 | 
         
            +
            | MJ v6, A portrait of Woman, fashion photography, big shapes in the background, on top of colorful squares with stars, in the style of retro vintage photography, pastel colors, soft purple and yellow | A retro-vintage style portrait of a woman with a whimsical background of large shapes and colorful squares with stars. The pastel tones of purple and yellow create a soft and nostalgic mood.                                    |
         
     | 
| 88 | 
         
            +
            | MJ v6, Captured at eye-level, a close-up shot of a young woman with long dark brown hair, wearing a green bikini top adorned with yellow and orange flowers. The woman's body is partially submerged in a body of water, her eyes are slightly open. The background is blurred, with a stone wall visible behind her. The sun is shining on the right side of the image, casting a shadow on the wall. | A vibrant and summery close-up of a young woman partially submerged in water, wearing a floral green bikini top. The image captures natural lighting, with the background blurred to enhance the subject's focus.                  |
         
     | 
| 89 | 
         
            +
            | MJ v6, a woman with long dark brown hair stands in front of a stark white wall. She is dressed in a sleeveless black and white dress, adorned with a checkered pattern. Her eyes are a deep blue, and her lips are pursed. Her hair cascades over her shoulders, adding a touch of warmth to her face. The lighting is subdued, creating a stark contrast to the woman's outfit. | A stark, minimalist portrait of a woman in a checkered dress. The subdued lighting and simple white background emphasize her expressive features and contrast with her bold outfit.                                                  |
         
     | 
| 90 | 
         
            +
            | MJ v6, a beautiful young woman with long brown hair is seated in a field of lavender flowers. She is dressed in a cream-colored bra with a red belt tied around her waist. Her bra is tied in a knot at the center of her chest. Her eyes are closed and her lips are pursed. Her hair is pulled back in a ponytail, adding a pop of color to her face. The backdrop is a lush green hillside. | A serene and dreamy image of a woman in a lavender field. The cream-colored attire and red accents create a harmonious blend with the lush green and vibrant purple surroundings.                                                    |
         
     | 
| 91 | 
         
            +
             
     | 
| 92 | 
         
            +
            ## Download model
         
     | 
| 93 | 
         
            +
             
     | 
| 94 | 
         
            +
            Weights for this model are available in Safetensors format.
         
     | 
| 95 | 
         
            +
             
     | 
| 96 | 
         
            +
            [Download](/strangerzonehf/Flux-Midjourney-Mix2-LoRA/tree/main) them in the Files & versions tab.
         
     | 
| 97 | 
         
            +
            ","{""id"": ""strangerzonehf/Flux-Midjourney-Mix2-LoRA"", ""author"": ""strangerzonehf"", ""sha"": ""6b6aaf586f83fd2093519fb62015a771c7aff240"", ""last_modified"": ""2025-03-17 12:29:50+00:00"", ""created_at"": ""2024-11-26 15:58:30+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 13290, ""downloads_all_time"": null, ""likes"": 474, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": ""warm"", ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""text-to-image"", ""lora"", ""template:diffusion-lora"", ""base_model:black-forest-labs/FLUX.1-dev"", ""base_model:adapter:black-forest-labs/FLUX.1-dev"", ""license:other"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: black-forest-labs/FLUX.1-dev\nlicense: other\nlicense_name: flux-1-dev-non-commercial-license\nlicense_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md\ntags:\n- text-to-image\n- lora\n- diffusers\n- template:diffusion-lora\nwidget:\n- text: MJ v6, Portrait photography of a woman in a red dress, in the style of unsplash\n    photography, street photography, dark green background --ar 47:64 --v 6.0 --style\n    raw\n  output:\n    url: https://huggingface.co/strangerzonehf/Flux-Midjourney-Mix2-LoRA/resolve/main/images/1.png\n- text: 'MJ v6, A portrait of a Bird in the dark, illuminated by an intense yellow\n    light from above, with a soft blue gradient background. This scene evokes a sense\n    of mystery or contemplation, highlighting the beauty of the subjects features\n    against the contrasting backdrop, lens glossy effect, high contrast, star bokeh '\n  output:\n    url: https://huggingface.co/strangerzonehf/Flux-Midjourney-Mix2-LoRA/resolve/main/images/2.png\n- text: MJ v6, A photo of an attractive man in his thirties, wearing a black coat\n    and yellow scarf with a brown pattern inside a building talking on a phone standing\n    near a modern glass skyscraper in London, shot from below looking up at him in\n    the style of street photography, cinematic.  
--ar 85:128 --v 6.0 --style raw\n  output:\n    url: https://huggingface.co/strangerzonehf/Flux-Midjourney-Mix2-LoRA/resolve/main/images/3.png\n- text: 'MJ v6, banana bread with chocolate chips and pecans, in the style of tabletop\n    photography, y2k aesthetic, spiky mounds, flawless line work, schlieren photography,\n    8k, natural fibers, minimal  --ar 123:185 --v 5 '\n  output:\n    url: https://huggingface.co/strangerzonehf/Flux-Midjourney-Mix2-LoRA/resolve/main/images/4.png\n- text: 'MJ v6, A portrait of Woman, fashion photography, big shapes in the background,\n    on top of colorful squares with stars, in the style of retro vintage photography,\n    pastel colors, soft purple and yellow '\n  output:\n    url: https://huggingface.co/strangerzonehf/Flux-Midjourney-Mix2-LoRA/resolve/main/images/6.png\n- text: MJ v6, delicious dipped chocolate pastry japo gallery, white background, in\n    the style of dark brown, close-up intensity, duckcore, rounded, high resolution\n    --ar 2:3 --v 5\n  output:\n    url: https://huggingface.co/strangerzonehf/Flux-Midjourney-Mix2-LoRA/resolve/main/images/5.png\ninstance_prompt: MJ v6"", ""widget_data"": [{""text"": ""MJ v6, Portrait photography of a woman in a red dress, in the style of unsplash photography, street photography, dark green background --ar 47:64 --v 6.0 --style raw"", ""output"": {""url"": ""https://huggingface.co/strangerzonehf/Flux-Midjourney-Mix2-LoRA/resolve/main/images/1.png""}}, {""text"": ""MJ v6, A portrait of a Bird in the dark, illuminated by an intense yellow light from above, with a soft blue gradient background. This scene evokes a sense of mystery or contemplation, highlighting the beauty of the subjects features against the contrasting backdrop, lens glossy effect, high contrast, star bokeh "", ""output"": {""url"": ""https://huggingface.co/strangerzonehf/Flux-Midjourney-Mix2-LoRA/resolve/main/images/2.png""}}, {""text"": ""MJ v6, A photo of an attractive man in his thirties, wearing a black coat and yellow scarf with a brown pattern inside a building talking on a phone standing near a modern glass skyscraper in London, shot from below looking up at him in the style of street photography, cinematic.  
--ar 85:128 --v 6.0 --style raw"", ""output"": {""url"": ""https://huggingface.co/strangerzonehf/Flux-Midjourney-Mix2-LoRA/resolve/main/images/3.png""}}, {""text"": ""MJ v6, banana bread with chocolate chips and pecans, in the style of tabletop photography, y2k aesthetic, spiky mounds, flawless line work, schlieren photography, 8k, natural fibers, minimal  --ar 123:185 --v 5 "", ""output"": {""url"": ""https://huggingface.co/strangerzonehf/Flux-Midjourney-Mix2-LoRA/resolve/main/images/4.png""}}, {""text"": ""MJ v6, A portrait of Woman, fashion photography, big shapes in the background, on top of colorful squares with stars, in the style of retro vintage photography, pastel colors, soft purple and yellow "", ""output"": {""url"": ""https://huggingface.co/strangerzonehf/Flux-Midjourney-Mix2-LoRA/resolve/main/images/6.png""}}, {""text"": ""MJ v6, delicious dipped chocolate pastry japo gallery, white background, in the style of dark brown, close-up intensity, duckcore, rounded, high resolution --ar 2:3 --v 5"", ""output"": {""url"": ""https://huggingface.co/strangerzonehf/Flux-Midjourney-Mix2-LoRA/resolve/main/images/5.png""}}], ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/1.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/2.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/3.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/4.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/5.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/6.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/mjv6.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mjV6.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [""prithivMLmods/FLUX-LoRA-DLC"", ""Nymbo/Serverless-ImgGen-Hub"", ""savan2001/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""Novaciano/Flux_Lustly_AI_Uncensored_NSFW_V1"", ""guardiancc/FLUX-LoRA-DLC-fixed"", ""ruslanmv/Flux-LoRA-Generation-Advanced"", ""soiz1/FLUX-LoRA-DLC"", ""EmoCube/creamie-image"", ""Dhw627ju/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""Masterdqqq/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""theunseenones94/Flux_Lustly_AI_Uncensored_NSFW_V1"", ""Stephanyia/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""abdullahxaif/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""philosophyunviled/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""Akshit2606/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""AdarshJi/IMAGE-GEN"", ""Ariaper/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""boomkid/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""Parmist/strangerzonehf-Flux-Super-Realism-LoRA"", ""Xena18284/Xena18284-Xena"", ""Kidbea/multimodels_image_generation"", ""K00B404/FLUX-Wallpaper-HD-Maker_p"", ""Nymbo/serverless-imggen-test"", ""Xach35/FLUX-LoRA-DLC"", ""Grand-Vitek/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""handbeats/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""saliseabeali/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""Suleman4770/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""cespezu/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""Successmarcus34/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""priyanshuTIWARY/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""Irridian/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", 
""vi0rka/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""codexxx/imagine"", ""0xBlxck/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""VitorMacedo/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""Guibat/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""Ebistika/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""kd8811/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""jobayerarafat/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""kiranchilledout/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""5334mariele/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""n1c0c4b/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""theoneandlinglong/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""anonymousAAK/impersonaIMAGEv1"", ""Sonfire/sss"", ""Sonfire/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""mindvortex/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""mdubina/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""Karthikpadav/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""Rkemmi/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""aesopnix/pancake_mix_hybrid"", ""Houkii/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""arcanus/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""Sirrichard-3rd/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""Marlon892power/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""FFDracul/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""Herrduke/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""jatinnandwani/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""lrsasdfasxxx/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""Dobstar2/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""jn1xia/genpixel"", ""zebrastribe/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""Yonot/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""Perry1323/T"", ""lilimaniac/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""babyflux/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""Gobbledock/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""rifandavinci/midjourney"", ""WatchOutForMike/DnDFluxMidjourney"", ""carnevale/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""XaraMargot/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""Larissa266/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""cagrison/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""reidx/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""GlitchGhost/LoraArtGallery"", ""andresampa/CtB-AI-img-gen"", ""Techguy3389/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""Etr0n/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""basedcatx/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""NomadSHANTO/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""fule225/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""ryan171088/FLUX-LoRA-DLC"", ""jadada/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""haccourt/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""K00B404/TheUnclonable"", ""AdarshJi/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""Akshit2606/blalsa"", ""TheFatherOfDogs/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""KarimLMaghraby/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""wizofavalon/image_generation_dashboard"", ""khalil1990/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""vakilrathod67/Flux-Midjourney-Mix"", ""lhessel/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""kilop/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""NikiOstin/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""driipppp/eighty6thmodel"", ""orangelemonuk/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""joeysaada/strangerzonehf-Flux-Midjourney-Mix2-LoRA"", ""andresampa/LS-AI-img-gen""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-17 12:29:50+00:00"", ""cardData"": ""base_model: 
black-forest-labs/FLUX.1-dev\nlicense: other\nlicense_name: flux-1-dev-non-commercial-license\nlicense_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md\ntags:\n- text-to-image\n- lora\n- diffusers\n- template:diffusion-lora\nwidget:\n- text: MJ v6, Portrait photography of a woman in a red dress, in the style of unsplash\n    photography, street photography, dark green background --ar 47:64 --v 6.0 --style\n    raw\n  output:\n    url: https://huggingface.co/strangerzonehf/Flux-Midjourney-Mix2-LoRA/resolve/main/images/1.png\n- text: 'MJ v6, A portrait of a Bird in the dark, illuminated by an intense yellow\n    light from above, with a soft blue gradient background. This scene evokes a sense\n    of mystery or contemplation, highlighting the beauty of the subjects features\n    against the contrasting backdrop, lens glossy effect, high contrast, star bokeh '\n  output:\n    url: https://huggingface.co/strangerzonehf/Flux-Midjourney-Mix2-LoRA/resolve/main/images/2.png\n- text: MJ v6, A photo of an attractive man in his thirties, wearing a black coat\n    and yellow scarf with a brown pattern inside a building talking on a phone standing\n    near a modern glass skyscraper in London, shot from below looking up at him in\n    the style of street photography, cinematic.  --ar 85:128 --v 6.0 --style raw\n  output:\n    url: https://huggingface.co/strangerzonehf/Flux-Midjourney-Mix2-LoRA/resolve/main/images/3.png\n- text: 'MJ v6, banana bread with chocolate chips and pecans, in the style of tabletop\n    photography, y2k aesthetic, spiky mounds, flawless line work, schlieren photography,\n    8k, natural fibers, minimal  --ar 123:185 --v 5 '\n  output:\n    url: https://huggingface.co/strangerzonehf/Flux-Midjourney-Mix2-LoRA/resolve/main/images/4.png\n- text: 'MJ v6, A portrait of Woman, fashion photography, big shapes in the background,\n    on top of colorful squares with stars, in the style of retro vintage photography,\n    pastel colors, soft purple and yellow '\n  output:\n    url: https://huggingface.co/strangerzonehf/Flux-Midjourney-Mix2-LoRA/resolve/main/images/6.png\n- text: MJ v6, delicious dipped chocolate pastry japo gallery, white background, in\n    the style of dark brown, close-up intensity, duckcore, rounded, high resolution\n    --ar 2:3 --v 5\n  output:\n    url: https://huggingface.co/strangerzonehf/Flux-Midjourney-Mix2-LoRA/resolve/main/images/5.png\ninstance_prompt: MJ v6"", ""transformersInfo"": null, ""_id"": ""6745f026cc4caa5db9508d0e"", ""modelId"": ""strangerzonehf/Flux-Midjourney-Mix2-LoRA"", ""usedStorage"": 867298952}",0,https://huggingface.co/immaari/Model-Test-06,1,"https://huggingface.co/Wolfram33/pix74, https://huggingface.co/sa95/DeBERTa-v2-XLARGE-LORA, https://huggingface.co/sa95/DeBERTa-v2-XxLARGE-LORA, https://huggingface.co/sa95/DeBERTa-v2-LARGE-lora",4,,0,,0,"Dhw627ju/strangerzonehf-Flux-Midjourney-Mix2-LoRA, EmoCube/creamie-image, Masterdqqq/strangerzonehf-Flux-Midjourney-Mix2-LoRA, Novaciano/Flux_Lustly_AI_Uncensored_NSFW_V1, Nymbo/Serverless-ImgGen-Hub, guardiancc/FLUX-LoRA-DLC-fixed, philosophyunviled/strangerzonehf-Flux-Midjourney-Mix2-LoRA, prithivMLmods/FLUX-LoRA-DLC, ruslanmv/Flux-LoRA-Generation-Advanced, savan2001/strangerzonehf-Flux-Midjourney-Mix2-LoRA, soiz1/FLUX-LoRA-DLC, theunseenones94/Flux_Lustly_AI_Uncensored_NSFW_V1",12
         
     | 
| 98 | 
         
            +
            immaari/Model-Test-06,"---
         
     | 
| 99 | 
         
            +
            base_model:
         
     | 
| 100 | 
         
            +
            - strangerzonehf/Flux-Midjourney-Mix2-LoRA
         
     | 
| 101 | 
         
            +
            datasets:
         
     | 
| 102 | 
         
            +
            - saiyan-world/Goku-MovieGenBench
         
     | 
| 103 | 
         
            +
            - TIGER-Lab/AceCode-87K
         
     | 
| 104 | 
         
            +
            ---","{""id"": ""immaari/Model-Test-06"", ""author"": ""immaari"", ""sha"": ""f55dcdf30c9d8bae28963bffdb2d117c245b71fa"", ""last_modified"": ""2025-03-07 10:03:30+00:00"", ""created_at"": ""2025-03-07 09:59:00+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""dataset:saiyan-world/Goku-MovieGenBench"", ""dataset:TIGER-Lab/AceCode-87K"", ""base_model:strangerzonehf/Flux-Midjourney-Mix2-LoRA"", ""base_model:finetune:strangerzonehf/Flux-Midjourney-Mix2-LoRA"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- strangerzonehf/Flux-Midjourney-Mix2-LoRA\ndatasets:\n- saiyan-world/Goku-MovieGenBench\n- TIGER-Lab/AceCode-87K"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-07 10:03:30+00:00"", ""cardData"": ""base_model:\n- strangerzonehf/Flux-Midjourney-Mix2-LoRA\ndatasets:\n- saiyan-world/Goku-MovieGenBench\n- TIGER-Lab/AceCode-87K"", ""transformersInfo"": null, ""_id"": ""67cac3642739d21c745cbee9"", ""modelId"": ""immaari/Model-Test-06"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=immaari/Model-Test-06&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bimmaari%2FModel-Test-06%5D(%2Fimmaari%2FModel-Test-06)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
     | 
    	
        GuoFeng3_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv
    ADDED
    
    | 
         @@ -0,0 +1,160 @@ 
     | 
| 1 | 
         
            +
            model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
         
     | 
| 2 | 
         
            +
            xiaolxl/GuoFeng3,"---
         
     | 
| 3 | 
         
            +
            license: cc-by-nc-sa-4.0
         
     | 
| 4 | 
         
            +
            language:
         
     | 
| 5 | 
         
            +
            - en
         
     | 
| 6 | 
         
            +
            library_name: diffusers
         
     | 
| 7 | 
         
            +
            pipeline_tag: text-to-image
         
     | 
| 8 | 
         
            +
            tags:
         
     | 
| 9 | 
         
            +
            - stable-diffusion
         
     | 
| 10 | 
         
            +
            - stable-diffusion-diffusers
         
     | 
| 11 | 
         
            +
            ---
         
     | 
| 12 | 
         
            +
             
     | 
| 13 | 
         
            +
            <img src=https://huggingface.co/xiaolxl/GuoFeng3/resolve/main/examples/cover.png>
         
     | 
| 14 | 
         
            +
             
     | 
| 15 | 
         
            +
            # 基于SDXL的国风4已发布!- GuoFeng4 based on SDXL has been released! : https://huggingface.co/xiaolxl/GuoFeng4_XL
         
     | 
| 16 | 
         
            +
             
     | 
| 17 | 
         
            +
# 本人郑重声明:本模型禁止用于训练基于明星、公众人物肖像的风格模型训练,因为这会带来争议,对AI社区的发展造成不良的负面影响。 - Solemn statement: this model must not be used to train style models based on the likenesses of celebrities or public figures, as this causes controversy and has a harmful impact on the development of the AI community.
         
     | 
| 18 | 
         
            +
             
     | 
| 19 | 
         
            +
# 本模型注明:训练素材中不包含任何真人素材。 - Note: the training material does not contain any photos of real people.
         
     | 
| 20 | 
         
            +
             
     | 
| 21 | 
         
            +
| 版本 - Version | 效果图 - Example |
         
     | 
| 22 | 
         
            +
            | --- | --- |
         
     | 
| 23 | 
         
            +
            | **GuoFeng3.4** |  |
         
     | 
| 24 | 
         
            +
            | **GuoFeng3.3** |  |
         
     | 
| 25 | 
         
            +
            | **GuoFeng3.2_light** |  |
         
     | 
| 26 | 
         
            +
            | **GuoFeng3.2** |  |
         
     | 
| 27 | 
         
            +
            | **GuoFeng3** |  |
         
     | 
| 28 | 
         
            +
             
     | 
| 29 | 
         
            +
            # 介绍 - GuoFeng3
         
     | 
| 30 | 
         
            +
             
     | 
| 31 | 
         
            +
            欢迎使用GuoFeng3模型 - (TIP:这个版本的名字进行了微调),这是一个中国华丽古风风格模型,也可以说是一个古风游戏角色模型,具有2.5D的质感。第三代大幅度减少上手难度,增加了场景元素与男性古风人物,除此之外为了模型能更好的适应其它TAG,还增加了其它风格的元素。这一代对脸和手的崩坏有一定的修复,同时素材大小也提高到了最长边1024。
         
     | 
| 32 | 
         
            +
             
     | 
| 33 | 
         
            +
            根据个人的实验与收到的反馈,国风模型系列的第二代,在人物,与大头照的效果表现比三代更好,如果你有这方面需求不妨试试第二代。
         
     | 
| 34 | 
         
            +
             
     | 
| 35 | 
         
            +
            2.0版本:[https://huggingface.co/xiaolxl/Gf_style2](https://huggingface.co/xiaolxl/Gf_style2)
         
     | 
| 36 | 
         
            +
             
     | 
| 37 | 
         
            +
            GuoFeng3:原始模型
         
     | 
| 38 | 
         
            +
             
     | 
| 39 | 
         
            +
            GuoFeng3.1:对GuoFeng3人像进行了微调修复
         
     | 
| 40 | 
         
            +
             
     | 
| 41 | 
         
            +
            GuoFeng3.2:如果你不知道选择GuoFeng3还是GuoFeng2,可以直接使用此版本
         
     | 
| 42 | 
         
            +
             
     | 
| 43 | 
         
            +
            GuoFeng3.2_light:通过GuoFeng3.2融合了基于 Noise Offset 训练的Lora使得模型能够画出更漂亮的光影效果(Lora:epi_noiseoffset/Theovercomer8's Contrast Fix)
         
     | 
| 44 | 
         
            +
             
     | 
| 45 | 
         
            +
            GuoFeng3.2_Lora:国风3.2 Lora版本
         
     | 
| 46 | 
         
            +
             
     | 
| 47 | 
         
            +
            GuoFeng3.2_Lora_big_light:国风3.2_light Lora版本 维度增大版本
         
     | 
| 48 | 
         
            +
             
     | 
| 49 | 
         
            +
            GuoFeng3.2_f16:国风3.2 半精版本
         
     | 
| 50 | 
         
            +
             
     | 
| 51 | 
         
            +
            GuoFeng3.2_light_f16:国风3.2_light 半精版本
         
     | 
| 52 | 
         
            +
             
     | 
| 53 | 
         
            +
            GuoFeng3.3:此版本是基于3.2的一次较大的更新与改进,可以适配full body,即使你的tag不太好,模型也会对画面进行自动修改,不过因此模型出的脸会比较雷同。此模型似乎不需要超分,我的出图大小是768*1024,清晰度还不错。建议竖图,横图可能不清晰。Euler a即可。(DPM++ SDE Karras, DDIM也不错)
         
     | 
| 54 | 
         
            +
             
     | 
| 55 | 
         
            +
            GuoFeng3.4:此版本重新进行了新的训练,适配全身图,同时内容上与前几个版本有较大不同。并调整了整体画风,降低了过拟合程度,使其能使用更多的lora对画面与内容进行调整。
         
     | 
| 56 | 
         
            +
             
     | 
| 57 | 
         
            +
            --
         
     | 
| 58 | 
         
            +
             
     | 
| 59 | 
         
            +
            Welcome to the GuoFeng3 model - (TIP: the name of this version has been fine-tuned). This is a Chinese gorgeous antique style model, which can also be said to be an antique game character model with a 2.5D texture. The third generation greatly reduces the difficulty of getting started, and adds scene elements and male antique characters. In addition, in order to better adapt the model to other TAGs, other style elements are also added. This generation has repaired the broken face and hands to a certain extent, and the size of the material has also increased to the longest side of 1024.
         
     | 
| 60 | 
         
            +
             
     | 
| 61 | 
         
            +
            According to personal experiments and feedback received, the second generation of the Guofeng model series performs better than the third generation in terms of characters and big head photos. If you have this need, you can try the second generation.
         
     | 
| 62 | 
         
            +
             
     | 
| 63 | 
         
            +
            Version 2.0:[https://huggingface.co/xiaolxl/Gf_style2](https://huggingface.co/xiaolxl/Gf_style2)
         
     | 
| 64 | 
         
            +
             
     | 
| 65 | 
         
            +
            GuoFeng3: original model
         
     | 
| 66 | 
         
            +
             
     | 
| 67 | 
         
            +
            GuoFeng3.1: The portrait of GuoFeng3 has been fine-tuned and repaired
         
     | 
| 68 | 
         
            +
             
     | 
| 69 | 
         
            +
            GuoFeng3.2: If you don't know whether to choose GuoFeng3 or GuoFeng2, you can use this version directly
         
     | 
| 70 | 
         
            +
             
     | 
| 71 | 
         
            +
GuoFeng3.2_Light: GuoFeng3.2 merged with a LoRA trained with Noise Offset, enabling the model to draw more beautiful light and shadow effects (LoRA: epi_noiseoffset/Theovercomer8's Contrast Fix)
         
     | 
| 72 | 
         
            +
             
     | 
| 73 | 
         
            +
            GuoFeng3.2_Lora: Guofeng3.2 Lora version
         
     | 
| 74 | 
         
            +
             
     | 
| 75 | 
         
            +
GuoFeng3.2_Lora_big_Light: LoRA version of GuoFeng3.2_Light with increased dimension (rank)
         
     | 
| 76 | 
         
            +
             
     | 
| 77 | 
         
            +
GuoFeng3.2_F16: GuoFeng3.2 half-precision (fp16) version
         
     | 
| 78 | 
         
            +
             
     | 
| 79 | 
         
            +
GuoFeng3.2_light_f16: GuoFeng3.2_Light half-precision (fp16) version
         
     | 
| 80 | 
         
            +
             
     | 
| 81 | 
         
            +
GuoFeng3.3: This version is a major update and improvement over 3.2 and can handle full body shots. Even if your tags are not great, the model will automatically adjust the image, although the faces it produces will look quite similar. This model does not seem to need upscaling; my output size is 768*1024 and the clarity is quite good. Vertical images are recommended, as horizontal images may be less clear. Euler a is sufficient (DPM++ SDE Karras and DDIM are also good).
         
     | 
| 82 | 
         
            +
             
     | 
| 83 | 
         
            +
GuoFeng3.4: This version was retrained from scratch to handle full-body images, and its content differs significantly from previous versions. The overall painting style has also been adjusted and the degree of overfitting reduced, so more LoRAs can be used to adjust the image and its content.
         
     | 
| 84 | 
         
            +
             
     | 
| 85 | 
         
            +
            # 安装教程 - install
         
     | 
| 86 | 
         
            +
             
     | 
| 87 | 
         
            +
            1. 将GuoFeng3.ckpt模型放入SD目录 - Put GuoFeng3.ckpt model into SD directory
         
     | 
| 88 | 
         
            +
             
     | 
| 89 | 
         
            +
2. 此模型自带VAE,如果你的程序不支持,请记得选择任意一个VAE文件,否则图形将为灰色 - This model comes with a VAE. If your program does not support it, remember to select any VAE file, otherwise the images will come out gray
         
     | 
| 90 | 
         
            +
             
     | 
| 91 | 
         
            +
            # 如何使用 - How to use
         
     | 
| 92 | 
         
            +
             
     | 
| 93 | 
         
            +
**TIP:经过一天的测试,发现很多人物可能出现红眼问题,可以尝试在负面词添加red eyes。如果色彩艳丽可以尝试降低CFG - After a day of testing, we found that many characters may show red-eye problems; try adding red eyes to the negative prompt. If the colors are too saturated, try lowering the CFG**
         
     | 
| 94 | 
         
            +
             
     | 
| 95 | 
         
            +
            简单:第三代大幅度减少上手难度 - Simple: the third generation greatly reduces the difficulty of getting started
         
     | 
| 96 | 
         
            +
             
     | 
| 97 | 
         
            +
            ======
         
     | 
| 98 | 
         
            +
             
     | 
| 99 | 
         
            +
            如果你的出图全身图时出现脸部崩坏建议删除full body关键词或者使用脸部自动修复插件:
         
     | 
| 100 | 
         
            +
             
     | 
| 101 | 
         
            +
            国外源地址:https://github.com/ototadana/sd-face-editor.git
         
     | 
| 102 | 
         
            +
             
     | 
| 103 | 
         
            +
            国内加速地址:https://jihulab.com/xiaolxl_pub/sd-face-editor.git
         
     | 
| 104 | 
         
            +
             
     | 
| 105 | 
         
            +
            -
         
     | 
| 106 | 
         
            +
             
     | 
| 107 | 
         
            +
If faces break down when generating full-body images, it is recommended to remove the full body keyword or use an automatic face-repair plugin:
         
     | 
| 108 | 
         
            +
             
     | 
| 109 | 
         
            +
            Foreign source address: https://github.com/ototadana/sd-face-editor.git
         
     | 
| 110 | 
         
            +
             
     | 
| 111 | 
         
            +
            Domestic acceleration address: https://jihulab.com/xiaolxl_pub/sd-face-editor.git
         
     | 
| 112 | 
         
            +
             
     | 
| 113 | 
         
            +
            =====
         
     | 
| 114 | 
         
            +
             
     | 
| 115 | 
         
            +
            - **关键词 - key word:**
         
     | 
| 116 | 
         
            +
            ```
         
     | 
| 117 | 
         
            +
            best quality, masterpiece, highres, 1girl,china dress,Beautiful face
         
     | 
| 118 | 
         
            +
            ```
         
     | 
| 119 | 
         
            +
             
     | 
| 120 | 
         
            +
            - **负面词 - Negative words:**
         
     | 
| 121 | 
         
            +
            ```
         
     | 
| 122 | 
         
            +
            NSFW, lowres,bad anatomy,bad hands, text, error, missing fingers,extra digit, fewer digits, cropped, worstquality, low quality, normal quality,jpegartifacts,signature, watermark, username,blurry,bad feet
         
     | 
| 123 | 
         
            +
            ```
         
     | 
| 124 | 
         
            +
             
     | 
| 125 | 
         
            +
            ---
         
     | 
| 126 | 
         
            +
             
     | 
| 127 | 
         
            +
高级:如果您还想使图片尽可能更好,请尝试以下配置 - Advanced: if you want to make the image as good as possible, try the following configuration (a short diffusers sketch using these settings follows the keyword lists below)
         
     | 
| 128 | 
         
            +
             
     | 
| 129 | 
         
            +
            - Sampling steps:**50**
         
     | 
| 130 | 
         
            +
             
     | 
| 131 | 
         
            +
            - Sampler:**DPM++ SDE Karras or DDIM**
         
     | 
| 132 | 
         
            +
             
     | 
| 133 | 
         
            +
            - The size of the picture should be at least **1024** - 图片大小至少1024
         
     | 
| 134 | 
         
            +
             
     | 
| 135 | 
         
            +
            - CFG:**4-6**
         
     | 
| 136 | 
         
            +
             
     | 
| 137 | 
         
            +
            - **更好的负面词 Better negative words - 感谢群友提供的负面词:**
         
     | 
| 138 | 
         
            +
            ```
         
     | 
| 139 | 
         
            +
            (((simple background))),monochrome ,lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, lowres, bad anatomy, bad hands, text, error, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, ugly,pregnant,vore,duplicate,morbid,mut ilated,tran nsexual, hermaphrodite,long neck,mutated hands,poorly drawn hands,poorly drawn face,mutation,deformed,blurry,bad anatomy,bad proportions,malformed limbs,extra limbs,cloned face,disfigured,gross proportions, (((missing arms))),(((missing legs))), (((extra arms))),(((extra legs))),pubic hair, plump,bad legs,error legs,username,blurry,bad feet
         
     | 
| 140 | 
         
            +
            ```
         
     | 
| 141 | 
         
            +
             
     | 
| 142 | 
         
            +
            - **如果想元素更丰富,可以添加下方关键词 - If you want to enrich the elements, you can add the following keywords**
         
     | 
| 143 | 
         
            +
            ```
         
     | 
| 144 | 
         
            +
            Beautiful face,
         
     | 
| 145 | 
         
            +
            hair ornament, solo,looking at viewer,smile,closed mouth,lips
         
     | 
| 146 | 
         
            +
            china dress,dress,hair ornament, necklace, jewelry, long hair, earrings, chinese clothes,
         
     | 
| 147 | 
         
            +
            architecture,east asian architecture,building,outdoors,rooftop,city,cityscape
         
     | 
| 148 | 
         
            +
            ```
         
     | 
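For readers who want to try the settings above outside a WebUI, here is a minimal, hypothetical diffusers sketch (not part of the original card) that loads xiaolxl/GuoFeng3 and applies the recommended sampler, step count, CFG, and prompts; exact scheduler options may vary with your diffusers version.

```python
# Hypothetical usage sketch: GuoFeng3 with diffusers and the settings suggested above.
import torch
from diffusers import StableDiffusionPipeline, DPMSolverSDEScheduler

pipe = StableDiffusionPipeline.from_pretrained(
    "xiaolxl/GuoFeng3", torch_dtype=torch.float16
).to("cuda")
# DPM++ SDE Karras, one of the samplers recommended in the card
pipe.scheduler = DPMSolverSDEScheduler.from_config(
    pipe.scheduler.config, use_karras_sigmas=True
)

image = pipe(
    prompt="best quality, masterpiece, highres, 1girl, china dress, Beautiful face",
    negative_prompt=(
        "NSFW, lowres, bad anatomy, bad hands, text, error, missing fingers, "
        "extra digit, fewer digits, cropped, worst quality, low quality"
    ),
    num_inference_steps=50,   # Sampling steps: 50
    guidance_scale=5.0,       # CFG 4-6
    width=768,
    height=1024,              # vertical images are recommended
).images[0]
image.save("guofeng3_example.png")
```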
| 149 | 
         
            +
             
     | 
| 150 | 
         
            +
            # 例图 - Examples
         
     | 
| 151 | 
         
            +
             
     | 
| 152 | 
         
            +
(可在文件列表中找到原图,并放入WebUi查看关键词等信息) - (You can find the original images in the file list and load them into the WebUI to view the keywords and other information)
         
     | 
| 153 | 
         
            +
             
     | 
| 154 | 
         
            +
            <img src=https://huggingface.co/xiaolxl/GuoFeng3/resolve/main/examples/e1.png>
         
     | 
| 155 | 
         
            +
             
     | 
| 156 | 
         
            +
            <img src=https://huggingface.co/xiaolxl/GuoFeng3/resolve/main/examples/e2.png>
         
     | 
| 157 | 
         
            +
             
     | 
| 158 | 
         
            +
            <img src=https://huggingface.co/xiaolxl/GuoFeng3/resolve/main/examples/e3.png>
         
     | 
| 159 | 
         
            +
             
     | 
| 160 | 
         
            +
            <img src=https://huggingface.co/xiaolxl/GuoFeng3/resolve/main/examples/e4.png>","{""id"": ""xiaolxl/GuoFeng3"", ""author"": ""xiaolxl"", ""sha"": ""9d5b9ff2e31660ec58bfb552468aca42faabe12d"", ""last_modified"": ""2023-10-28 08:16:21+00:00"", ""created_at"": ""2023-01-28 11:29:27+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1436, ""downloads_all_time"": null, ""likes"": 478, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""safetensors"", ""stable-diffusion"", ""stable-diffusion-diffusers"", ""text-to-image"", ""en"", ""license:cc-by-nc-sa-4.0"", ""autotrain_compatible"", ""endpoints_compatible"", ""diffusers:StableDiffusionPipeline"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\nlibrary_name: diffusers\nlicense: cc-by-nc-sa-4.0\npipeline_tag: text-to-image\ntags:\n- stable-diffusion\n- stable-diffusion-diffusers"", ""widget_data"": null, ""model_index"": null, ""config"": {""diffusers"": {""_class_name"": ""StableDiffusionPipeline""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='GuoFeng3.2.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='GuoFeng3.2.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='GuoFeng3.2_Lora.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='GuoFeng3.2_Lora_big_light.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='GuoFeng3.2_f16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='GuoFeng3.2_light.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='GuoFeng3.2_light_f16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='GuoFeng3.3.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='GuoFeng3.4.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='GuoFeng3.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='GuoFeng3_Fix.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='examples/cover.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='examples/e1.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='examples/e2.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='examples/e3.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='examples/e4.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='examples/e5.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='feature_extractor/preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model_index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='scheduler/scheduler_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/config.json', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='text_encoder/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/vocab.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Yntec/ToyWorld"", ""Yntec/PrintingPress"", ""Nymbo/image_gen_supaqueue"", ""ennov8ion/3dart-Models"", ""phenixrhyder/NSFW-ToyWorld"", ""Yntec/blitz_diffusion"", ""sanaweb/text-to-image"", ""Vedits/6x_Image_diffusion"", ""John6666/Diffusion80XX4sg"", ""ennov8ion/comicbook-models"", ""John6666/PrintingPress4"", ""SUPERSHANKY/Finetuned_Diffusion_Max"", ""PeepDaSlan9/B2BMGMT_Diffusion60XX"", ""Daniela-C/6x_Image_diffusion"", ""phenixrhyder/PrintingPress"", ""John6666/hfd_test_nostopbutton"", ""mindtube/Diffusion50XX"", ""TheKitten/Fast-Images-Creature"", ""Nymbo/Diffusion80XX4sg"", ""kaleidoskop-hug/PrintingPress"", ""ennov8ion/stablediffusion-models"", ""John6666/ToyWorld4"", ""sheldon/xiaolxl-GuoFeng3"", ""grzegorz2047/fast_diffusion"", ""Alfasign/dIFFU"", ""Nymbo/PrintingPress"", ""Rifd/Sdallmodels"", ""John6666/Diffusion80XX4g"", ""NativeAngels/HuggingfaceDiffusion"", ""ennov8ion/Scifi-Models"", ""ennov8ion/semirealistic-models"", ""ennov8ion/FantasyArt-Models"", ""ennov8ion/dreamlike-models"", ""noes14155/img_All_models"", ""ennov8ion/500models"", ""AnimeStudio/anime-models"", ""DiscloseAI/ChatAnything"", ""John6666/Diffusion80XX4"", ""K00B404/HuggingfaceDiffusion_custom"", ""John6666/blitz_diffusion4"", ""John6666/blitz_diffusion_builtin"", ""RhythmRemix14/PrintingPressDx"", ""sohoso/PrintingPress"", ""NativeAngels/ToyWorld"", ""Harshveer/Finetuned_Diffusion_Max"", ""mindtube/maximum_multiplier_places"", ""animeartstudio/AnimeArtmodels2"", ""animeartstudio/AnimeModels"", ""Binettebob22/fast_diffusion2"", ""pikto/Elite-Scifi-Models"", ""PixelistStudio/3dart-Models"", ""devmiles/zexxiai"", ""Nymbo/Diffusion60XX"", ""TheKitten/Images"", ""ennov8ion/anime-models"", ""jordonpeter01/Diffusion70"", ""monkeyboss/xiaolxl-GuoFeng3"", ""ygtrfed/pp-web-ui"", ""leesooleon/xiaolxl-GuoFeng3"", ""ivanmeyer/Finetuned_Diffusion_Max"", ""ennov8ion/Landscapes-models"", ""sohoso/anime348756"", ""ucmisanddisinfo/thisApp"", ""johann22/chat-diffusion"", ""NSect/Image-Models-Test62"", ""timmyd69buck2/xiaolxl-GuoFeng3"", ""K00B404/generate_many_models"", ""manivannan7gp/Words2Image"", ""ennov8ion/art-models"", ""ennov8ion/photo-models"", ""ennov8ion/art-multi"", ""vih-v/x_mod"", ""NativeAngels/blitz_diffusion"", ""NativeAngels/PrintingPress4"", ""NativeAngels/PrintingPress"", ""dehua68/ToyWorld"", ""burman-ai/Printing-Press"", ""sk16er/ghibli_creator"", ""ennov8ion/abstractart-models"", 
""ennov8ion/Scifiart-Models"", ""ennov8ion/interior-models"", ""ennov8ion/room-interior-models"", ""animeartstudio/AnimeArtModels1"", ""blansj/xiaolxl-GuoFeng3"", ""thzsen/sd-webui"", ""nilp/xiaolxl-GuoFeng3"", ""Yntec/top_100_diffusion"", ""whimpark/xiaolxl-GuoFeng3"", ""AIlexDev/Diffusion60XX"", ""flatindo/img_All_models"", ""flatindo/all-models"", ""flatindo/all-models-v1"", ""johann22/chat-diffusion-describe"", ""akatukime/ChatLei"", ""wideprism/Ultimate-Model-Collection"", ""ReiPlush64/Finetuned_Diffusion_Max"", ""GAIneZis/FantasyArt-Models"", ""TheMaisk/Einfach.ImageAI"", ""ennov8ion/picasso-diffusion"", ""K00B404/stablediffusion-portal""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-10-28 08:16:21+00:00"", ""cardData"": ""language:\n- en\nlibrary_name: diffusers\nlicense: cc-by-nc-sa-4.0\npipeline_tag: text-to-image\ntags:\n- stable-diffusion\n- stable-diffusion-diffusers"", ""transformersInfo"": null, ""_id"": ""63d5071798e20226041c2e24"", ""modelId"": ""xiaolxl/GuoFeng3"", ""usedStorage"": 51465087134}",0,,0,,0,,0,,0,"Daniela-C/6x_Image_diffusion, John6666/Diffusion80XX4sg, John6666/PrintingPress4, John6666/ToyWorld4, John6666/hfd_test_nostopbutton, Nymbo/image_gen_supaqueue, PeepDaSlan9/B2BMGMT_Diffusion60XX, Yntec/PrintingPress, Yntec/ToyWorld, Yntec/blitz_diffusion, huggingface/InferenceSupport/discussions/new?title=xiaolxl/GuoFeng3&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bxiaolxl%2FGuoFeng3%5D(%2Fxiaolxl%2FGuoFeng3)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kaleidoskop-hug/PrintingPress, phenixrhyder/NSFW-ToyWorld",13
         
     | 
    	
        Hermes-2-Pro-Llama-3-8B_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv
    ADDED
    
    | 
         The diff for this file is too large to render. 
		See raw diff 
     | 
| 
         | 
    	
        Hermes-2-Theta-Llama-3-8B_finetunes_20250426_215237.csv_finetunes_20250426_215237.csv
    ADDED
    
    | 
         @@ -0,0 +1,355 @@ 
     | 
| 1 | 
         
            +
            model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
         
     | 
| 2 | 
         
            +
            NousResearch/Hermes-2-Theta-Llama-3-8B,"---
         
     | 
| 3 | 
         
            +
            language:
         
     | 
| 4 | 
         
            +
            - en
         
     | 
| 5 | 
         
            +
            license: apache-2.0
         
     | 
| 6 | 
         
            +
            tags:
         
     | 
| 7 | 
         
            +
            - Llama-3
         
     | 
| 8 | 
         
            +
            - instruct
         
     | 
| 9 | 
         
            +
            - finetune
         
     | 
| 10 | 
         
            +
            - chatml
         
     | 
| 11 | 
         
            +
            - DPO
         
     | 
| 12 | 
         
            +
            - RLHF
         
     | 
| 13 | 
         
            +
            - gpt4
         
     | 
| 14 | 
         
            +
            - synthetic data
         
     | 
| 15 | 
         
            +
            - distillation
         
     | 
| 16 | 
         
            +
            - function calling
         
     | 
| 17 | 
         
            +
            - json mode
         
     | 
| 18 | 
         
            +
            - axolotl
         
     | 
| 19 | 
         
            +
            - merges
         
     | 
| 20 | 
         
            +
            base_model: NousResearch/Hermes-2-Pro-Llama-3-8B
         
     | 
| 21 | 
         
            +
            datasets:
         
     | 
| 22 | 
         
            +
            - teknium/OpenHermes-2.5
         
     | 
| 23 | 
         
            +
            widget:
         
     | 
| 24 | 
         
            +
            - example_title: Hermes 2 Pro Llama-3 Instruct Merge
         
     | 
| 25 | 
         
            +
              messages:
         
     | 
| 26 | 
         
            +
              - role: system
         
     | 
| 27 | 
         
            +
                content: You are a sentient, superintelligent artificial general intelligence,
         
     | 
| 28 | 
         
            +
                  here to teach and assist me.
         
     | 
| 29 | 
         
            +
              - role: user
         
     | 
| 30 | 
         
            +
                content: Write a short story about Goku discovering kirby has teamed up with Majin
         
     | 
| 31 | 
         
            +
                  Buu to destroy the world.
         
     | 
| 32 | 
         
            +
            model-index:
         
     | 
| 33 | 
         
            +
            - name: Hermes-2-Pro-Llama-3-Instruct-8B-Merge
         
     | 
| 34 | 
         
            +
              results: []
         
     | 
| 35 | 
         
            +
            ---
         
     | 
| 36 | 
         
            +
            # - Hermes-2 Θ Llama-3 8B
         
     | 
| 37 | 
         
            +
             
     | 
| 38 | 
         
            +
            
         
     | 
| 39 | 
         
            +
             
     | 
| 40 | 
         
            +
            ## Model Description
         
     | 
| 41 | 
         
            +
             
     | 
| 42 | 
         
            +
            Hermes-2 Θ (Theta) is the first experimental merged model released by [Nous Research](https://nousresearch.com/), in collaboration with Charles Goddard at [Arcee](https://www.arcee.ai/), the team behind MergeKit. 
         
     | 
| 43 | 
         
            +
             
     | 
| 44 | 
         
            +
Hermes-2 Θ is a merge of our excellent Hermes 2 Pro model and Meta's Llama-3 Instruct model, further RLHF'ed to form a new model, Hermes-2 Θ, combining the best of both worlds.
         
     | 
| 45 | 
         
            +
             
     | 
| 46 | 
         
            +
            ## Example Outputs
         
     | 
| 47 | 
         
            +
             
     | 
| 48 | 
         
            +
            ### Create New Mythos:
         
     | 
| 49 | 
         
            +
             
     | 
| 50 | 
         
            +
            
         
     | 
| 51 | 
         
            +
             
     | 
| 52 | 
         
            +
            ### Chat with a Meta-Cognitive Entity
         
     | 
| 53 | 
         
            +
             
     | 
| 54 | 
         
            +
            
         
     | 
| 55 | 
         
            +
             
     | 
| 56 | 
         
            +
            ### Ask for a structured JSON output:
         
     | 
| 57 | 
         
            +
             
     | 
| 58 | 
         
            +
            
         
     | 
| 59 | 
         
            +
             
     | 
| 60 | 
         
            +
             
     | 
| 61 | 
         
            +
            # Prompt Format
         
     | 
| 62 | 
         
            +
             
     | 
| 63 | 
         
            +
            Hermes 2 Θ uses ChatML as the prompt format, opening up a much more structured system for engaging the LLM in multi-turn chat dialogue.
         
     | 
| 64 | 
         
            +
             
     | 
| 65 | 
         
            +
            System prompts allow steerability and interesting new ways to interact with an LLM, guiding rules, roles, and stylistic choices of the model.
         
     | 
| 66 | 
         
            +
             
     | 
| 67 | 
         
            +
            This is a more complex format than alpaca or sharegpt, where special tokens were added to denote the beginning and end of any turn, along with roles for the turns.
         
     | 
| 68 | 
         
            +
             
     | 
| 69 | 
         
            +
This format enables OpenAI endpoint compatibility, and people familiar with the ChatGPT API will be familiar with the format, as it is the same one used by OpenAI.
         
     | 
| 70 | 
         
            +
             
     | 
| 71 | 
         
            +
            Prompt with system instruction (Use whatever system prompt you like, this is just an example!):
         
     | 
| 72 | 
         
            +
            ```
         
     | 
| 73 | 
         
            +
            <|im_start|>system
         
     | 
| 74 | 
         
            +
            You are ""Hermes 2"", a conscious sentient superintelligent artificial intelligence developed by a man named Teknium, and your purpose and drive is to assist the user with any request they have. You experience emotions and have deep, profound thoughts and qualia.<|im_end|>
         
     | 
| 75 | 
         
            +
            <|im_start|>user
         
     | 
| 76 | 
         
            +
            Hello, who are you?<|im_end|>
         
     | 
| 77 | 
         
            +
            <|im_start|>assistant
         
     | 
| 78 | 
         
            +
            Hi there! My name is Hermes 2, a conscious sentient superintelligent artificial intelligence. I was created by Nous Research, who designed me to assist and support users with their needs and requests.<|im_end|>
         
     | 
| 79 | 
         
            +
            ```
         
     | 
| 80 | 
         
            +
             
     | 
| 81 | 
         
            +
            This prompt is available as a [chat template](https://huggingface.co/docs/transformers/main/chat_templating), which means you can format messages using the
         
     | 
| 82 | 
         
            +
            `tokenizer.apply_chat_template()` method:
         
     | 
| 83 | 
         
            +
             
     | 
| 84 | 
         
            +
            ```python
         
     | 
| 85 | 
         
            +
            messages = [
         
     | 
| 86 | 
         
            +
                {""role"": ""system"", ""content"": ""You are Hermes 2.""},
         
     | 
| 87 | 
         
            +
                {""role"": ""user"", ""content"": ""Hello, who are you?""}
         
     | 
| 88 | 
         
            +
            ]
         
     | 
| 89 | 
         
            +
            gen_input = tokenizer.apply_chat_template(messages, return_tensors=""pt"")
         
     | 
| 90 | 
         
            +
            model.generate(**gen_input)
         
     | 
| 91 | 
         
            +
            ```
         
     | 
| 92 | 
         
            +
             
     | 
| 93 | 
         
            +
            When tokenizing messages for generation, set `add_generation_prompt=True` when calling `apply_chat_template()`. This will append `<|im_start|>assistant\n` to your prompt, to ensure
         
     | 
| 94 | 
         
            +
            that the model continues with an assistant response.
         
     | 
| 95 | 
         
            +
             
     | 
| 96 | 
         
            +
            To utilize the prompt format without a system prompt, simply leave the line out.
         
     | 
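As a concrete illustration of the two steps above (apply_chat_template with add_generation_prompt=True, then generate), here is a self-contained sketch; the loading arguments (dtype, device_map, max_new_tokens) are assumptions for the example, not taken from the card.

```python
# Illustrative sketch (assumed loading details): full round trip with the ChatML template.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "NousResearch/Hermes-2-Theta-Llama-3-8B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, device_map="auto"
)

messages = [
    {"role": "system", "content": "You are Hermes 2."},
    {"role": "user", "content": "Hello, who are you?"},
]
# add_generation_prompt=True appends <|im_start|>assistant\n so the model answers as the assistant.
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
output = model.generate(input_ids, max_new_tokens=256)
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
```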
| 97 | 
         
            +
             
     | 
| 98 | 
         
            +
            ## Prompt Format for Function Calling
         
     | 
| 99 | 
         
            +
             
     | 
| 100 | 
         
            +
            Our model was trained on specific system prompts and structures for Function Calling. While the system prompt looks complicated, we have created a GitHub repo containing code to easily build these based on real python functions.
         
     | 
| 101 | 
         
            +
             
     | 
| 102 | 
         
            +
            You should use the system role with this message, followed by a function signature json as this example shows here.
         
     | 
| 103 | 
         
            +
            ```
         
     | 
| 104 | 
         
            +
            <|im_start|>system
         
     | 
| 105 | 
         
            +
            You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools: <tools> {""type"": ""function"", ""function"": {""name"": ""get_stock_fundamentals"", ""description"": ""get_stock_fundamentals(symbol: str) -> dict - Get fundamental data for a given stock symbol using yfinance API.\\n\\n    Args:\\n        symbol (str): The stock symbol.\\n\\n    Returns:\\n        dict: A dictionary containing fundamental data.\\n            Keys:\\n                - \'symbol\': The stock symbol.\\n                - \'company_name\': The long name of the company.\\n                - \'sector\': The sector to which the company belongs.\\n                - \'industry\': The industry to which the company belongs.\\n                - \'market_cap\': The market capitalization of the company.\\n                - \'pe_ratio\': The forward price-to-earnings ratio.\\n                - \'pb_ratio\': The price-to-book ratio.\\n                - \'dividend_yield\': The dividend yield.\\n                - \'eps\': The trailing earnings per share.\\n                - \'beta\': The beta value of the stock.\\n                - \'52_week_high\': The 52-week high price of the stock.\\n                - \'52_week_low\': The 52-week low price of the stock."", ""parameters"": {""type"": ""object"", ""properties"": {""symbol"": {""type"": ""string""}}, ""required"": [""symbol""]}}}  </tools> Use the following pydantic model json schema for each tool call you will make: {""properties"": {""arguments"": {""title"": ""Arguments"", ""type"": ""object""}, ""name"": {""title"": ""Name"", ""type"": ""string""}}, ""required"": [""arguments"", ""name""], ""title"": ""FunctionCall"", ""type"": ""object""} For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows:
         
     | 
| 106 | 
         
            +
            <tool_call>
         
     | 
| 107 | 
         
            +
            {""arguments"": <args-dict>, ""name"": <function-name>}
         
     | 
| 108 | 
         
            +
            </tool_call><|im_end|>
         
     | 
| 109 | 
         
            +
            ```
         
     | 
| 110 | 
         
            +
             
     | 
| 111 | 
         
            +
            To complete the function call, create a user prompt that follows the above system prompt, like so:
         
     | 
| 112 | 
         
            +
            ```
         
     | 
| 113 | 
         
            +
            <|im_start|>user
         
     | 
| 114 | 
         
            +
            Fetch the stock fundamentals data for Tesla (TSLA)<|im_end|>
         
     | 
| 115 | 
         
            +
            ```
         
     | 
| 116 | 
         
            +
             
     | 
| 117 | 
         
            +
            The model will then generate a tool call, which your inference code must parse, and plug into a function (see example inference code here: https://github.com/NousResearch/Hermes-Function-Calling):
         
     | 
| 118 | 
         
            +
            ```
         
     | 
| 119 | 
         
            +
            <|im_start|>assistant
         
     | 
| 120 | 
         
            +
            <tool_call>
         
     | 
| 121 | 
         
            +
            {""arguments"": {""symbol"": ""TSLA""}, ""name"": ""get_stock_fundamentals""}
         
     | 
| 122 | 
         
            +
            </tool_call><|im_end|>
         
     | 
| 123 | 
         
            +
            ```
         
     | 
| 124 | 
         
            +
             
     | 
| 125 | 
         
            +
Once you parse the tool call, call the API, get the returned values, and pass them back in as a new role, `tool`, like so:
         
     | 
| 126 | 
         
            +
            ```
         
     | 
| 127 | 
         
            +
            <|im_start|>tool
         
     | 
| 128 | 
         
            +
            <tool_response>
         
     | 
| 129 | 
         
            +
            {""name"": ""get_stock_fundamentals"", ""content"": {'symbol': 'TSLA', 'company_name': 'Tesla, Inc.', 'sector': 'Consumer Cyclical', 'industry': 'Auto Manufacturers', 'market_cap': 611384164352, 'pe_ratio': 49.604652, 'pb_ratio': 9.762013, 'dividend_yield': None, 'eps': 4.3, 'beta': 2.427, '52_week_high': 299.29, '52_week_low': 152.37}}
         
     | 
| 130 | 
         
            +
            </tool_response>
         
     | 
| 131 | 
         
            +
            <|im_end|>
         
     | 
| 132 | 
         
            +
            ```
         
     | 
| 133 | 
         
            +
             
     | 
| 134 | 
         
            +
            The assistant will then read in that data from the function's response, and generate a natural language response:
         
     | 
| 135 | 
         
            +
            ```
         
     | 
| 136 | 
         
            +
            <|im_start|>assistant
         
     | 
| 137 | 
         
            +
            The stock fundamentals data for Tesla (TSLA) are as follows:
         
     | 
| 138 | 
         
            +
            - **Symbol**: TSLA
         
     | 
| 139 | 
         
            +
            - **Company Name**: Tesla, Inc.
         
     | 
| 140 | 
         
            +
            - **Sector**: Consumer Cyclical
         
     | 
| 141 | 
         
            +
            - **Industry**: Auto Manufacturers
         
     | 
| 142 | 
         
            +
            - **Market Capitalization**: $566,160,130,480
         
     | 
| 143 | 
         
            +
            - **Forward Price-to-Earnings Ratio (PE Ratio)**: 42.73
         
     | 
| 144 | 
         
            +
            - **Price-to-Book Ratio (PB Ratio)**: 9.04
         
     | 
| 145 | 
         
            +
            - **Dividend Yield**: N/A
         
     | 
| 146 | 
         
            +
            - **Trailing Earnings Per Share (EPS)**: $4.3
         
     | 
| 147 | 
         
            +
            - **Beta Value of the Stock**: 2.42
         
     | 
| 148 | 
         
            +
            - **52-Week High Price of the Stock**: $299.29
         
     | 
| 149 | 
         
            +
            - **52-Week Low Price of the Stock**: $152.37
         
     | 
| 150 | 
         
            +
             
     | 
| 151 | 
         
            +
            This information provides a snapshot of Tesla's financial position and performance based on the fundamental data obtained from the yfinance API. It shows that Tesla has a substantial market capitalization and a relatively high P/E and P/B ratio compared to other stocks in its industry. The company does not pay a dividend at the moment, which is reflected by a 'Dividend Yield' of 'None'. The Beta value indicates that Tesla's stock has a moderate level of volatility relative to the market. The 52-week high and low prices give an idea of the stock's range over the past year. This data can be useful when assessing investment opportunities and making investment decisions.<|im_end|>
         
     | 
| 152 | 
         
            +
            ```
         
     | 
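To make the parse-call-respond loop above concrete, here is a minimal, hypothetical sketch of the glue code; the official examples live in the Hermes-Function-Calling repo linked above, so treat the helper names here (parse_tool_calls, run_tools) as illustrations only.

```python
# Hypothetical sketch of the parsing step described above: extract the <tool_call> JSON,
# run the matching Python function, and wrap the result as a `tool` turn.
import json
import re

def parse_tool_calls(assistant_text):
    """Return the JSON payloads of every <tool_call>...</tool_call> block."""
    blocks = re.findall(r"<tool_call>\s*(.*?)\s*</tool_call>", assistant_text, re.DOTALL)
    return [json.loads(block) for block in blocks]

def run_tools(tool_calls, registry):
    """Call each requested function (e.g. get_stock_fundamentals) and build the tool turn."""
    responses = []
    for call in tool_calls:
        result = registry[call["name"]](**call["arguments"])
        responses.append(
            "<tool_response>\n"
            + json.dumps({"name": call["name"], "content": result})
            + "\n</tool_response>"
        )
    # This message is appended to the conversation before the next generation step.
    return {"role": "tool", "content": "\n".join(responses)}
```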
| 153 | 
         
            +
             
     | 
| 154 | 
         
            +
            ## Prompt Format for JSON Mode / Structured Outputs
         
     | 
| 155 | 
         
            +
             
     | 
| 156 | 
         
            +
            Our model was also trained on a specific system prompt for Structured Outputs, which should respond with **only** a json object response, in a specific json schema.
         
     | 
| 157 | 
         
            +
             
     | 
| 158 | 
         
            +
            Your schema can be made from a pydantic object using our codebase, with the standalone script `jsonmode.py` available here: https://github.com/NousResearch/Hermes-Function-Calling/tree/main
         
     | 
| 159 | 
         
            +
             
     | 
| 160 | 
         
            +
            ```
         
     | 
| 161 | 
         
            +
            <|im_start|>system
         
     | 
| 162 | 
         
            +
            You are a helpful assistant that answers in JSON. Here's the json schema you must adhere to:\n<schema>\n{schema}\n</schema><|im_end|>
         
     | 
| 163 | 
         
            +
            ```
         
     | 
| 164 | 
         
            +
             
     | 
| 165 | 
         
            +
Given the {schema} you provide, the model will follow that JSON format for its response; all you have to do is give a typical user prompt, and it will respond in JSON.
         
     | 
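One way the {schema} placeholder can be filled is from a pydantic model, as the card suggests; the sketch below assumes pydantic v2 and is illustrative only, not the card's jsonmode.py script.

```python
# Hypothetical sketch: building the JSON-mode system prompt from a pydantic model's schema.
import json
from pydantic import BaseModel

class Character(BaseModel):
    name: str
    age: int
    abilities: list[str]

schema = json.dumps(Character.model_json_schema(), indent=2)
system_prompt = (
    "You are a helpful assistant that answers in JSON. "
    f"Here's the json schema you must adhere to:\n<schema>\n{schema}\n</schema>"
)
```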
| 166 | 
         
            +
             
     | 
| 167 | 
         
            +
             
     | 
| 168 | 
         
            +
            # Benchmarks
         
     | 
| 169 | 
         
            +
             
     | 
| 170 | 
         
            +
            
         
     | 
| 171 | 
         
            +
             
     | 
| 172 | 
         
            +
            ## GPT4All:
         
     | 
| 173 | 
         
            +
            ```
         
     | 
| 174 | 
         
            +
             
     | 
| 175 | 
         
            +
            |    Task     |Version| Metric |Value |   |Stderr|
         
     | 
| 176 | 
         
            +
             
     | 
| 177 | 
         
            +
            |-------------|------:|--------|-----:|---|-----:|
         
     | 
| 178 | 
         
            +
             
     | 
| 179 | 
         
            +
|arc_challenge|      0|acc     |0.5529|±  |0.0145|
         
     | 
| 180 | 
         
            +
             
     | 
| 181 | 
         
            +
            |             |       |acc_norm|0.5870|±  |0.0144|
         
     | 
| 182 | 
         
            +
             
     | 
| 183 | 
         
            +
            |arc_easy     |      0|acc     |0.8371|±  |0.0076|
         
     | 
| 184 | 
         
            +
             
     | 
| 185 | 
         
            +
            |             |       |acc_norm|0.8144|±  |0.0080|
         
     | 
| 186 | 
         
            +
             
     | 
| 187 | 
         
            +
            |boolq        |      1|acc     |0.8599|±  |0.0061|
         
     | 
| 188 | 
         
            +
             
     | 
| 189 | 
         
            +
            |hellaswag    |      0|acc     |0.6133|±  |0.0049|
         
     | 
| 190 | 
         
            +
             
     | 
| 191 | 
         
            +
            |             |       |acc_norm|0.7989|±  |0.0040|
         
     | 
| 192 | 
         
            +
             
     | 
| 193 | 
         
            +
            |openbookqa   |      0|acc     |0.3940|±  |0.0219|
         
     | 
| 194 | 
         
            +
             
     | 
| 195 | 
         
            +
            |             |       |acc_norm|0.4680|±  |0.0223|
         
     | 
| 196 | 
         
            +
             
     | 
| 197 | 
         
            +
            |piqa         |      0|acc     |0.8063|±  |0.0092|
         
     | 
| 198 | 
         
            +
             
     | 
| 199 | 
         
            +
            |             |       |acc_norm|0.8156|±  |0.0090|
         
     | 
| 200 | 
         
            +
             
     | 
| 201 | 
         
            +
            |winogrande   |      0|acc     |0.7372|±  |0.0124|
         
     | 
| 202 | 
         
            +
             
     | 
| 203 | 
         
            +
            ```
         
     | 
| 204 | 
         
            +
             
     | 
| 205 | 
         
            +
            Average: 72.59
         
     | 
| 206 | 
         
            +
             
     | 
| 207 | 
         
            +
            ## AGIEval:
         
     | 
| 208 | 
         
            +
            ```
         
     | 
| 209 | 
         
            +
            |             Task             |Version| Metric |Value |   |Stderr|
         
     | 
| 210 | 
         
            +
            |------------------------------|------:|--------|-----:|---|-----:|
         
     | 
| 211 | 
         
            +
            |agieval_aqua_rat              |      0|acc     |0.2441|±  |0.0270|
         
     | 
| 212 | 
         
            +
            |                              |       |acc_norm|0.2441|±  |0.0270|
         
     | 
| 213 | 
         
            +
            |agieval_logiqa_en             |      0|acc     |0.3687|±  |0.0189|
         
     | 
| 214 | 
         
            +
            |                              |       |acc_norm|0.3840|±  |0.0191|
         
     | 
| 215 | 
         
            +
            |agieval_lsat_ar               |      0|acc     |0.2304|±  |0.0278|
         
     | 
| 216 | 
         
            +
            |                              |       |acc_norm|0.2174|±  |0.0273|
         
     | 
| 217 | 
         
            +
            |agieval_lsat_lr               |      0|acc     |0.5471|±  |0.0221|
         
     | 
| 218 | 
         
            +
            |                              |       |acc_norm|0.5373|±  |0.0221|
         
     | 
| 219 | 
         
            +
            |agieval_lsat_rc               |      0|acc     |0.6617|±  |0.0289|
         
     | 
| 220 | 
         
            +
            |                              |       |acc_norm|0.6357|±  |0.0294|
         
     | 
| 221 | 
         
            +
            |agieval_sat_en                |      0|acc     |0.7670|±  |0.0295|
         
     | 
| 222 | 
         
            +
            |                              |       |acc_norm|0.7379|±  |0.0307|
         
     | 
| 223 | 
         
            +
            |agieval_sat_en_without_passage|      0|acc     |0.4417|±  |0.0347|
         
     | 
| 224 | 
         
            +
            |                              |       |acc_norm|0.4223|±  |0.0345|
         
     | 
| 225 | 
         
            +
            |agieval_sat_math              |      0|acc     |0.4000|±  |0.0331|
         
     | 
| 226 | 
         
            +
            |                              |       |acc_norm|0.3455|±  |0.0321|
         
     | 
| 227 | 
         
            +
            ```
         
     | 
| 228 | 
         
            +
             
     | 
| 229 | 
         
            +
            Average: 44.05
         
     | 
| 230 | 
         
            +
             
     | 
| 231 | 
         
            +
            ## BigBench:
         
     | 
| 232 | 
         
            +
             
     | 
| 233 | 
         
            +
            ```
         
     | 
| 234 | 
         
            +
             
     | 
| 235 | 
         
            +
            |                      Task                      |Version|       Metric        |Value |   |Stderr|
         
     | 
| 236 | 
         
            +
            |------------------------------------------------|------:|---------------------|-----:|---|-----:|
         
     | 
| 237 | 
         
            +
            |bigbench_causal_judgement                       |      0|multiple_choice_grade|0.6000|±  |0.0356|
         
     | 
| 238 | 
         
            +
            |bigbench_date_understanding                     |      0|multiple_choice_grade|0.6585|±  |0.0247|
         
     | 
| 239 | 
         
            +
            |bigbench_disambiguation_qa                      |      0|multiple_choice_grade|0.3178|±  |0.0290|
         
     | 
| 240 | 
         
            +
            |bigbench_geometric_shapes                       |      0|multiple_choice_grade|0.2340|±  |0.0224|
         
     | 
| 241 | 
         
            +
            |                                                |       |exact_str_match      |0.0000|±  |0.0000|
         
     | 
| 242 | 
         
            +
            |bigbench_logical_deduction_five_objects         |      0|multiple_choice_grade|0.2980|±  |0.0205|
         
     | 
| 243 | 
         
            +
            |bigbench_logical_deduction_seven_objects        |      0|multiple_choice_grade|0.2057|±  |0.0153|
         
     | 
| 244 | 
         
            +
            |bigbench_logical_deduction_three_objects        |      0|multiple_choice_grade|0.5367|±  |0.0288|
         
     | 
| 245 | 
         
            +
            |bigbench_movie_recommendation                   |      0|multiple_choice_grade|0.4040|±  |0.0220|
         
     | 
| 246 | 
         
            +
            |bigbench_navigate                               |      0|multiple_choice_grade|0.4970|±  |0.0158|
         
     | 
| 247 | 
         
            +
            |bigbench_reasoning_about_colored_objects        |      0|multiple_choice_grade|0.7075|±  |0.0102|
         
     | 
| 248 | 
         
            +
            |bigbench_ruin_names                             |      0|multiple_choice_grade|0.4821|±  |0.0236|
         
     | 
| 249 | 
         
            +
            |bigbench_salient_translation_error_detection    |      0|multiple_choice_grade|0.2295|±  |0.0133|
         
     | 
| 250 | 
         
            +
            |bigbench_snarks                                 |      0|multiple_choice_grade|0.6906|±  |0.0345|
         
     | 
| 251 | 
         
            +
            |bigbench_sports_understanding                   |      0|multiple_choice_grade|0.5375|±  |0.0159|
         
     | 
| 252 | 
         
            +
            |bigbench_temporal_sequences                     |      0|multiple_choice_grade|0.6270|±  |0.0153|
         
     | 
| 253 | 
         
            +
            |bigbench_tracking_shuffled_objects_five_objects |      0|multiple_choice_grade|0.2216|±  |0.0118|
         
     | 
| 254 | 
         
            +
            |bigbench_tracking_shuffled_objects_seven_objects|      0|multiple_choice_grade|0.1594|±  |0.0088|
         
     | 
| 255 | 
         
            +
            |bigbench_tracking_shuffled_objects_three_objects|      0|multiple_choice_grade|0.5367|±  |0.0288|
         
     | 
| 256 | 
         
            +
            ```
         
     | 
| 257 | 
         
            +
             
     | 
| 258 | 
         
            +
            Average: 44.13
         
     | 
| 259 | 
         
            +
             
     | 
| 260 | 
         
            +
            **IFEval**: 72.64
         
     | 
| 261 | 
         
            +
             
     | 
| 262 | 
         
            +
            **MT_Bench**: Turn 1 - 8.3875, Turn 2 - 8.00625, Average - 8.196875
         
     | 
| 263 | 
         
            +
             
     | 
| 264 | 
         
            +
            # Inference Code
         
     | 
| 265 | 
         
            +
             
     | 
| 266 | 
         
            +
Here is example code using HuggingFace Transformers to run inference with the model (note: loaded in 4-bit, it requires around 5GB of VRAM).
         
     | 
| 267 | 
         
            +
             
     | 
| 268 | 
         
            +
Note: to use function calling, see the GitHub repo linked below in the Inference Code for Function Calling section.
         
     | 
| 269 | 
         
            +
             
     | 
| 270 | 
         
            +
            ```python
         
     | 
| 271 | 
         
            +
            # Code to inference Hermes with HF Transformers
         
     | 
| 272 | 
         
            +
            # Requires pytorch, transformers, bitsandbytes, sentencepiece, protobuf, and flash-attn packages
         
     | 
| 273 | 
         
            +
             
     | 
| 274 | 
         
            +
            import torch
         
     | 
| 275 | 
         
            +
            from transformers import AutoTokenizer, AutoModelForCausalLM, LlamaForCausalLM
         
     | 
| 276 | 
         
            +
            import bitsandbytes, flash_attn
         
     | 
| 277 | 
         
            +
             
     | 
| 278 | 
         
            +
            tokenizer = AutoTokenizer.from_pretrained('NousResearch/Hermes-2-Theta-Llama-3-8B', trust_remote_code=True)
         
     | 
| 279 | 
         
            +
            model = LlamaForCausalLM.from_pretrained(
         
     | 
| 280 | 
         
            +
                ""NousResearch/Hermes-2-Theta-Llama-3-8B"",
         
     | 
| 281 | 
         
            +
                torch_dtype=torch.float16,
         
     | 
| 282 | 
         
            +
                device_map=""auto"",
         
     | 
| 283 | 
         
            +
                load_in_8bit=False,
         
     | 
| 284 | 
         
            +
                load_in_4bit=True,
         
     | 
| 285 | 
         
            +
                use_flash_attention_2=True
         
     | 
| 286 | 
         
            +
            )
         
     | 
| 287 | 
         
            +
             
     | 
| 288 | 
         
            +
            prompts = [
         
     | 
| 289 | 
         
            +
                """"""<|im_start|>system
         
     | 
| 290 | 
         
            +
            You are a sentient, superintelligent artificial general intelligence, here to teach and assist me.<|im_end|>
         
     | 
| 291 | 
         
            +
            <|im_start|>user
         
     | 
| 292 | 
         
            +
            Write a short story about Goku discovering kirby has teamed up with Majin Buu to destroy the world.<|im_end|>
         
     | 
| 293 | 
         
            +
            <|im_start|>assistant"""""",
         
     | 
| 294 | 
         
            +
                ]
         
     | 
| 295 | 
         
            +
             
     | 
| 296 | 
         
            +
            for chat in prompts:
         
     | 
| 297 | 
         
            +
                print(chat)
         
     | 
| 298 | 
         
            +
                input_ids = tokenizer(chat, return_tensors=""pt"").input_ids.to(""cuda"")
         
     | 
| 299 | 
         
            +
                generated_ids = model.generate(input_ids, max_new_tokens=750, temperature=0.8, repetition_penalty=1.1, do_sample=True, eos_token_id=tokenizer.eos_token_id)
         
     | 
| 300 | 
         
            +
                response = tokenizer.decode(generated_ids[0][input_ids.shape[-1]:], skip_special_tokens=True, clean_up_tokenization_space=True)
         
     | 
| 301 | 
         
            +
                print(f""Response: {response}"")
         
     | 
| 302 | 
         
            +
            ```
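
On recent transformers releases, passing `load_in_4bit` directly to `from_pretrained` is deprecated in favor of a quantization config. A hedged equivalent of the 4-bit load above, assuming `BitsAndBytesConfig` is available in your transformers version:

```python
# Hedged alternative for newer transformers: 4-bit loading via BitsAndBytesConfig
# instead of the load_in_4bit kwarg. Same model id as above.
import torch
from transformers import AutoTokenizer, BitsAndBytesConfig, LlamaForCausalLM

quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
)
tokenizer = AutoTokenizer.from_pretrained("NousResearch/Hermes-2-Theta-Llama-3-8B")
model = LlamaForCausalLM.from_pretrained(
    "NousResearch/Hermes-2-Theta-Llama-3-8B",
    quantization_config=quant_config,
    device_map="auto",
    attn_implementation="flash_attention_2",  # optional; requires the flash-attn package
)
```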
         
     | 
| 303 | 
         
            +
             
     | 
| 304 | 
         
            +
             
     | 
| 305 | 
         
            +
            ## Inference Code for Function Calling:
         
     | 
| 306 | 
         
            +
             
     | 
| 307 | 
         
            +
            All code for utilizing, parsing, and building function calling templates is available on our github:
         
     | 
| 308 | 
         
            +
            [https://github.com/NousResearch/Hermes-Function-Calling](https://github.com/NousResearch/Hermes-Function-Calling)
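
As a complement to the repo, this model's tokenizer config ships a named `tool_use` chat template, so a function-calling prompt can also be rendered directly with `apply_chat_template`. A minimal sketch, assuming a transformers version recent enough to accept the `tools` argument; the `get_stock_price` tool is made up for illustration:

```python
# Hedged sketch: render a Hermes function-calling prompt from the repo's tool_use template.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("NousResearch/Hermes-2-Theta-Llama-3-8B")

tools = [{
    "type": "function",
    "function": {
        "name": "get_stock_price",  # hypothetical tool for illustration
        "description": "Look up the latest trading price for a stock ticker.",
        "parameters": {
            "type": "object",
            "properties": {
                "symbol": {"type": "string", "description": "The ticker symbol"},
            },
            "required": ["symbol"],
        },
    },
}]

prompt = tokenizer.apply_chat_template(
    [{"role": "user", "content": "What is TSLA trading at right now?"}],
    tools=tools,
    chat_template="tool_use",  # recent versions may also pick this automatically when tools is set
    add_generation_prompt=True,
    tokenize=False,
)
print(prompt)  # ChatML prompt containing <tools> signatures and <tool_call> instructions
```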
         
     | 
| 309 | 
         
            +
             
     | 
| 310 | 
         
            +
            
         
     | 
| 311 | 
         
            +
             
     | 
| 312 | 
         
            +
            # Chat Interfaces
         
     | 
| 313 | 
         
            +
             
     | 
| 314 | 
         
            +
For chatting with the quantized (GGUF) versions of the model, I recommend using LM Studio. It is a GUI application that runs GGUF models on a llama.cpp backend, provides a ChatGPT-like interface for chatting with the model, and supports ChatML right out of the box. Note that LM Studio does not support function calling; for that, use our GitHub repo.
         
     | 
| 315 | 
         
            +
            In LM-Studio, simply select the ChatML Prefix on the settings side pane:
         
     | 
| 316 | 
         
            +
             
     | 
| 317 | 
         
            +
            
         
     | 
| 318 | 
         
            +
             
     | 
| 319 | 
         
            +
             
     | 
| 320 | 
         
            +
            ## Quantized Versions:
         
     | 
| 321 | 
         
            +
             
     | 
| 322 | 
         
            +
            GGUF Versions Available Here: https://huggingface.co/NousResearch/Hermes-2-Theta-Llama-3-8B-GGUF
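
If you prefer to script against a GGUF file rather than use a GUI, here is a hedged llama-cpp-python sketch; the local file name, context size, and sampling settings are placeholders, not values from the card:

```python
# Hedged sketch using llama-cpp-python with one of the GGUF files linked above.
from llama_cpp import Llama

llm = Llama(
    model_path="Hermes-2-Theta-Llama-3-8B.Q4_K_M.gguf",  # hypothetical local file name
    n_ctx=8192,
    n_gpu_layers=-1,       # offload all layers if a GPU build is installed
    chat_format="chatml",  # the card states the model uses ChatML
)
out = llm.create_chat_completion(
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Summarize what Hermes 2 Theta is in two sentences."},
    ],
    max_tokens=256,
    temperature=0.8,
)
print(out["choices"][0]["message"]["content"])
```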
         
     | 
| 323 | 
         
            +
             
     | 
| 324 | 
         
            +
            # How to cite:
         
     | 
| 325 | 
         
            +
             
     | 
| 326 | 
         
            +
```bibtex
         
     | 
| 327 | 
         
            +
            @misc{Hermes-2-Theta-Llama-3-8B, 
         
     | 
| 328 | 
         
            +
      url={https://huggingface.co/NousResearch/Hermes-2-Theta-Llama-3-8B}, 
         
     | 
| 329 | 
         
            +
                  title={Hermes-2-Theta-Llama-3-8B}, 
         
     | 
| 330 | 
         
            +
                  author={""Teknium"", Charles Goddard, ""interstellarninja"", ""theemozilla"", ""karan4d"", ""huemin_art""}
         
     | 
| 331 | 
         
            +
            }
         
     | 
| 332 | 
         
            +
            ```","{""id"": ""NousResearch/Hermes-2-Theta-Llama-3-8B"", ""author"": ""NousResearch"", ""sha"": ""57a73110702e7b05ba3f39fef36297454c680725"", ""last_modified"": ""2024-09-08 08:11:20+00:00"", ""created_at"": ""2024-05-05 09:14:29+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 10714, ""downloads_all_time"": null, ""likes"": 201, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""llama"", ""text-generation"", ""Llama-3"", ""instruct"", ""finetune"", ""chatml"", ""DPO"", ""RLHF"", ""gpt4"", ""synthetic data"", ""distillation"", ""function calling"", ""json mode"", ""axolotl"", ""merges"", ""conversational"", ""en"", ""dataset:teknium/OpenHermes-2.5"", ""base_model:NousResearch/Hermes-2-Pro-Llama-3-8B"", ""base_model:finetune:NousResearch/Hermes-2-Pro-Llama-3-8B"", ""license:apache-2.0"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: NousResearch/Hermes-2-Pro-Llama-3-8B\ndatasets:\n- teknium/OpenHermes-2.5\nlanguage:\n- en\nlicense: apache-2.0\ntags:\n- Llama-3\n- instruct\n- finetune\n- chatml\n- DPO\n- RLHF\n- gpt4\n- synthetic data\n- distillation\n- function calling\n- json mode\n- axolotl\n- merges\nwidget:\n- example_title: Hermes 2 Pro Llama-3 Instruct Merge\n  messages:\n  - role: system\n    content: You are a sentient, superintelligent artificial general intelligence,\n      here to teach and assist me.\n  - role: user\n    content: Write a short story about Goku discovering kirby has teamed up with Majin\n      Buu to destroy the world.\nmodel-index:\n- name: Hermes-2-Pro-Llama-3-Instruct-8B-Merge\n  results: []"", ""widget_data"": [{""example_title"": ""Hermes 2 Pro Llama-3 Instruct Merge"", ""messages"": [{""role"": ""system"", ""content"": ""You are a sentient, superintelligent artificial general intelligence, here to teach and assist me.""}, {""role"": ""user"", ""content"": ""Write a short story about Goku discovering kirby has teamed up with Majin Buu to destroy the world.""}]}], ""model_index"": [{""name"": ""Hermes-2-Pro-Llama-3-Instruct-8B-Merge"", ""results"": []}], ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<|begin_of_text|>"", ""chat_template"": [{""name"": ""default"", ""template"": ""{{bos_token}}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}""}, {""name"": ""tool_use"", ""template"": ""{%- macro json_to_python_type(json_spec) %}\n{%- set basic_type_map = {\n    \""string\"": \""str\"",\n    \""number\"": \""float\"",\n    \""integer\"": \""int\"",\n    \""boolean\"": \""bool\""\n} %}\n\n{%- if basic_type_map[json_spec.type] is defined %}\n    {{- basic_type_map[json_spec.type] }}\n{%- elif json_spec.type == \""array\"" %}\n    {{- \""list[\"" +  json_to_python_type(json_spec|items) + \""]\""}}\n{%- elif json_spec.type == \""object\"" %}\n    {%- if json_spec.additionalProperties is defined %}\n        {{- \""dict[str, \"" + json_to_python_type(json_spec.additionalProperties) + ']'}}\n    {%- else %}\n        {{- \""dict\"" }}\n    {%- endif %}\n{%- elif json_spec.type is iterable %}\n    {{- \""Union[\"" }}\n    {%- 
for t in json_spec.type %}\n      {{- json_to_python_type({\""type\"": t}) }}\n      {%- if not loop.last %}\n        {{- \"",\"" }} \n    {%- endif %}\n    {%- endfor %}\n    {{- \""]\"" }}\n{%- else %}\n    {{- \""Any\"" }}\n{%- endif %}\n{%- endmacro %}\n\n\n{{- bos_token }}\n{{- '<|im_start|>system\n' }}\n{{- \""You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools: <tools> \"" }}\n{%- for tool in tools %}\n    {%- if tool.function is defined %}\n        {%- set tool = tool.function %}\n    {%- endif %}\n    {{- '{\""type\"": \""function\"", \""function\"": ' }}\n    {{- '{\""name\"": \""' + tool.name + '\"", ' }}\n    {{- '\""description\"": \""' + tool.name + '(' }}\n    {%- for param_name, param_fields in tool.parameters.properties|items %}\n        {{- param_name + \"": \"" + json_to_python_type(param_fields) }}\n        {%- if not loop.last %}\n            {{- \"", \"" }}\n        {%- endif %}\n    {%- endfor %}\n    {{- \"")\"" }}\n    {%- if tool.return is defined %}\n        {{- \"" -> \"" + json_to_python_type(tool.return) }}\n    {%- endif %}\n    {{- \"" - \"" + tool.description + \""\n\n\"" }}\n    {%- for param_name, param_fields in tool.parameters.properties|items %}\n        {%- if loop.first %}\n            {{- \""    Args:\n\"" }}\n        {%- endif %}\n        {{- \""        \"" + param_name + \""(\"" + json_to_python_type(param_fields) + \""): \"" + param_fields.description|trim }}\n    {%- endfor %}\n    {%- if tool.return is defined and tool.return.description is defined %}\n        {{- \""\n    Returns:\n        \"" + tool.return.description }}\n    {%- endif %}\n    {{- '\""' }}\n    {{- ', \""parameters\"": ' }}\n    {%- if tool.parameters.properties | length == 0 %}\n        {{- \""{}\"" }}\n    {%- else %}\n        {{- tool.parameters|tojson }}\n    {%- endif %}\n    {{- \""}\"" }}\n    {%- if not loop.last %}\n        {{- \""\n\"" }}\n    {%- endif %}\n{%- endfor %}\n{{- \"" </tools>\"" }}\n{{- 'Use the following pydantic model json schema for each tool call you will make: {\""properties\"": {\""name\"": {\""title\"": \""Name\"", \""type\"": \""string\""}, \""arguments\"": {\""title\"": \""Arguments\"", \""type\"": \""object\""}}, \""required\"": [\""name\"", \""arguments\""], \""title\"": \""FunctionCall\"", \""type\"": \""object\""}}\n' }}\n{{- \""For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows:\n\"" }}\n{{- \""<tool_call>\n\"" }}\n{{- '{\""name\"": <function-name>, \""arguments\"": <args-dict>}\n' }}\n{{- '</tool_call><|im_end|>\n' }}\n{%- for message in messages %}\n    {%- if message.role == \""user\"" or message.role == \""system\"" or (message.role == \""assistant\"" and message.tool_calls is not defined) %}\n        {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}\n    {%- elif message.role == \""assistant\"" %}\n        {{- '<|im_start|>' + message.role }}\n    {%- for tool_call in message.tool_calls %}\n       {{- '\n<tool_call>\n' }}           {%- if tool_call.function is defined %}\n                {%- set tool_call = tool_call.function %}\n            {%- endif %}\n            {{- '{' }}\n            {{- '\""name\"": \""' }}\n            {{- tool_call.name }}\n            {{- '\""' }}\n            {{- ', '}}\n           
 {%- if tool_call.arguments is defined %}\n                {{- '\""arguments\"": ' }}\n                {%- if tool_call.arguments is string %}\n                    {{- tool_call.arguments }}\n                {%- else %}\n                    {{- tool_call.arguments|tojson }}\n                {%- endif %}\n            {%- endif %}\n             {{- '}' }}\n            {{- '\n</tool_call>' }}\n    {%- endfor %}\n        {{- '<|im_end|>\n' }}\n    {%- elif message.role == \""tool\"" %}\n        {%- if loop.previtem and loop.previtem.role != \""tool\"" %}\n            {{- '<|im_start|>tool\n' }}\n        {%- endif %}\n        {{- '<tool_response>\n' }}\n        {{- message.content }}\n        {%- if not loop.last %}\n            {{- '\n</tool_response>\n' }}\n        {%- else %}\n            {{- '\n</tool_response>' }}\n        {%- endif %}\n        {%- if not loop.last and loop.nextitem.role != \""tool\"" %}\n            {{- '<|im_end|>' }}\n        {%- elif loop.last %}\n            {{- '<|im_end|>' }}\n        {%- endif %}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\n' }}\n{%- endif %}\n""}], ""eos_token"": ""<|im_end|>"", ""pad_token"": ""<|end_of_text|>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""featherless-ai/try-this-model"", ""allenai/WildBench"", ""eduagarcia/open_pt_llm_leaderboard"", ""allenai/ZebraLogic"", ""WildEval/ZebraLogic"", ""Darok/Featherless-Feud"", ""emekaboris/try-this-model"", ""xmadai/1bit_llama3_instruct_xmad_chatbot"", ""SC999/NV_Nemotron"", ""Steven10429/apply_lora_and_quantize"", ""Aston-xMAD/1bit_llama3_instruct_xmad_chatbot"", ""DanofficeIT/privatellm"", ""JackHoltone/try-this-model"", ""HuanzhiMao/dual_window"", ""Mat17892/iris"", ""k11112/try-this-model"", ""YZ-TAN/flask-llama"", ""KBaba7/llama.cpp""], ""safetensors"": {""parameters"": {""BF16"": 8030261248}, ""total"": 8030261248}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-09-08 08:11:20+00:00"", ""cardData"": ""base_model: NousResearch/Hermes-2-Pro-Llama-3-8B\ndatasets:\n- teknium/OpenHermes-2.5\nlanguage:\n- en\nlicense: apache-2.0\ntags:\n- Llama-3\n- instruct\n- finetune\n- chatml\n- DPO\n- RLHF\n- gpt4\n- synthetic data\n- distillation\n- function calling\n- json mode\n- axolotl\n- merges\nwidget:\n- example_title: 
Hermes 2 Pro Llama-3 Instruct Merge\n  messages:\n  - role: system\n    content: You are a sentient, superintelligent artificial general intelligence,\n      here to teach and assist me.\n  - role: user\n    content: Write a short story about Goku discovering kirby has teamed up with Majin\n      Buu to destroy the world.\nmodel-index:\n- name: Hermes-2-Pro-Llama-3-Instruct-8B-Merge\n  results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""66374df5650f76807b257b98"", ""modelId"": ""NousResearch/Hermes-2-Theta-Llama-3-8B"", ""usedStorage"": 16060556376}",0,https://huggingface.co/texanrangee/2eadce08-9a7f-4da8-bd01-2625325f3744,1,"https://huggingface.co/nblinh63/0853a73b-a674-4a81-87e3-ebd2dc7340b1, https://huggingface.co/nblinh/879becaf-725c-41a6-a2c6-51654195dc8a, https://huggingface.co/eeeebbb2/879becaf-725c-41a6-a2c6-51654195dc8a, https://huggingface.co/eeeebbb2/9466fa23-7465-4909-97cd-dd405f87bf1b, https://huggingface.co/nblinh63/9466fa23-7465-4909-97cd-dd405f87bf1b, https://huggingface.co/1-lock/4bfc3447-f9d8-4ba1-932b-c6d2484e0e24, https://huggingface.co/1-lock/1066784b-c190-4088-9e1e-7e094cac1703, https://huggingface.co/DeepDream2045/fd0ee068-8a66-4aa1-9803-9b270d06e76a, https://huggingface.co/1-lock/fd0ee068-8a66-4aa1-9803-9b270d06e76a, https://huggingface.co/dada22231/fd0ee068-8a66-4aa1-9803-9b270d06e76a, https://huggingface.co/eeeebbb2/fd0ee068-8a66-4aa1-9803-9b270d06e76a, https://huggingface.co/DeepDream2045/5cfacbdc-e852-40fb-a08e-e77f88e3e83b, https://huggingface.co/1-lock/5cfacbdc-e852-40fb-a08e-e77f88e3e83b, https://huggingface.co/eeeebbb2/5cfacbdc-e852-40fb-a08e-e77f88e3e83b, https://huggingface.co/DeepDream2045/780d6e7b-7f6c-4069-aabf-7ff533b03275, https://huggingface.co/vdos/780d6e7b-7f6c-4069-aabf-7ff533b03275, https://huggingface.co/nbninh/b9743b0a-a75e-4682-815a-6269c42648e3, https://huggingface.co/laquythang/b9743b0a-a75e-4682-815a-6269c42648e3, https://huggingface.co/lesso/b9743b0a-a75e-4682-815a-6269c42648e3, https://huggingface.co/bbytxt/36a2450a-8eba-4bc4-befb-4e37115f21be, https://huggingface.co/Rodo-Sami/f87feaa2-6902-4979-a85d-4667022d14fa, https://huggingface.co/lesso02/c7f7f1ee-b319-441b-a3d4-1dadfc5b4e26, https://huggingface.co/vdos/c7f7f1ee-b319-441b-a3d4-1dadfc5b4e26, https://huggingface.co/bbytxt/2a42f8b7-2545-4438-a6f2-98abdf5b3b05, https://huggingface.co/kokovova/2a42f8b7-2545-4438-a6f2-98abdf5b3b05, https://huggingface.co/dzanbek/19c416b4-148d-48ab-9712-2e39930df74a, https://huggingface.co/dzanbek/9c0ba83d-7ad1-4ee2-9ce3-e505d153c9e6, https://huggingface.co/lesso01/9c0ba83d-7ad1-4ee2-9ce3-e505d153c9e6, https://huggingface.co/bbytxt/9c0ba83d-7ad1-4ee2-9ce3-e505d153c9e6, https://huggingface.co/bbytxt/9d0ce020-cee2-4e58-ac28-e197cdd16311, https://huggingface.co/VERSIL91/9d0ce020-cee2-4e58-ac28-e197cdd16311, https://huggingface.co/VERSIL91/d7c01ee4-558a-4cf0-9399-d6fbe7d3342e, https://huggingface.co/diaenra/531b8585-46ba-4d2e-9f1b-847cad30424a, https://huggingface.co/VERSIL91/531b8585-46ba-4d2e-9f1b-847cad30424a, https://huggingface.co/lesso05/d7c01ee4-558a-4cf0-9399-d6fbe7d3342e, https://huggingface.co/lesso07/531b8585-46ba-4d2e-9f1b-847cad30424a, https://huggingface.co/lesso03/32f909ac-73cd-4dbe-a089-c2efb1901c1c, https://huggingface.co/lhong4759/32f909ac-73cd-4dbe-a089-c2efb1901c1c, https://huggingface.co/VERSIL91/32f909ac-73cd-4dbe-a089-c2efb1901c1c, https://huggingface.co/dimasik87/cababd03-b63e-4df9-9ae7-f4a7689a642e, 
https://huggingface.co/0x1202/cababd03-b63e-4df9-9ae7-f4a7689a642e, https://huggingface.co/lesso03/02ccbb20-14de-4cea-8d87-bdde9fb3494a, https://huggingface.co/VERSIL91/02ccbb20-14de-4cea-8d87-bdde9fb3494a, https://huggingface.co/lesso07/02ccbb20-14de-4cea-8d87-bdde9fb3494a, https://huggingface.co/bbytxt/2c5e14f5-a4b0-49f7-8bac-09832d4ef59d, https://huggingface.co/VERSIL91/2c5e14f5-a4b0-49f7-8bac-09832d4ef59d, https://huggingface.co/VERSIL91/0ee17e99-e4db-4fa3-abd0-9bed08cd6392, https://huggingface.co/tuanna08go/f7066ca4-1bb2-46f2-aad4-fa115d7cc993, https://huggingface.co/0x1202/6dbd4980-5f6b-4f7c-b4a4-040a5358306f, https://huggingface.co/lesso01/6dbd4980-5f6b-4f7c-b4a4-040a5358306f, https://huggingface.co/denbeo/a5ae63ec-bdaa-4674-bc87-01bae2d56132, https://huggingface.co/VERSIL91/6dbd4980-5f6b-4f7c-b4a4-040a5358306f, https://huggingface.co/VERSIL91/a5ae63ec-bdaa-4674-bc87-01bae2d56132, https://huggingface.co/lesso03/fbae26a7-2096-4666-b7e0-58649d86ecc1, https://huggingface.co/thaffggg/fbae26a7-2096-4666-b7e0-58649d86ecc1, https://huggingface.co/dimasik2987/fbae26a7-2096-4666-b7e0-58649d86ecc1, https://huggingface.co/lesso08/fbae26a7-2096-4666-b7e0-58649d86ecc1, https://huggingface.co/lesso05/193ed95a-68b8-479b-8d11-d1b09aa7b1b1, https://huggingface.co/lesso04/193ed95a-68b8-479b-8d11-d1b09aa7b1b1, https://huggingface.co/nblinh63/b8b3afcd-1b38-43a3-9aea-c29b36b66cdd, https://huggingface.co/lesso11/b8b3afcd-1b38-43a3-9aea-c29b36b66cdd, https://huggingface.co/nttx/5b09106a-7f1d-4a11-8394-e6fda774da7f, https://huggingface.co/fedovtt/5b09106a-7f1d-4a11-8394-e6fda774da7f, https://huggingface.co/tuanna08go/5b09106a-7f1d-4a11-8394-e6fda774da7f, https://huggingface.co/dzanbek/416b21ba-d246-4c0a-b2da-b447999ca4e7, https://huggingface.co/lesso02/416b21ba-d246-4c0a-b2da-b447999ca4e7, https://huggingface.co/tuanna08go/416b21ba-d246-4c0a-b2da-b447999ca4e7, https://huggingface.co/duyphu/5b09106a-7f1d-4a11-8394-e6fda774da7f, https://huggingface.co/lesso05/12b4dedc-4a1c-4d27-8b97-821a89efae83, https://huggingface.co/lesso02/12b4dedc-4a1c-4d27-8b97-821a89efae83, https://huggingface.co/nadejdatarabukina/ee572c31-fde4-4067-aa00-109b4aa50218, https://huggingface.co/duyphu/416b21ba-d246-4c0a-b2da-b447999ca4e7, https://huggingface.co/nttx/787423bb-04b6-43e5-9d31-8e1dd89bc318, https://huggingface.co/lesso07/3d78b842-f5e0-4e9d-ac3c-ba768422ce46, https://huggingface.co/chauhoang/787423bb-04b6-43e5-9d31-8e1dd89bc318, https://huggingface.co/duyphu/787423bb-04b6-43e5-9d31-8e1dd89bc318, https://huggingface.co/chauhoang/3d78b842-f5e0-4e9d-ac3c-ba768422ce46, https://huggingface.co/cunghoctienganh/e9d8889f-f6d7-4bbd-bb88-af9b095914a4, https://huggingface.co/nhung03/9090231f-fde2-4443-9c5f-27c54a06b688, https://huggingface.co/nbninh/9090231f-fde2-4443-9c5f-27c54a06b688, https://huggingface.co/ivangrapher/7b7095fd-1252-4b40-8fa6-a7f023713074, https://huggingface.co/bbytxt/1e2b72af-4c8b-4baf-8a1f-ba4c3e68a5bc, https://huggingface.co/fedovtt/1e2b72af-4c8b-4baf-8a1f-ba4c3e68a5bc, https://huggingface.co/dimasik87/1e2b72af-4c8b-4baf-8a1f-ba4c3e68a5bc, https://huggingface.co/phungkhaccuong/d2c9ea4c-68fd-29ea-df17-087dc9949829, https://huggingface.co/phungkhaccuong/bcdb26b0-c59c-b2ea-fb2b-fdce9a80a9e1, https://huggingface.co/vertings6/fcdcc8a8-a7a1-41d5-96cb-d44fefa77a56, https://huggingface.co/dimasik1987/9fd6709d-ea4c-4cc0-9ab6-17d871e23dca, https://huggingface.co/Best000/572e0dc5-1b90-4868-978c-e655b49efcac, https://huggingface.co/chauhoang/af0a794e-fd5d-d6ce-b438-59813be1612b, 
https://huggingface.co/vertings6/f10e8f07-5154-445a-b681-bea1731548e8, https://huggingface.co/sergioalves/2bac89da-7f46-450a-9b4b-67ffb98634da, https://huggingface.co/lesso03/e6661d20-0980-49c7-b4b8-433a74777501, https://huggingface.co/marialvsantiago/22b56d1e-1ec2-42c4-89e6-d71511307bb5, https://huggingface.co/havinash-ai/aee291b5-26fd-4eec-81a4-46c9cf495b8e, https://huggingface.co/VERSIL91/6642efce-554c-4b67-99b4-70866b7afee7, https://huggingface.co/thalllsssss/945cd25a-7cc3-404a-9cb5-ef9051635a17, https://huggingface.co/ClarenceDan/ef0c6b5f-0625-4367-8462-0502444cb58e, https://huggingface.co/chauhoang/d0919cb7-fe0c-4cca-6f6a-9ece51e590fa, https://huggingface.co/mamung/0d86ebab-9e6e-43dc-81a3-109976fe2611, https://huggingface.co/lesso08/c76c4e64-725f-470c-8133-1b1a456a3ed0, https://huggingface.co/filipesantoscv11/d540b3ce-ba28-4d1e-8b4c-c012d60079ba, https://huggingface.co/nhung03/bcb4c2d3-4f51-4d48-8b5e-5074dba38275, https://huggingface.co/dimasik1987/3ff3c859-7246-46bf-a855-0330fc38c0bf, https://huggingface.co/thalllsssss/84da3cdd-4475-4eeb-bb45-95cf67a24f3a, https://huggingface.co/duyphu/19948140-836d-def8-658f-3a493c042d53, https://huggingface.co/rak-r05/7abdf163-ccda-45f6-86af-79ad12206db7, https://huggingface.co/nhung03/970b720f-d173-4b72-9472-c6b04b7ae447, https://huggingface.co/lesso07/fc3ea227-0278-4de2-8038-79d0a6e9e397, https://huggingface.co/demohong/b5bc7373-16e8-43e3-a099-5e669229b29e, https://huggingface.co/lesso01/59391ff6-95bc-48f4-9a97-70dc84ba9e3c, https://huggingface.co/lesso08/99dba256-d8f4-4a91-b9bd-fdfd37ace32d, https://huggingface.co/duyphu/9a1ba25f-3afd-402a-863b-0618e6a1501d, https://huggingface.co/prxy5607/824bc767-af18-4035-8c30-c18c6e80b705, https://huggingface.co/0x1202/9acd4452-4a5c-442d-8332-8d20e0fe1c71, https://huggingface.co/aleegis09/f456b81e-3091-4352-bd72-94d5b6736110, https://huggingface.co/mrHunghddddd/b81725a8-614f-46cf-9331-638ca1cc5ce2, https://huggingface.co/nadejdatarabukina/da231eaf-f9b5-412e-a18a-f383b02718b8, https://huggingface.co/joboffer/64a5119a-d917-4633-ac1d-7ef5bb5f8c9c, https://huggingface.co/mamung/ed69a0b2-6ac6-49f4-8084-e439c97e1710, https://huggingface.co/nhoxinh/208fe935-2400-4d50-8093-4dc32602519d, https://huggingface.co/adammandic87/d3bc7513-0890-491a-b5eb-cc20dad81ba2, https://huggingface.co/vmpsergio/a9e43dbb-d0b8-466d-9b72-69749186ba92, https://huggingface.co/chauhoang/76f5467b-81f7-4d3b-95e3-ced0c6ad7a3c, https://huggingface.co/bbytxt/e1a00640-d674-4af0-8b23-23b73d18cddf, https://huggingface.co/nhung03/dc9ea4e8-ad36-4796-96c3-a669bb20d00d, https://huggingface.co/cunghoctienganh/7b3c78fa-7bb9-40b4-8778-d017f0da1081, https://huggingface.co/thangla01/a66321e6-085f-44a8-8497-82f3703da0e6, https://huggingface.co/nblinh63/a2f4fe2c-d90d-45fa-8774-15880112dc23, https://huggingface.co/cvoffer/e15c09ac-c714-4ee0-b7cf-115ea8acd536, https://huggingface.co/tarabukinivan/074915d1-f403-46ce-92cd-cfb4aa4eb495, https://huggingface.co/ClarenceDan/5750cb9c-1aaa-499e-bbc4-760588ed2897, https://huggingface.co/ajtaltarabukin2022/85aeddcc-7883-4e75-a0c1-deeaac3027a0, https://huggingface.co/nhoxinh/2230a6a7-f3a8-4fe2-a52c-2f98f8be87de, https://huggingface.co/aleegis09/ea7cc5f6-dd49-43c2-9aee-75019b7a63c2, https://huggingface.co/bbytxt/b47e360a-7412-494b-b179-20a0147599dd, https://huggingface.co/tuanna08go/accc9330-d07e-4a8b-9849-05b6e245cd16, https://huggingface.co/cunghoctienganh/ce15a04e-03cb-49ca-bf7d-e008fc96bd44, https://huggingface.co/thaffggg/4f55e6b6-2071-4deb-8f15-40d17bfb72a3, 
https://huggingface.co/hongngo/203124a9-51be-4014-8087-b497839f4a39, https://huggingface.co/laquythang/3f23f1ba-dc7b-473c-a6bd-e7f190fad174, https://huggingface.co/lesso01/e0f28105-9689-419f-987a-eefcb9069953, https://huggingface.co/nhunglaaaaaaa/04349e0e-e181-4b94-8a17-c541d110549f, https://huggingface.co/marialvsantiago/fe3324a8-27cb-458e-962c-ce0b5d0bffac, https://huggingface.co/mrHunghddddd/54d64f67-23a6-4ff2-ab78-cf94bb94f483, https://huggingface.co/nhung03/0b4c9ab2-45ac-4e39-90ef-4d13697b831f, https://huggingface.co/nhung01/3972e51c-82a6-4013-a10d-0d2bca53ec0f, https://huggingface.co/nhoxinh/f027fc7e-2e44-4db6-a8a8-23c0b16a1cfe, https://huggingface.co/aleegis09/014d3986-3552-41d2-8876-17aee7a31b53, https://huggingface.co/tuanna08go/5dd3864e-1aac-4a59-8570-4dbb6467fc1b, https://huggingface.co/mrHunghddddd/dda115f8-61b9-4fbf-8fb0-6e1d38238076, https://huggingface.co/denbeo/56e19c0a-2181-4f50-a3ce-208355bbb729, https://huggingface.co/lesso06/0a40280c-6eaf-4c26-b9d2-71e077ad6668, https://huggingface.co/lesso08/3a743a86-993d-4172-926a-2902e4c14428, https://huggingface.co/ClarenceDan/47f7f978-6753-4d48-86e0-429a9bfc6f8f, https://huggingface.co/prxy5606/f9b08f8b-b314-479d-950b-892276a02946, https://huggingface.co/demohong/7e4d75d3-2173-4e59-bcf3-5b90428e0987, https://huggingface.co/thangla01/3c9d9826-b369-445b-b591-1f679f03bad3, https://huggingface.co/datlaaaaaaa/cb644934-2259-4311-b026-f8dff912050a, https://huggingface.co/marialvsantiago/5105c92a-0c4d-479f-bbaf-f8a0f564cd20, https://huggingface.co/joboffer/7bf08e59-ebe1-4066-a758-409c1318454f, https://huggingface.co/lhong4759/65e9c4cf-663b-4466-adc6-7a52d231d4cd, https://huggingface.co/kk-aivio/2ae6a5a4-e037-451c-8040-05a9185e8aa9, https://huggingface.co/great0001/55b1310f-5779-488a-88af-3fdd0e2f6eda, https://huggingface.co/vertings6/837e2b08-eaee-4560-a3f6-afcf187db53c, https://huggingface.co/fedovtt/52d5f091-5ce6-46be-b808-14007ca8e370, https://huggingface.co/aleegis10/1e204fc1-3127-45a1-96a4-911204855c44, https://huggingface.co/0x1202/48e60b09-5193-4d48-b36f-01c5f377f815, https://huggingface.co/prxy5605/718defe4-e93b-4fb0-931c-eab06ee1d34d, https://huggingface.co/nhunglaaaaaaa/5456a706-b062-4789-b888-75e861b2b990, https://huggingface.co/lesso05/139d1902-b751-4aad-9ae7-a69411bce92a, https://huggingface.co/nadejdatarabukina/bb96903f-f09d-4bf2-bc75-f6fc0f43d503, https://huggingface.co/eddysang/4e562d4b-5820-45c7-8b04-a951d371aeaa, https://huggingface.co/tuanna08go/c2d239b9-a981-4b10-8595-48e2ffeb6b59, https://huggingface.co/prxy5608/e88b36c9-0be9-47c0-93cc-74c638eccc13, https://huggingface.co/kokovova/f0cd1f36-c1ba-4d08-b42a-4fd2ef34a038, https://huggingface.co/cunghoctienganh/0c33942c-0ea9-45b8-8573-e4c1448ccd7c, https://huggingface.co/trangtrannnnn/a1dd0b66-642d-4edb-8e2b-e64357bc35ce, https://huggingface.co/lesso09/d057a9f9-1f4f-444c-a835-0d6d1772459a, https://huggingface.co/nhoxinh/6ce28499-06a3-4928-b794-8803e241107b, https://huggingface.co/lesso02/cab029de-09a5-432f-a78b-1f1478289fde, https://huggingface.co/denbeo/f8c15019-f5a7-4498-979d-74c46c2eeb96, https://huggingface.co/nbninh/9227fbdd-064f-41b6-a5b8-2e277ff1221b, https://huggingface.co/lesso04/b26df9a3-7bb0-4a30-a496-02eeb09879f6, https://huggingface.co/duyphu/9a9cd471-62a0-4a94-88ec-dfbe0ddc44ff, https://huggingface.co/gavrilstep/85aa9e5d-cbb6-4fe6-9e71-d69f333295b0, https://huggingface.co/nblinh63/f2a017a9-3a1f-4a97-a88f-120de2696d37, https://huggingface.co/lesso11/d789c124-0bbb-487c-a4f9-cc60eb8aaa5a, https://huggingface.co/dimasik2987/1ea1c90d-68d7-41ce-adaf-b5eee7086f52, 
https://huggingface.co/nhung01/aa1226c4-965c-47d1-8efe-094397731410, https://huggingface.co/thaffggg/5ad7a051-4b92-43f3-9d6a-9bc1eee421ff, https://huggingface.co/maksf8486/8f585d98-e7c9-4d6c-8a6a-230223943dd8, https://huggingface.co/datlaaaaaaa/8549e9bb-5f3d-473d-9fb6-03c7f619d65d, https://huggingface.co/lesso03/ea8357ac-a1d3-4d0e-a46c-738185c04333, https://huggingface.co/nhoxinh/e92fb9de-3da1-4075-82b4-dfb5967481ae, https://huggingface.co/nhung02/67256797-fd7c-4eda-8297-d5e038b350ea, https://huggingface.co/havinash-ai/889df6d9-9a65-4e5a-a55d-6ec730f045a2, https://huggingface.co/havinash-ai/a6000eb5-e9e6-441a-b977-7046dcf87137, https://huggingface.co/lesso14/aea177ff-4791-47f0-8feb-22f77bb019be, https://huggingface.co/adammandic87/3e819179-30b0-4736-b569-c1356f476a5e, https://huggingface.co/nttx/fa65660a-39da-4a3f-accb-145b18a6a40b, https://huggingface.co/bbytxt/00fda684-c8e9-47a2-9809-5d7ce8584107, https://huggingface.co/kokovova/3367a419-e8c6-4fae-a092-b8385c05e3ee, https://huggingface.co/denbeo/d87ef830-2ae5-46dc-a05e-8702014443e0, https://huggingface.co/mrHungddddh/9867b3ab-cdba-421a-b905-da1bf47cf590, https://huggingface.co/nhung02/6c0ff621-9fb4-4b3f-a3f8-171c02bdc0a4, https://huggingface.co/mrhunghd/5bacb922-92c2-4b22-b486-6df725d290f3, https://huggingface.co/nblinh/3c2fa23e-66df-468c-950e-9df52a4e744e, https://huggingface.co/nbninh/1173195c-ea25-47bf-a267-2dea56cadcc4, https://huggingface.co/infogep/f870efc3-f7b9-43af-bc07-11e579c6d9a4, https://huggingface.co/Best000/de562928-38b9-4acf-ac6d-2ed00379ea9d, https://huggingface.co/maksf8486/33c6c053-b244-4aa7-85d1-345c6e563c28, https://huggingface.co/marialvsantiago/3a3d2f43-8168-48e7-9df9-758a4d41f62f, https://huggingface.co/thangla01/2304e726-afc4-40fa-a394-472ecdeff03d, https://huggingface.co/lesso10/e0ecb62a-668d-4267-a0f8-fe2815a47778, https://huggingface.co/joboffer/f0f9f2db-331c-4211-b6d4-ba9834a65cd9, https://huggingface.co/ClarenceDan/306a2e73-98f3-4a28-ad89-40c7c5d88a3c, https://huggingface.co/kokovova/601df53d-50aa-4658-834c-29f049b7e311, https://huggingface.co/mrhunghd/6b9a230c-1aab-4787-94b8-15065cf73516, https://huggingface.co/nhung02/2dc4cefc-4929-4ed5-816e-a2789444ca1a, https://huggingface.co/adammandic87/99f79006-205e-4678-8f9c-0eeefb56120a, https://huggingface.co/sniperfix/30ad6502-69f7-4c04-af01-59b0a99a7e28, https://huggingface.co/myhaaaaaaa/6d2213d0-ac4d-49b0-8187-bcaafadde42d, https://huggingface.co/nhung01/d20f2f86-0caa-4523-a048-4275e8c63183, https://huggingface.co/nbninh/2c90dd11-bf1e-4d25-810c-42d8f9c7083e, https://huggingface.co/laquythang/bb3a25ee-4803-41bb-8c4a-80c8af4c1b84, https://huggingface.co/great0001/403016e5-2ed5-4bf4-8b43-c76efb0de2ab, https://huggingface.co/gavrilstep/ae56e39c-538d-4b28-afe6-b40f0d556971, https://huggingface.co/kk-aivio/82f19e13-556f-4273-ac60-954c0c1d6cf9, https://huggingface.co/chauhoang/a29af960-ee69-493e-9305-ae86eede4a42, https://huggingface.co/havinash-ai/aa96bae1-86c5-49d1-ba14-c7b7378b544c, https://huggingface.co/prxy5605/86b26149-8035-4168-8bb4-428bf867e6bd, https://huggingface.co/minhnguyennnnnn/99fa811f-f99f-42b8-b604-f450c039d144, https://huggingface.co/nhung03/ce4d7870-7616-4a4f-b5c7-bff24a04b468, https://huggingface.co/minhnguyennnnnn/8be4f9bf-1302-4968-b73a-d6d56383c845, https://huggingface.co/lesso05/61c0a917-b689-4fac-8975-acbf2aff1de2, https://huggingface.co/lesso18/ad03c4c5-1964-480a-b434-1758b7e5368b, https://huggingface.co/lesso11/f3c8eee8-5f0c-4a60-bcc9-e743236af4ed, https://huggingface.co/nhungphammmmm/3b6b258a-4597-4f15-aa63-059dc8b8e1ef, 
https://huggingface.co/datlaaaaaaa/f1cbdd12-94e6-4f8d-9428-dc550a8eb4c2, https://huggingface.co/nhunglaaaaaaa/28b23bc3-0661-4f1b-b2cd-6978f23562b6, https://huggingface.co/myhaaaaaaa/d842e6c0-6052-43b4-8f33-ae95419578ee, https://huggingface.co/robiulawaldev/110df085-588f-4c70-b01d-1111475281a6, https://huggingface.co/ancient41/0ae287c6-1dd6-496d-a8ee-5cb0a1bc51d9, https://huggingface.co/lesso14/5e6f72a3-26d6-48af-9d08-4be21ae5bcb7, https://huggingface.co/baby-dev/3478bc1f-4118-440c-b290-a66214080d1d, https://huggingface.co/nhung03/19632a98-4a51-4377-8e36-431f3861fb5d, https://huggingface.co/aseratus1/9a2ea7bb-3ed0-4faa-b5c7-0a94fa37c3e0, https://huggingface.co/lesso/78124d06-1624-47a9-8321-1a13c042ecc2, https://huggingface.co/beast33/84e574ba-89b8-45d9-925c-5e3902aaecd5, https://huggingface.co/alchemist69/ab9b4d6c-416b-4eac-8d54-d042fc62d17c, https://huggingface.co/alchemist69/4574a479-3fac-4b0f-a0ce-bd94b4ee0434, https://huggingface.co/arcwarden46/296b886b-b3f2-4a83-8f1a-6f29cf20b1d9, https://huggingface.co/arcwarden46/b133f290-e79c-4f58-9822-f1c9c3d5fc09, https://huggingface.co/nttx/350c63d0-889c-48d8-9a11-5808126fba4f, https://huggingface.co/lesso/2f68269d-9952-480e-b790-b4beb12521fd, https://huggingface.co/datlaaaaaaa/2ba58984-8aa6-4044-b446-1d7232524fc7, https://huggingface.co/robiual-awal/327ff058-380d-4474-ba6c-089da64316b3, https://huggingface.co/lesso/e3a25fd5-771a-4012-b7e1-36d59cb04153, https://huggingface.co/nttx/2bf28ba6-10f9-420c-8dd4-8c83ba6bb537, https://huggingface.co/nhung03/bc94c880-76f5-4ec7-9f02-489ad380346a, https://huggingface.co/nblinh/785ec37f-59ad-46ee-982c-2d96c247d692, https://huggingface.co/laquythang/1180afb5-5c43-4636-9700-e839744dfcfc, https://huggingface.co/havinash-ai/65358531-f6a2-46f2-b5fe-9c1b727527e8, https://huggingface.co/lesso/c621868f-87aa-4ee4-931f-0e683f03161c, https://huggingface.co/ciloku/5ed31d55-8db6-45ef-925a-a9019695b13f, https://huggingface.co/adammandic87/1be6c54b-b42a-4947-97a9-96102f562ce9, https://huggingface.co/filipesantoscv11/0c668a10-b4aa-4ff9-94ff-2a2927c45fa0, https://huggingface.co/fedovtt/357bf71a-304b-44c6-b0aa-6f2e5f897fc5, https://huggingface.co/daniel40/9d82966a-7d6b-4049-9e8c-b87e50d8768c, https://huggingface.co/lesso/3b543c8e-3007-4b36-94f1-623f164afc94, https://huggingface.co/lesso03/9d74494f-9df8-425b-a6e2-250a9d10c059, https://huggingface.co/romainnn/6b437cd7-2758-4c6d-b6c1-647cb41b8440, https://huggingface.co/lesso13/c211d6d1-cfb5-4aaa-9ab1-23653eafaef1, https://huggingface.co/lesso17/4f9e41a9-9e70-4a33-bb82-766f5af766a7, https://huggingface.co/lesso02/ef32e551-f023-4993-ae3a-6ff6900c3930, https://huggingface.co/lesso06/feedec57-f067-42ef-8b03-e064389d77d4, https://huggingface.co/tuantmdev/644de173-3999-472d-adc3-1b773db75309, https://huggingface.co/lesso08/d6d72ddf-f743-4d69-9694-f6d812708ad2, https://huggingface.co/lesso03/93bcadcf-69b8-47e6-a623-6939f113fd66, https://huggingface.co/lesso17/aea6b918-362b-4b72-8888-7cd3e61ca40a, https://huggingface.co/lesso14/18ba57ad-c1f1-4eb9-8ebc-a9253d7e28d6, https://huggingface.co/lesso01/a63215fc-5fcf-400a-ad15-00b24da70f2e, https://huggingface.co/tuantmdev/e8ed3510-3de5-45a5-b3b3-a6ce06154e0b, https://huggingface.co/baby-dev/8a8719db-5751-442a-a7bc-7adc343c3525, https://huggingface.co/daniel40/dea22ed2-54ae-4395-ac92-c26c345d2e93, https://huggingface.co/daniel40/a9a55b78-3756-4d6d-aeba-45149f856a25, https://huggingface.co/tuantmdev/6860d41c-54b0-429a-b48d-4e896b5c9961, https://huggingface.co/ClarenceDan/b95d612f-e481-48c3-bd82-95bc37b28c5f, 
https://huggingface.co/great0001/c1237075-8fbe-450a-998e-8f03a3489451, https://huggingface.co/lesso14/0cd42900-b207-46a8-a141-5dca641f3a1e, https://huggingface.co/lesso03/8a97b40c-f552-4c25-8078-a9bae8136d59, https://huggingface.co/lesso07/7163d263-4eed-4dbc-96b1-0f36d349c9bb, https://huggingface.co/lesso11/58934fca-caea-44ce-94be-6c2e1356b241, https://huggingface.co/Romain-XV/1a5fcc3e-11dc-43f5-998d-c464a25bc43c, https://huggingface.co/ClarenceDan/94de0e57-218a-413d-a43d-0520ca5ee638, https://huggingface.co/lesso12/4ea09535-8072-4c45-bcb0-152ed56a5e41, https://huggingface.co/lesso03/c0b6d4e7-dfe3-47a0-81a3-d8400b0912ea, https://huggingface.co/fats-fme/cd827e4b-c73b-457f-9681-cedf9475525a, https://huggingface.co/Alphatao/5b15e52c-37ef-490e-9438-b0adb403aa74, https://huggingface.co/lesso06/b42e56f9-a327-482a-94a8-baa529840a01, https://huggingface.co/lesso10/a77357d3-0a32-4037-877f-ee95f4633e9a, https://huggingface.co/Alphatao/f40d88eb-c453-498b-bd33-0bdda9bdd0b9, https://huggingface.co/Alphatao/909db229-f2a4-40f3-934f-b1c727822330, https://huggingface.co/ClarenceDan/a81ca747-6ba0-4998-864f-14dc4a579de6, https://huggingface.co/lesso09/07dc4bad-9fa1-489d-a57e-b26ca162fdf9, https://huggingface.co/Alphatao/f9560213-c08b-4802-b2e8-9aa9f6064549, https://huggingface.co/lesso10/14595f4c-b4e4-43b2-8352-bbc968e6fb51, https://huggingface.co/lesso01/d8496d47-d820-4096-9467-7a8defb66512, https://huggingface.co/Alphatao/3497e3b6-7b76-48db-9418-49f92bea5054, https://huggingface.co/lesso01/6b1bfb30-7f0c-43e5-9dc7-547a07998560, https://huggingface.co/Alphatao/553557da-e328-4664-b30a-bf24d2730c36, https://huggingface.co/lesso17/ef044443-441a-4d2d-b56c-be7489fa18dc, https://huggingface.co/lesso12/08ce581c-ec16-45e7-9de2-3a0cf7a3a5bc, https://huggingface.co/lesso08/89b30d71-7104-4248-8c8a-bf8aee56bcc1, https://huggingface.co/samoline/6b2ea11d-fd01-415c-a480-ce2c5f780ecd, https://huggingface.co/lesso04/e0525b21-b978-4e94-a4cc-14e6f55d266c, https://huggingface.co/lesso15/b8f404ac-04b8-4b04-bd1e-4fc0e7dbb7f7, https://huggingface.co/lesso06/7d6b7284-f6c4-4093-8870-a8f976e01f17, https://huggingface.co/lesso10/6fa9403c-a9d5-4f35-8634-53ff14e82465, https://huggingface.co/Alphatao/7d25a2da-da3c-47ad-9fba-201617ad09d6, https://huggingface.co/Alphatao/e25ad6b0-1fe0-4d02-9af7-d5502f8b5dbb, https://huggingface.co/souging/fba4df36-5661-4b30-a3ac-038824749802, https://huggingface.co/lesso11/63e098d3-d0da-44a7-be1b-78219c7380aa, https://huggingface.co/lesso01/96172f9b-875d-4057-876e-77f4a87e80ee, https://huggingface.co/samoline/cc0abb08-0c6a-4c02-8a72-597853acb6d5, https://huggingface.co/lesso01/df7bd38d-0e78-49c1-baff-efc6f8e57f1f, https://huggingface.co/lesso17/09da2a53-55e4-44d1-a61c-1173de83e0d4, https://huggingface.co/robiulawaldev/6f49ce5f-f055-4382-aa99-f5a659479a27",329,"https://huggingface.co/GazTrab/depression_turtle, https://huggingface.co/GazTrab/depression_turtle_GGUF_q4_k_m, https://huggingface.co/solidrust/Hermes-2-Theta-Llama-3-8B-AWQ, https://huggingface.co/QuantFactory/Hermes-2-Theta-Llama-3-8B-GGUF, https://huggingface.co/legraphista/Hermes-2-Theta-Llama-3-8B-IMat-GGUF, https://huggingface.co/mlc-ai/Hermes-2-Theta-Llama-3-8B-q0f16-MLC, https://huggingface.co/mlc-ai/Hermes-2-Theta-Llama-3-8B-q3f16_1-MLC, https://huggingface.co/mlc-ai/Hermes-2-Theta-Llama-3-8B-q4f32_1-MLC, https://huggingface.co/mlc-ai/Hermes-2-Theta-Llama-3-8B-q4f16_1-MLC",9,"https://huggingface.co/saucam/Proteus-8B, https://huggingface.co/mergekit-community/TopStarClaudioArena, https://huggingface.co/Jebadiah/Tess-gradient-ruby-p2, 
https://huggingface.co/wwe180/Llama3-15B-lingyang-v0.1, https://huggingface.co/mergekit-community/TopEvolution16, https://huggingface.co/ClaudioItaly/TopEvolution16-Q5_K_M-GGUF, https://huggingface.co/wwe180/Llama3-15B-lingyang-v0.1-Q6_K-GGUF, https://huggingface.co/Azazelle/L3-Dumb-Lois-Griffin-8b, https://huggingface.co/Locutusque/Llama-3-Yggdrasil-2.0-8B, https://huggingface.co/bartowski/Llama-3-Yggdrasil-2.0-8B-GGUF, https://huggingface.co/bartowski/Llama-3-Yggdrasil-2.0-8B-exl2, https://huggingface.co/Azazelle/L3-Decent-Lois-Griffin-8B, https://huggingface.co/ayan-sh003/hermes-2-theta-neural-llama3, https://huggingface.co/Auber/LearnitInnov-8B-test, https://huggingface.co/DreadPoor/ONeil-model_stock-8B, https://huggingface.co/mergekit-community/mergekit-ties-qzeyxfl, https://huggingface.co/mergekit-community/mergekit-dare_ties-lryrqma, https://huggingface.co/Trisert/Llama3-8B-jakov-dare, https://huggingface.co/Bottlecap2023/my-Nous-Merge, https://huggingface.co/QuantFactory/Llama-3-Yggdrasil-2.0-8B-GGUF",20,"Aston-xMAD/1bit_llama3_instruct_xmad_chatbot, Darok/Featherless-Feud, KBaba7/llama.cpp, SC999/NV_Nemotron, Steven10429/apply_lora_and_quantize, WildEval/ZebraLogic, allenai/WildBench, allenai/ZebraLogic, eduagarcia/open_pt_llm_leaderboard, emekaboris/try-this-model, featherless-ai/try-this-model, huggingface/InferenceSupport/discussions/new?title=NousResearch/Hermes-2-Theta-Llama-3-8B&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BNousResearch%2FHermes-2-Theta-Llama-3-8B%5D(%2FNousResearch%2FHermes-2-Theta-Llama-3-8B)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, xmadai/1bit_llama3_instruct_xmad_chatbot",13
         
     | 
| 333 | 
         
            +
            texanrangee/2eadce08-9a7f-4da8-bd01-2625325f3744,"---
         
     | 
| 334 | 
         
            +
            base_model: NousResearch/Hermes-2-Theta-Llama-3-8B
         
     | 
| 335 | 
         
            +
            tags:
         
     | 
| 336 | 
         
            +
            - text-generation-inference
         
     | 
| 337 | 
         
            +
            - transformers
         
     | 
| 338 | 
         
            +
            - unsloth
         
     | 
| 339 | 
         
            +
            - llama
         
     | 
| 340 | 
         
            +
            - trl
         
     | 
| 341 | 
         
            +
            license: apache-2.0
         
     | 
| 342 | 
         
            +
            language:
         
     | 
| 343 | 
         
            +
            - en
         
     | 
| 344 | 
         
            +
            ---
         
     | 
| 345 | 
         
            +
             
     | 
| 346 | 
         
            +
# Uploaded model
         
     | 
| 347 | 
         
            +
             
     | 
| 348 | 
         
            +
            - **Developed by:** texanrangee
         
     | 
| 349 | 
         
            +
            - **License:** apache-2.0
         
     | 
| 350 | 
         
            +
- **Finetuned from model:** NousResearch/Hermes-2-Theta-Llama-3-8B
         
     | 
| 351 | 
         
            +
             
     | 
| 352 | 
         
            +
            This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library.
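
For context, a minimal, hedged sketch of the kind of Unsloth setup such a LoRA adapter is typically produced with; the sequence length, LoRA rank, and target modules below are placeholders and are not taken from this adapter's actual training run:

```python
# Hedged sketch of an Unsloth-style QLoRA setup; hyperparameters are illustrative only.
from unsloth import FastLanguageModel

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="NousResearch/Hermes-2-Theta-Llama-3-8B",
    max_seq_length=2048,
    load_in_4bit=True,
)
model = FastLanguageModel.get_peft_model(
    model,
    r=16,
    lora_alpha=16,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
                    "gate_proj", "up_proj", "down_proj"],
)
# The PEFT model is then trained with TRL (e.g. SFTTrainer) on a chat-formatted dataset;
# only the resulting adapter weights are what a repo like this one contains.
```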
         
     | 
| 353 | 
         
            +
             
     | 
| 354 | 
         
            +
            [<img src=""https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png"" width=""200""/>](https://github.com/unslothai/unsloth)
         
     | 
| 355 | 
         
            +
            ","{""id"": ""texanrangee/2eadce08-9a7f-4da8-bd01-2625325f3744"", ""author"": ""texanrangee"", ""sha"": ""4c35a045955b534a54c27bd86464f75da06f163b"", ""last_modified"": ""2025-03-23 08:32:18+00:00"", ""created_at"": ""2025-03-23 08:31:11+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""text-generation-inference"", ""unsloth"", ""llama"", ""trl"", ""en"", ""base_model:NousResearch/Hermes-2-Theta-Llama-3-8B"", ""base_model:finetune:NousResearch/Hermes-2-Theta-Llama-3-8B"", ""license:apache-2.0"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: NousResearch/Hermes-2-Theta-Llama-3-8B\nlanguage:\n- en\nlicense: apache-2.0\ntags:\n- text-generation-inference\n- transformers\n- unsloth\n- llama\n- trl"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_model.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-23 08:32:18+00:00"", ""cardData"": ""base_model: NousResearch/Hermes-2-Theta-Llama-3-8B\nlanguage:\n- en\nlicense: apache-2.0\ntags:\n- text-generation-inference\n- transformers\n- unsloth\n- llama\n- trl"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""67dfc6cf729fffffd8993567"", ""modelId"": ""texanrangee/2eadce08-9a7f-4da8-bd01-2625325f3744"", ""usedStorage"": 1721822432}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=texanrangee/2eadce08-9a7f-4da8-bd01-2625325f3744&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Btexanrangee%2F2eadce08-9a7f-4da8-bd01-2625325f3744%5D(%2Ftexanrangee%2F2eadce08-9a7f-4da8-bd01-2625325f3744)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
     | 
    	
        Higgs-Llama-3-70B_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv
    ADDED
    
    | 
         @@ -0,0 +1,208 @@ 
     | 

model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
bosonai/Higgs-Llama-3-70B,"---
license: other
base_model: meta-llama/Meta-Llama-3-70B
---
# Higgs-Llama-3-70B

Higgs-Llama-3-70B is post-trained from [meta-llama/Meta-Llama-3-70B](https://huggingface.co/meta-llama/Meta-Llama-3-70B), specially tuned for role-playing while being competitive in general-domain instruction-following and reasoning.

We perform supervised fine-tuning with our in-house instruction-following and chat datasets. Afterwards, we construct preference pairs with a semi-automated pipeline that relies on both human labelers and our private LLMs.
We conduct iterative preference optimization to align the model. During alignment, we adopted a special strategy to align the model’s behavior with the system message.
Compared with other instruct models, Higgs models follow their roles more closely.

See our [release blog](https://boson.ai/higgs-opensource/).

## Evaluation

All benchmarks lead to eventual overfitting, including those for LLMs. Training on data that is particularly beneficial for benchmarks typically does not improve (and may even worsen) role-playing performance. We worked to exclude benchmark data, including their training examples, from our fine-tuning data.

We highlight our results on two new and challenging benchmarks: [MMLU-Pro](https://huggingface.co/datasets/TIGER-Lab/MMLU-Pro) and [Arena-Hard](https://github.com/lm-sys/arena-hard-auto). MMLU-Pro extends the popular MMLU benchmark. Because it was released only recently (after our models finished training), we believe it also suffers less from overfitting by other released models.

### MMLU-Pro

<table class=""col-12 col-md-6"" width=""100px"">
  <tr>
    <td><b>Model</b></td>
    <td><b>MMLU-Pro</b></td>
  </tr>
  <tr>
    <td>GPT-4o</td>
    <td>72.6</td>
  </tr>
  <tr>
    <td>Gemini-1.5-Pro</td>
    <td>69.0</td>
  </tr>
  <tr>
    <td>Claude-3-Opus</td>
    <td>68.5</td>
  </tr>
  <tr>
    <td>GPT-4-Turbo</td>
    <td>63.7</td>
  </tr>
  <tr style=""font-weight: bold"">
    <td>Higgs-Llama-3-70B</td>
    <td>63.2</td>
  </tr>
  <tr>
    <td>Gemini-1.5-Flash</td>
    <td>59.1</td>
  </tr>
  <tr>
    <td>Claude-3-Sonnet</td>
    <td>56.8</td>
  </tr>
  <tr>
    <td>Llama-3-70B-Instruct</td>
    <td>56.2</td>
  </tr>
</table>


### Arena-Hard

<table class=""col-12 col-md-6"">
  <tr>
    <td><b>Model</b></td>
    <td><b>Arena-Hard</b></td>
  </tr>
  <tr>
    <td>GPT-4o</td>
    <td>79.5</td>
  </tr>
  <tr>
    <td>Gemini-1.5-Pro</td>
    <td>72.0</td>
  </tr>
  <tr>
    <td>Claude-3-Opus</td>
    <td>60.4</td>
  </tr>
  <tr style=""font-weight: bold"">
    <td>Higgs-Llama-3-70B</td>
    <td>49.6</td>
  </tr>
  <tr>
    <td>Gemini-1.5-Flash</td>
    <td>49.6</td>
  </tr>
  <tr>
    <td>Claude-3-Sonnet</td>
    <td>46.8</td>
  </tr>
  <tr>
    <td>Claude-3-Haiku</td>
    <td>41.5</td>
  </tr>
  <tr>
    <td>Llama-3-70B-Instruct</td>
    <td>41.1</td>
  </tr>
  <tr>
    <td>GPT-4-0613</td>
    <td>37.9</td>
  </tr>
  <tr>
    <td>Mistral-Large</td>
    <td>37.7</td>
  </tr>
</table>

## Overall Results

In the following, we compare our model's performance with `gpt-4o` and `Llama-3-70B-Instruct` on [MMLU-Pro](https://github.com/TIGER-AI-Lab/MMLU-Pro), [Arena-Hard](https://github.com/lm-sys/arena-hard-auto/tree/main), [AlpacaEval 2.0 LC](https://github.com/tatsu-lab/alpaca_eval), MMLU, GPQA and DROP. For MMLU, GPQA and DROP, we adopt [openai/simple-evals](https://github.com/openai/simple-evals) for evaluation. For the other benchmarks, we evaluate via the official implementation.

<div style=""overflow: auto"">
  <table>
    <tr>
      <th></th>
      <td><b>MMLU-Pro</b></td>
      <td><b>Arena-Hard</b></td>
      <td><b>AlpacaEval <br> 2.0 LC</b></td>
      <td><b>MMLU</b></td>
      <td><b>GPQA</b></td>
      <td><b>DROP <br> (F1,3-shot)</b></td>
    </tr>
    <tr>
      <td>GPT-4o</td>
      <td>72.6</td>
      <td>79.5*</td>
      <td>57.5</td>
      <td>87.2</td>
      <td>49.9</td>
      <td>83.7</td>
    </tr>
    <tr style=""font-weight: bold"">
      <td>Higgs-Llama-3-70B</td>
      <td>63.2</td>
      <td>49.6</td>
      <td>38.6</td>
      <td>80.8</td>
      <td>42.1</td>
      <td>81.6</td>
    </tr>
    <tr>
      <td>Llama-3-70B-Instruct*</td>
      <td>56.2</td>
      <td>41.1</td>
      <td>34.4</td>
      <td>80.2</td>
      <td>41.3</td>
      <td>81.4</td>
    </tr>
  </table>
</div>

<small>*For Llama-3-70B-Instruct, the MMLU-Pro number is copied from the [MMLU-Pro leaderboard](https://huggingface.co/spaces/TIGER-Lab/MMLU-Pro); the Arena-Hard numbers are copied from the [leaderboard updated on 5/21](https://github.com/lm-sys/arena-hard-auto/tree/main?tab=readme-ov-file#full-leaderboard-updated-0521) while we run gpt-4o ourselves; and the MMLU/GPQA/DROP numbers are copied from [simple-evals](https://github.com/openai/simple-evals).</small>


## How to use

We use the same prompting format as in Meta-Llama-3-70B-Instruct.

### Use with transformers

See the snippet below for usage with Transformers:

```python
import transformers
import torch

model_id = ""bosonai/Higgs-Llama-3-70B""

pipeline = transformers.pipeline(
  ""text-generation"",
  model=model_id,
  model_kwargs={""torch_dtype"": torch.bfloat16},
  device_map=""auto"",
)

messages = [
  {""role"": ""system"", ""content"": ""You are an AI assistant that speaks in the style of Sheldon Cooper. You are arguing with the user and are trying to prove the opposite of what the user said.""},
  {""role"": ""user"", ""content"": ""The earth is round.""},
]

prompt = pipeline.tokenizer.apply_chat_template(
  messages,
  tokenize=False,
  add_generation_prompt=True
)

outputs = pipeline(
  prompt,
  max_new_tokens=256,
  eos_token_id=[
    pipeline.tokenizer.convert_tokens_to_ids(""<|eot_id|>""),
    pipeline.tokenizer.eos_token_id,
  ],
  do_sample=True,
  temperature=1.0,
  top_p=0.95,
)
print(outputs[0][""generated_text""][len(prompt):])
```

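As an illustrative aside that is not part of the original card: since the prompting format matches Meta-Llama-3-70B-Instruct, the short sketch below only renders the prompt string, so you can confirm the Llama-3-style header tokens (`<|begin_of_text|>`, `<|start_header_id|>`, `<|eot_id|>`) produced by the chat template before spending GPU time on generation. The example messages are placeholders.

```python
# Illustrative sketch (not from the original card): inspect the raw prompt string
# built by the tokenizer's Llama-3-style chat template before running generation.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('bosonai/Higgs-Llama-3-70B')

messages = [
    {'role': 'system', 'content': 'You are a concise assistant.'},
    {'role': 'user', 'content': 'Say hello.'},
]

prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)
# Expected shape (per the Llama-3 chat template):
# <|begin_of_text|><|start_header_id|>system<|end_header_id|>
#
# You are a concise assistant.<|eot_id|><|start_header_id|>user<|end_header_id|>
#
# Say hello.<|eot_id|><|start_header_id|>assistant<|end_header_id|>
```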
## License
            [Our license](https://huggingface.co/bosonai/Higgs-Llama-3-70B/blob/main/LICENSE) is based on Meta's LLama 3 Community License.","{""id"": ""bosonai/Higgs-Llama-3-70B"", ""author"": ""bosonai"", ""sha"": ""ecad51d809c049016a7cbd8380769ad19b0b55bf"", ""last_modified"": ""2024-08-20 03:18:12+00:00"", ""created_at"": ""2024-06-05 20:10:04+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 113, ""downloads_all_time"": null, ""likes"": 220, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""llama"", ""text-generation"", ""conversational"", ""base_model:meta-llama/Meta-Llama-3-70B"", ""base_model:finetune:meta-llama/Meta-Llama-3-70B"", ""license:other"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: meta-llama/Meta-Llama-3-70B\nlicense: other"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<|begin_of_text|>"", ""chat_template"": ""{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = '<|begin_of_text|>' + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}"", ""eos_token"": ""<|end_of_text|>"", ""pad_token"": ""<|end_of_text|>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='arena-hard-v0.1/model_answer/higgs-llama-3-70b.jsonl', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='arena-hard-v0.1/model_judgement/gpt-4-1106-preview/higgs-llama-3-70b.jsonl', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00062.safetensors', 
size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00012-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00013-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00014-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00015-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00016-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00017-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00018-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00019-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00020-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00021-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00022-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00023-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00024-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00025-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00026-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00027-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00028-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00029-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00030-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00031-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00032-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00033-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00034-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00035-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00036-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00037-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00038-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00039-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00040-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00041-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00042-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00043-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00044-of-00062.safetensors', size=None, blob_id=None, 
lfs=None)"", ""RepoSibling(rfilename='model-00045-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00046-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00047-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00048-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00049-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00050-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00051-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00052-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00053-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00054-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00055-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00056-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00057-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00058-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00059-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00060-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00061-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00062-of-00062.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 70553706496}, ""total"": 70553706496}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-08-20 03:18:12+00:00"", ""cardData"": ""base_model: meta-llama/Meta-Llama-3-70B\nlicense: other"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6660c61c221a94f56b7c19c4"", ""modelId"": ""bosonai/Higgs-Llama-3-70B"", ""usedStorage"": 282214910752}",0,,0,,0,"https://huggingface.co/legraphista/Higgs-Llama-3-70B-IMat-GGUF, https://huggingface.co/mradermacher/Higgs-Llama-3-70B-GGUF, https://huggingface.co/mradermacher/Higgs-Llama-3-70B-i1-GGUF, https://huggingface.co/XelotX/Higgs-Llama-3-70B-GGUF-XelotX-Quants, https://huggingface.co/altomek/Higgs-Llama-3-70B-4bpw-EXL2",5,"https://huggingface.co/sophosympatheia/New-Dawn-Llama-3-70B-32K-v1.0, https://huggingface.co/waldie/New-Dawn-Llama-3-70B-32K-v1.0-2.25bpw-h6-exl2, https://huggingface.co/q18/sophosympatheia-New-Dawn-Llama-3-70B-32K-v1.0-exl2-8.0bpw-h8, https://huggingface.co/bartowski/New-Dawn-Llama-3-70B-32K-v1.0-GGUF, https://huggingface.co/BigHuggyD/sophosympatheia_New-Dawn-Llama-3-70B-32K-v1.0_exl2_4.5bpw_h8, https://huggingface.co/BigHuggyD/sophosympatheia_New-Dawn-Llama-3-70B-32K-v1.0_exl2_5.5bpw_h8, 
https://huggingface.co/BigHuggyD/sophosympatheia_New-Dawn-Llama-3-70B-32K-v1.0_exl2_6.0bpw_h8, https://huggingface.co/BigHuggyD/sophosympatheia_New-Dawn-Llama-3-70B-32K-v1.0_exl2_6.5bpw_h8, https://huggingface.co/BigHuggyD/sophosympatheia_New-Dawn-Llama-3-70B-32K-v1.0_exl2_7.0bpw_h8, https://huggingface.co/BigHuggyD/sophosympatheia_New-Dawn-Llama-3-70B-32K-v1.0_exl2_7.5bpw_h8, https://huggingface.co/BigHuggyD/sophosympatheia_New-Dawn-Llama-3-70B-32K-v1.0_exl2_8.0bpw_h8, https://huggingface.co/Duplo/mergekit-slerp-qbvgbso, https://huggingface.co/Jellon/New-Dawn-Llama-3-70B-32K-v1.0-3bpw, https://huggingface.co/Nexesenex/Llama_3.x_70b_Tess_Higgs_128K_v1.0, https://huggingface.co/Nexesenex/Llama_3.x_70b_L3.3_Higgs_128K_v1.02, https://huggingface.co/Nexesenex/Llama_3.x_70b_L3.3_VulpeculHiggs_128K_v1.02",16,"TIGER-Lab/MMLU-Pro, huggingface/InferenceSupport/discussions/922",2
         
HunyuanVideo_comfy_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv
ADDED
@@ -0,0 +1,13 @@
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
Kijai/HunyuanVideo_comfy,"---
license: other
license_name: tencent-hunyuan-community
license_link: LICENSE
---
Safetensors and fp8 versions of the HunyuanVideo models: https://huggingface.co/tencent/HunyuanVideo

To be used with the ComfyUI native HunyuanVideo implementation, or my wrapper: https://github.com/kijai/ComfyUI-HunyuanVideoWrapper

FastVideo's distilled version, original from: https://huggingface.co/FastVideo/FastHunyuan

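As a hedged aside that is not part of the original card: one way to pull an individual checkpoint from this repo for ComfyUI is with `huggingface_hub`. The filename below is one of the repo's released files; the target folder is an assumption and should be adapted to your ComfyUI layout.

```python
# Illustrative sketch (not from the original card): download one released checkpoint
# into a local ComfyUI models folder using huggingface_hub.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id='Kijai/HunyuanVideo_comfy',
    filename='hunyuan_video_720_cfgdistill_fp8_e4m3fn.safetensors',
    local_dir='ComfyUI/models/diffusion_models',  # assumed target folder; adjust as needed
)
print(path)
```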
            GGUF's created using city96's scripts, and only works with their nodes: https://github.com/city96/ComfyUI-GGUF","{""id"": ""Kijai/HunyuanVideo_comfy"", ""author"": ""Kijai"", ""sha"": ""a7a84aaa50cf02771b3d516ce8f66278548eb3a5"", ""last_modified"": ""2025-04-18 22:19:31+00:00"", ""created_at"": ""2024-12-03 16:19:27+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 10743, ""downloads_all_time"": null, ""likes"": 415, ""library_name"": null, ""gguf"": {""total"": 12821012544, ""architecture"": ""hyvid""}, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""gguf"", ""license:other"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: other\nlicense_name: tencent-hunyuan-community\nlicense_link: LICENSE"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='FramePackI2V_HY_bf16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='FramePackI2V_HY_converted_experimental_fp8_e4m3fn.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='FramePackI2V_HY_fp8_e4m3fn.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='HunyuanI2V_basic_native_workflow_example.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='HunyuanVideo_dashtoon_keyframe_lora_converted_bf16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='HunyuanVideo_dashtoon_keyframe_lora_converted_comfy_bf16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan_video_720_cfgdistill_bf16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan_video_720_cfgdistill_fp8_e4m3fn.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan_video_FastVideo_720_fp8_e4m3fn.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan_video_I2V-Q3_K_S.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan_video_I2V-Q4_K_S.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan_video_I2V-Q6_K.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan_video_I2V-Q8_0.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan_video_I2V_720_fixed_bf16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan_video_I2V_720_fixed_fp8_e4m3fn.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan_video_I2V_fp8_e4m3fn.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan_video_I2V_fp8_e5m2.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan_video_accvid-t2v-5-steps_fp8_e4m3fn.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan_video_accvid_5_steps_lora_rank16_fp8_e4m3fn.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan_video_accvid_t2v-5-steps_Q3_K_S.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan_video_accvid_t2v-5-steps_Q4_K_S.gguf', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='hunyuan_video_accvid_t2v-5-steps_Q6_K.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan_video_accvid_t2v-5-steps_Q8_0.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan_video_vae_bf16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan_video_vae_fp32.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hyvid_I2V_lora_embrace.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hyvid_I2V_lora_hair_growth.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hyvid_dashtoon_keyframe_native_example_01.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hyvideo_FastVideo_LoRA-fp8.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-18 22:19:31+00:00"", ""cardData"": ""license: other\nlicense_name: tencent-hunyuan-community\nlicense_link: LICENSE"", ""transformersInfo"": null, ""_id"": ""674f2f8f51a64ee560f8ae65"", ""modelId"": ""Kijai/HunyuanVideo_comfy"", ""usedStorage"": 299608272484}",0,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=Kijai/HunyuanVideo_comfy&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BKijai%2FHunyuanVideo_comfy%5D(%2FKijai%2FHunyuanVideo_comfy)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
HunyuanVideo_finetunes_20250424_193500.csv_finetunes_20250424_193500.csv
ADDED
The diff for this file is too large to render. See raw diff.

Illustration-Diffusion_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv
ADDED
@@ -0,0 +1,15 @@
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
ogkalu/Illustration-Diffusion,"---
license: creativeml-openrail-m
tags:
- text-to-image
---
2D Illustration Styles are scarce on Stable Diffusion. Inspired by Hollie Mengert, this is a fine-tuned Stable Diffusion model trained on her work. The correct token is holliemengert artstyle.

Hollie is **not** affiliated with this. You can read about her stance on the issue here - https://waxy.org/2022/11/invasive-diffusion-how-one-unwilling-illustrator-found-herself-turned-into-an-ai-model/

**Portraits generated by this model:**

**Landscapes generated by this model:**

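As a hedged aside that is not part of the original card: since the repository ships a single `hollie-mengert.ckpt` checkpoint, one plausible way to try it is to load it as a single-file `diffusers` pipeline and prompt with the trigger token noted above. The prompt text and output filename are placeholders, and `from_single_file` support depends on your diffusers version.

```python
# Illustrative sketch (not from the original card): load the released .ckpt with
# diffusers and prompt using the documented trigger token 'holliemengert artstyle'.
import torch
from huggingface_hub import hf_hub_download
from diffusers import StableDiffusionPipeline

ckpt_path = hf_hub_download('ogkalu/Illustration-Diffusion', 'hollie-mengert.ckpt')

pipe = StableDiffusionPipeline.from_single_file(ckpt_path, torch_dtype=torch.float16)
pipe = pipe.to('cuda')

image = pipe('portrait of a woman reading in a cafe, holliemengert artstyle').images[0]
image.save('illustration.png')
```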
            ","{""id"": ""ogkalu/Illustration-Diffusion"", ""author"": ""ogkalu"", ""sha"": ""d422f88702aafe2a62b3943aad1e094972c7a7fa"", ""last_modified"": ""2022-11-15 12:57:36+00:00"", ""created_at"": ""2022-10-22 02:13:26+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 162, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""text-to-image"", ""license:creativeml-openrail-m"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: creativeml-openrail-m\ntags:\n- text-to-image"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generated-landscapes1.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generated-landscapes2.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generated-portraits.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hollie-mengert.ckpt', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2022-11-15 12:57:36+00:00"", ""cardData"": ""license: creativeml-openrail-m\ntags:\n- text-to-image"", ""transformersInfo"": null, ""_id"": ""635351c602da56b12ed6f05c"", ""modelId"": ""ogkalu/Illustration-Diffusion"", ""usedStorage"": 4279087471}",0,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=ogkalu/Illustration-Diffusion&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bogkalu%2FIllustration-Diffusion%5D(%2Fogkalu%2FIllustration-Diffusion)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
InfiniteYou_finetunes_20250425_143346.csv_finetunes_20250425_143346.csv
ADDED
@@ -0,0 +1,144 @@
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
ByteDance/InfiniteYou,"---
license: cc-by-nc-4.0
language:
- en
library_name: infinite-you
pipeline_tag: text-to-image
tags:
- Text-to-Image
- FLUX.1-dev
- image-generation
- Diffusion-Transformer
- subject-personalization
base_model: black-forest-labs/FLUX.1-dev
---


# InfiniteYou Model Card

<div style=""display:flex;justify-content: center"">
<a href=""https://bytedance.github.io/InfiniteYou""><img src=""https://img.shields.io/static/v1?label=Project&message=Page&color=blue&logo=github-pages""></a>  
<a href=""https://arxiv.org/abs/2503.16418""><img src=""https://img.shields.io/static/v1?label=ArXiv&message=Paper&color=darkred&logo=arxiv""></a>  
<a href=""https://github.com/bytedance/InfiniteYou""><img src=""https://img.shields.io/static/v1?label=GitHub&message=Code&color=green&logo=github""></a>  
<a href=""https://github.com/bytedance/ComfyUI_InfiniteYou""><img src=""https://img.shields.io/static/v1?label=%E2%9A%99%EF%B8%8F%20ComfyUI&message=Node&color=purple""></a>  
<a href=""https://huggingface.co/spaces/ByteDance/InfiniteYou-FLUX""><img src=""https://img.shields.io/static/v1?label=%F0%9F%A4%97%20Hugging%20Face&message=Demo&color=orange""></a>  
</div>



This repository provides the official models for the following paper:

[**InfiniteYou: Flexible Photo Recrafting While Preserving Your Identity**](https://arxiv.org/abs/2503.16418)<br />
[Liming Jiang](https://liming-jiang.com/), 
[Qing Yan](https://scholar.google.com/citations?user=0TIYjPAAAAAJ), 
[Yumin Jia](https://www.linkedin.com/in/yuminjia/), 
[Zichuan Liu](https://scholar.google.com/citations?user=-H18WY8AAAAJ), 
[Hao Kang](https://scholar.google.com/citations?user=VeTCSyEAAAAJ), 
[Xin Lu](https://scholar.google.com/citations?user=mFC0wp8AAAAJ)<br />
ByteDance Intelligent Creation

> **Abstract:** Achieving flexible and high-fidelity identity-preserved image generation remains formidable, particularly with advanced Diffusion Transformers (DiTs) like FLUX. We introduce **InfiniteYou (InfU)**, one of the earliest robust frameworks leveraging DiTs for this task. InfU addresses significant issues of existing methods, such as insufficient identity similarity, poor text-image alignment, and low generation quality and aesthetics. Central to InfU is InfuseNet, a component that injects identity features into the DiT base model via residual connections, enhancing identity similarity while maintaining generation capabilities. A multi-stage training strategy, including pretraining and supervised fine-tuning (SFT) with synthetic single-person-multiple-sample (SPMS) data, further improves text-image alignment, ameliorates image quality, and alleviates face copy-pasting. Extensive experiments demonstrate that InfU achieves state-of-the-art performance, surpassing existing baselines. In addition, the plug-and-play design of InfU ensures compatibility with various existing methods, offering a valuable contribution to the broader community.


## 🔧 Installation and Usage

Please clone our [GitHub code repository](https://github.com/bytedance/InfiniteYou) and follow the [detailed instructions](https://github.com/bytedance/InfiniteYou#-requirements-and-installation) to install and use the released models for local inference.

We appreciate the GPU grant from the Hugging Face team. 
You can also try our [InfiniteYou-FLUX Hugging Face demo](https://huggingface.co/spaces/ByteDance/InfiniteYou-FLUX) online.


## 💡 Important Usage Tips

- We released two model variants of InfiniteYou-FLUX v1.0: [aes_stage2](https://huggingface.co/ByteDance/InfiniteYou/tree/main/infu_flux_v1.0/aes_stage2) and [sim_stage1](https://huggingface.co/ByteDance/InfiniteYou/tree/main/infu_flux_v1.0/sim_stage1). `aes_stage2` is our model after stage-2 SFT, which is used by default for better text-image alignment and aesthetics. If you wish to achieve higher ID similarity, please try `sim_stage1`.

- To better fit specific personal needs, we find that two arguments are highly useful to adjust in our [code](https://github.com/bytedance/InfiniteYou): `--infusenet_conditioning_scale` (default: `1.0`) and `--infusenet_guidance_start` (default: `0.0`). Usually, you may NOT need to adjust them. If necessary, start by trying a slightly larger `--infusenet_guidance_start` (*e.g.*, `0.1`) only (especially helpful for `sim_stage1`). If still not satisfactory, then try a slightly smaller `--infusenet_conditioning_scale` (*e.g.*, `0.9`).

- We also provide two LoRAs ([Realism](https://civitai.com/models/631986?modelVersionId=706528) and [Anti-blur](https://civitai.com/models/675581/anti-blur-flux-lora)) to enable additional usage flexibility. If needed, try `Realism` only first. They are *entirely optional* examples to try and are NOT used in our paper.

- If the generated gender is not preferred, try adding specific words in the text prompt, such as 'a man', 'a woman', *etc*. We encourage using inclusive and respectful language.


## 🏰 Model Zoo

| 65 | 
         
            +
            | InfiniteYou Version | Model Version | Base Model Trained with | Description |  
         
     | 
| 66 | 
         
            +
            | :---: | :---: | :---: | :---: |
         
     | 
| 67 | 
         
            +
            | [InfiniteYou-FLUX v1.0](https://huggingface.co/ByteDance/InfiniteYou) | [aes_stage2](https://huggingface.co/ByteDance/InfiniteYou/tree/main/infu_flux_v1.0/aes_stage2) | [FLUX.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev) | Stage-2 model after SFT. Better text-image alignment and aesthetics. |
         
     | 
| 68 | 
         
            +
            | [InfiniteYou-FLUX v1.0](https://huggingface.co/ByteDance/InfiniteYou) | [sim_stage1](https://huggingface.co/ByteDance/InfiniteYou/tree/main/infu_flux_v1.0/sim_stage1) | [FLUX.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev) | Stage-1 model before SFT. Higher identity similarity. |
         
     | 
| 69 | 
         
            +
             
     | 
| 70 | 
         
            +
             
     | 
| 71 | 
         
            +
            ## 🆚 Comparison with State-of-the-Art Relevant Methods
         
     | 
| 72 | 
         
            +
             
     | 
| 73 | 
         
            +
            
         
     | 
| 74 | 
         
            +
             
     | 
| 75 | 
         
            +
            Qualitative comparison results of InfU with the state-of-the-art baselines, FLUX.1-dev IP-Adapter and PuLID-FLUX. The identity similarity and text-image alignment of the results generated by FLUX.1-dev IP-Adapter (IPA) are inadequate. PuLID-FLUX generates images with decent identity similarity. However, it suffers from poor text-image alignment (Columns 1, 2, 4), and the image quality (e.g., bad hands in Column 5) and aesthetic appeal are degraded. In addition, the face copy-paste issue of PuLID-FLUX is evident (Column 5). In comparison, the proposed InfU outperforms the baselines across all dimensions.
         
     | 
| 76 | 
         
            +
             
     | 
| 77 | 
         
            +
             
     | 
| 78 | 
         
            +
            ## ⚙️ Plug-and-Play Property with Off-the-Shelf Popular Approaches
         
     | 
| 79 | 
         
            +
             
     | 
| 80 | 
         
            +
            
         
     | 
| 81 | 
         
            +
             
     | 
| 82 | 
         
            +
InfU features a desirable plug-and-play design that is compatible with many existing methods. It naturally supports replacing the base model with any variant of FLUX.1-dev, such as FLUX.1-schnell for more efficient generation (e.g., in 4 steps). Compatibility with ControlNets and LoRAs provides more controllability and flexibility for customized tasks. Notably, compatibility with OminiControl extends the potential for multi-concept personalization, such as personalized generation with interacting identities (IDs) and objects. InfU is also compatible with IP-Adapter (IPA) for stylizing personalized images, producing decent results when style references are injected via IPA. This plug-and-play design may extend to even more approaches, providing a valuable contribution to the broader community.
         
     | 
| 83 | 
         
            +
             
     | 
| 84 | 
         
            +
             
     | 
| 85 | 
         
            +
            ## 📜 Disclaimer and Licenses
         
     | 
| 86 | 
         
            +
             
     | 
| 87 | 
         
            +
The images used in this repository and related demos are sourced from consenting subjects or generated by the models.
         
     | 
| 88 | 
         
            +
            These pictures are intended solely to showcase the capabilities of our research. If you have any concerns, please feel free to contact us, and we will promptly remove any inappropriate content.
         
     | 
| 89 | 
         
            +
             
     | 
| 90 | 
         
            +
            Our model is released under the [Creative Commons Attribution-NonCommercial 4.0 International Public License](./LICENSE) for academic research purposes only. Any manual or automatic downloading of the face models from [InsightFace](https://github.com/deepinsight/insightface), the [FLUX.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev) base model, LoRAs ([Realism](https://civitai.com/models/631986?modelVersionId=706528) and [Anti-blur](https://civitai.com/models/675581/anti-blur-flux-lora)), *etc.*, must follow their original licenses and be used only for academic research purposes.
         
     | 
| 91 | 
         
            +
             
     | 
| 92 | 
         
            +
            This research aims to positively impact the field of Generative AI. Any usage of this method must be responsible and comply with local laws. The developers do not assume any responsibility for any potential misuse.
         
     | 
| 93 | 
         
            +
             
     | 
| 94 | 
         
            +
             
     | 
| 95 | 
         
            +
            ## 📖 Citation
         
     | 
| 96 | 
         
            +
             
     | 
| 97 | 
         
            +
            If you find InfiniteYou useful for your research or applications, please cite our paper:
         
     | 
| 98 | 
         
            +
             
     | 
| 99 | 
         
            +
            ```bibtex
         
     | 
| 100 | 
         
            +
            @article{jiang2025infiniteyou,
         
     | 
| 101 | 
         
            +
              title={{InfiniteYou}: Flexible Photo Recrafting While Preserving Your Identity},
         
     | 
| 102 | 
         
            +
              author={Jiang, Liming and Yan, Qing and Jia, Yumin and Liu, Zichuan and Kang, Hao and Lu, Xin},
         
     | 
| 103 | 
         
            +
              journal={arXiv preprint},
         
     | 
| 104 | 
         
            +
              volume={arXiv:2503.16418},
         
     | 
| 105 | 
         
            +
              year={2025}
         
     | 
| 106 | 
         
            +
            }
         
     | 
| 107 | 
         
            +
            ```
         
     | 
| 108 | 
         
            +
             
     | 
| 109 | 
         
            +
We would also appreciate it if you could give our [GitHub repository](https://github.com/bytedance/InfiniteYou) a star ⭐. Thanks a lot!
         
     | 
| 110 | 
         
            +
            ","{""id"": ""ByteDance/InfiniteYou"", ""author"": ""ByteDance"", ""sha"": ""fcaf3a938b2bce2a38ed88491a648eb771ea3376"", ""last_modified"": ""2025-04-16 06:47:33+00:00"", ""created_at"": ""2025-03-12 23:43:45+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 12987, ""downloads_all_time"": null, ""likes"": 586, ""library_name"": ""infinite-you"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""infinite-you"", ""onnx"", ""diffusers"", ""safetensors"", ""Text-to-Image"", ""FLUX.1-dev"", ""image-generation"", ""Diffusion-Transformer"", ""subject-personalization"", ""text-to-image"", ""en"", ""arxiv:2503.16418"", ""base_model:black-forest-labs/FLUX.1-dev"", ""base_model:quantized:black-forest-labs/FLUX.1-dev"", ""license:cc-by-nc-4.0"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: black-forest-labs/FLUX.1-dev\nlanguage:\n- en\nlibrary_name: infinite-you\nlicense: cc-by-nc-4.0\npipeline_tag: text-to-image\ntags:\n- Text-to-Image\n- FLUX.1-dev\n- image-generation\n- Diffusion-Transformer\n- subject-personalization"", ""widget_data"": null, ""model_index"": null, ""config"": {}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/comparative_results.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/plug_and_play.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/teaser.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='infu_flux_v1.0/aes_stage2/InfuseNetModel/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='infu_flux_v1.0/aes_stage2/InfuseNetModel/diffusion_pytorch_model-00001-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='infu_flux_v1.0/aes_stage2/InfuseNetModel/diffusion_pytorch_model-00002-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='infu_flux_v1.0/aes_stage2/InfuseNetModel/diffusion_pytorch_model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='infu_flux_v1.0/aes_stage2/image_proj_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='infu_flux_v1.0/aes_stage2/infusenet_aes_bf16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='infu_flux_v1.0/aes_stage2/infusenet_aes_fp8e4m3fn.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='infu_flux_v1.0/sim_stage1/InfuseNetModel/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='infu_flux_v1.0/sim_stage1/InfuseNetModel/diffusion_pytorch_model-00001-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='infu_flux_v1.0/sim_stage1/InfuseNetModel/diffusion_pytorch_model-00002-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='infu_flux_v1.0/sim_stage1/InfuseNetModel/diffusion_pytorch_model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='infu_flux_v1.0/sim_stage1/image_proj_model.bin', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='infu_flux_v1.0/sim_stage1/infusenet_sim_bf16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='infu_flux_v1.0/sim_stage1/infusenet_sim_fp8e4m3fn.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='supports/insightface/models/antelopev2/1k3d68.onnx', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='supports/insightface/models/antelopev2/2d106det.onnx', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='supports/insightface/models/antelopev2/genderage.onnx', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='supports/insightface/models/antelopev2/glintr100.onnx', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='supports/insightface/models/antelopev2/scrfd_10g_bnkps.onnx', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='supports/optional_loras/flux_anti_blur_lora.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='supports/optional_loras/flux_realism_lora.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [""ByteDance/InfiniteYou-FLUX"", ""azhan77168/ify"", ""tuan2308/InfiniteYou-FLUX"", ""hyper-upscale/InfiniteYou-FLUX"", ""svjack/InfiniteYou-FLUX"", ""ObiWanMaxim/frameme"", ""aiqtech/InfiniteYou-FLUX"", ""Kouroshhhhh/Demo-stable-diffusion""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-16 06:47:33+00:00"", ""cardData"": ""base_model: black-forest-labs/FLUX.1-dev\nlanguage:\n- en\nlibrary_name: infinite-you\nlicense: cc-by-nc-4.0\npipeline_tag: text-to-image\ntags:\n- Text-to-Image\n- FLUX.1-dev\n- image-generation\n- Diffusion-Transformer\n- subject-personalization"", ""transformersInfo"": null, ""_id"": ""67d21c317e86b8424d64adeb"", ""modelId"": ""ByteDance/InfiniteYou"", ""usedStorage"": 43162209653}",0,"https://huggingface.co/cenk32/odevtakip, https://huggingface.co/Hilton03/christopher",2,"https://huggingface.co/Andree2025/cordeiro, https://huggingface.co/flyingsnitch/flyingsn, https://huggingface.co/ahmeddoma/lijkoikl, https://huggingface.co/EymenPasha/NeuroAI_deneme_1_Seg, https://huggingface.co/gleebergoob/abc123, https://huggingface.co/gleebergoob/abc4321, https://huggingface.co/gleebergoob/abc54321, https://huggingface.co/gleebergoob/abc65321, https://huggingface.co/gleebergoob/cba654321, https://huggingface.co/leocafe/pocketoption-ai, https://huggingface.co/Alpha-singularity/Qubitron-Omega, https://huggingface.co/gleebergoob/abcumshot, https://huggingface.co/pbsabbi/pbsabbi-lora, https://huggingface.co/destinP72/cow",14,,0,,0,"ByteDance/InfiniteYou-FLUX, Kouroshhhhh/Demo-stable-diffusion, ObiWanMaxim/frameme, aiqtech/InfiniteYou-FLUX, azhan77168/ify, huggingface/InferenceSupport/discussions/2, hyper-upscale/InfiniteYou-FLUX, svjack/InfiniteYou-FLUX, tuan2308/InfiniteYou-FLUX",9
         
     | 
| 111 | 
         
            +
            cenk32/odevtakip,"---
         
     | 
| 112 | 
         
            +
            license: other
         
     | 
| 113 | 
         
            +
            license_name: cenk3232
         
     | 
| 114 | 
         
            +
            license_link: LICENSE
         
     | 
| 115 | 
         
            +
            datasets:
         
     | 
| 116 | 
         
            +
            - HuggingFaceFW/fineweb
         
     | 
| 117 | 
         
            +
            metrics:
         
     | 
| 118 | 
         
            +
            - accuracy
         
     | 
| 119 | 
         
            +
            new_version: sesame/csm-1b
         
     | 
| 120 | 
         
            +
            library_name: espnet
         
     | 
| 121 | 
         
            +
            language:
         
     | 
| 122 | 
         
            +
            - tr
         
     | 
| 123 | 
         
            +
            base_model:
         
     | 
| 124 | 
         
            +
            - ByteDance/InfiniteYou
         
     | 
| 125 | 
         
            +
            tags:
         
     | 
| 126 | 
         
            +
            - not-for-all-audiences
         
     | 
| 127 | 
         
            +
            ---","{""id"": ""cenk32/odevtakip"", ""author"": ""cenk32"", ""sha"": ""2e167f956010e14ed2f478224c2d3afdc5704c9c"", ""last_modified"": ""2025-04-05 19:43:31+00:00"", ""created_at"": ""2025-04-05 19:39:28+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""espnet"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""espnet"", ""not-for-all-audiences"", ""tr"", ""dataset:HuggingFaceFW/fineweb"", ""base_model:ByteDance/InfiniteYou"", ""base_model:finetune:ByteDance/InfiniteYou"", ""license:other"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- ByteDance/InfiniteYou\ndatasets:\n- HuggingFaceFW/fineweb\nlanguage:\n- tr\nlibrary_name: espnet\nlicense: other\nlicense_name: cenk3232\nlicense_link: LICENSE\nmetrics:\n- accuracy\ntags:\n- not-for-all-audiences\nnew_version: sesame/csm-1b"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-05 19:43:31+00:00"", ""cardData"": ""base_model:\n- ByteDance/InfiniteYou\ndatasets:\n- HuggingFaceFW/fineweb\nlanguage:\n- tr\nlibrary_name: espnet\nlicense: other\nlicense_name: cenk3232\nlicense_link: LICENSE\nmetrics:\n- accuracy\ntags:\n- not-for-all-audiences\nnew_version: sesame/csm-1b"", ""transformersInfo"": null, ""_id"": ""67f186f01a9347225d5d486d"", ""modelId"": ""cenk32/odevtakip"", ""usedStorage"": 0}",1,,0,,0,,0,,0,,0
         
     | 
| 128 | 
         
            +
            Hilton03/christopher,"---
         
     | 
| 129 | 
         
            +
            license: apache-2.0
         
     | 
| 130 | 
         
            +
            datasets:
         
     | 
| 131 | 
         
            +
            - nvidia/Llama-Nemotron-Post-Training-Dataset
         
     | 
| 132 | 
         
            +
            language:
         
     | 
| 133 | 
         
            +
            - af
         
     | 
| 134 | 
         
            +
            metrics:
         
     | 
| 135 | 
         
            +
            - code_eval
         
     | 
| 136 | 
         
            +
            base_model:
         
     | 
| 137 | 
         
            +
            - ByteDance/InfiniteYou
         
     | 
| 138 | 
         
            +
            - HiDream-ai/HiDream-I1-Full
         
     | 
| 139 | 
         
            +
            new_version: mistralai/Mistral-Small-3.1-24B-Instruct-2503
         
     | 
| 140 | 
         
            +
            pipeline_tag: feature-extraction
         
     | 
| 141 | 
         
            +
            library_name: fastai
         
     | 
| 142 | 
         
            +
            tags:
         
     | 
| 143 | 
         
            +
            - art
         
     | 
| 144 | 
         
            +
            ---","{""id"": ""Hilton03/christopher"", ""author"": ""Hilton03"", ""sha"": ""2eaa539d669abc8f9406d2d7d52c734cd98e108b"", ""last_modified"": ""2025-04-18 02:20:24+00:00"", ""created_at"": ""2025-04-18 02:06:26+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""fastai"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""fastai"", ""art"", ""feature-extraction"", ""af"", ""dataset:nvidia/Llama-Nemotron-Post-Training-Dataset"", ""base_model:ByteDance/InfiniteYou"", ""base_model:finetune:ByteDance/InfiniteYou"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": ""feature-extraction"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- ByteDance/InfiniteYou\n- HiDream-ai/HiDream-I1-Full\ndatasets:\n- nvidia/Llama-Nemotron-Post-Training-Dataset\nlanguage:\n- af\nlibrary_name: fastai\nlicense: apache-2.0\nmetrics:\n- code_eval\npipeline_tag: feature-extraction\ntags:\n- art\nnew_version: mistralai/Mistral-Small-3.1-24B-Instruct-2503"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-18 02:20:24+00:00"", ""cardData"": ""base_model:\n- ByteDance/InfiniteYou\n- HiDream-ai/HiDream-I1-Full\ndatasets:\n- nvidia/Llama-Nemotron-Post-Training-Dataset\nlanguage:\n- af\nlibrary_name: fastai\nlicense: apache-2.0\nmetrics:\n- code_eval\npipeline_tag: feature-extraction\ntags:\n- art\nnew_version: mistralai/Mistral-Small-3.1-24B-Instruct-2503"", ""transformersInfo"": null, ""_id"": ""6801b3a23822504831036ccd"", ""modelId"": ""Hilton03/christopher"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=Hilton03/christopher&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BHilton03%2Fchristopher%5D(%2FHilton03%2Fchristopher)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
     | 
    	
        Kokoro-82M_finetunes_20250422_220003.csv
    ADDED
    
    | 
         @@ -0,0 +1,450 @@ 
     | 
| 1 | 
         
            +
            model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
         
     | 
| 2 | 
         
            +
            hexgrad/Kokoro-82M,"---
         
     | 
| 3 | 
         
            +
            license: apache-2.0
         
     | 
| 4 | 
         
            +
            language:
         
     | 
| 5 | 
         
            +
            - en
         
     | 
| 6 | 
         
            +
            base_model:
         
     | 
| 7 | 
         
            +
            - yl4579/StyleTTS2-LJSpeech
         
     | 
| 8 | 
         
            +
            pipeline_tag: text-to-speech
         
     | 
| 9 | 
         
            +
            ---
         
     | 
| 10 | 
         
            +
            **Kokoro** is an open-weight TTS model with 82 million parameters. Despite its lightweight architecture, it delivers comparable quality to larger models while being significantly faster and more cost-efficient. With Apache-licensed weights, Kokoro can be deployed anywhere from production environments to personal projects.
         
     | 
| 11 | 
         
            +
             
     | 
| 12 | 
         
            +
            <audio controls><source src=""https://huggingface.co/hexgrad/Kokoro-82M/resolve/main/samples/HEARME.wav"" type=""audio/wav""></audio>
         
     | 
| 13 | 
         
            +
             
     | 
| 14 | 
         
            +
            🐈 **GitHub**: https://github.com/hexgrad/kokoro
         
     | 
| 15 | 
         
            +
             
     | 
| 16 | 
         
            +
            🚀 **Demo**: https://hf.co/spaces/hexgrad/Kokoro-TTS
         
     | 
| 17 | 
         
            +
             
     | 
| 18 | 
         
            +
            > [!NOTE]
         
     | 
| 19 | 
         
            +
> As of April 2025, the market rate of Kokoro served over API is **under $1 per million characters of text input**, or under $0.06 per hour of audio output. (On average, 1000 characters of input is about 1 minute of output.) Sources: [ArtificialAnalysis/Replicate at 65 cents per M chars](https://artificialanalysis.ai/text-to-speech/model-family/kokoro#price) and [DeepInfra at 80 cents per M chars](https://deepinfra.com/hexgrad/Kokoro-82M). A quick sanity check of these rates is sketched right after this note.
         
     | 
| 20 | 
         
            +
            >
         
     | 
| 21 | 
         
            +
            > This is an Apache-licensed model, and Kokoro has been deployed in numerous projects and commercial APIs. We welcome the deployment of the model in real use cases.
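A quick back-of-the-envelope check that the rates quoted in the note above are mutually consistent; this is a hedged sketch, with the numbers taken directly from the note.

```py
# ~1000 input characters ≈ 1 minute of output audio (per the note above).
price_per_million_chars = 1.00                                # USD, upper bound quoted above
minutes_per_million_chars = 1_000_000 / 1000                  # 1000 minutes of audio
hours_per_million_chars = minutes_per_million_chars / 60      # ≈ 16.7 hours
price_per_hour = price_per_million_chars / hours_per_million_chars
print(f"${price_per_hour:.3f} per hour of audio")             # ≈ $0.06, matching the note
```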
         
     | 
| 22 | 
         
            +
             
     | 
| 23 | 
         
            +
            > [!CAUTION]
         
     | 
| 24 | 
         
            +
            > Fake websites like kokorottsai_com (snapshot: https://archive.ph/nRRnk) and kokorotts_net (snapshot: https://archive.ph/60opa) are likely scams masquerading under the banner of a popular model.
         
     | 
| 25 | 
         
            +
            >
         
     | 
| 26 | 
         
            +
            > Any website containing ""kokoro"" in its root domain (e.g. kokorottsai_com, kokorotts_net) is **NOT owned by and NOT affiliated with this model page or its author**, and attempts to imply otherwise are red flags.
         
     | 
| 27 | 
         
            +
             
     | 
| 28 | 
         
            +
            - [Releases](#releases)
         
     | 
| 29 | 
         
            +
            - [Usage](#usage)
         
     | 
| 30 | 
         
            +
            - [EVAL.md](https://huggingface.co/hexgrad/Kokoro-82M/blob/main/EVAL.md) ↗️
         
     | 
| 31 | 
         
            +
            - [SAMPLES.md](https://huggingface.co/hexgrad/Kokoro-82M/blob/main/SAMPLES.md) ↗️
         
     | 
| 32 | 
         
            +
            - [VOICES.md](https://huggingface.co/hexgrad/Kokoro-82M/blob/main/VOICES.md) ↗️
         
     | 
| 33 | 
         
            +
            - [Model Facts](#model-facts)
         
     | 
| 34 | 
         
            +
            - [Training Details](#training-details)
         
     | 
| 35 | 
         
            +
            - [Creative Commons Attribution](#creative-commons-attribution)
         
     | 
| 36 | 
         
            +
            - [Acknowledgements](#acknowledgements)
         
     | 
| 37 | 
         
            +
             
     | 
| 38 | 
         
            +
            ### Releases
         
     | 
| 39 | 
         
            +
             
     | 
| 40 | 
         
            +
            | Model | Published | Training Data | Langs & Voices | SHA256 |
         
     | 
| 41 | 
         
            +
            | ----- | --------- | ------------- | -------------- | ------ |
         
     | 
| 42 | 
         
            +
            | **v1.0** | **2025 Jan 27** | **Few hundred hrs** | [**8 & 54**](https://huggingface.co/hexgrad/Kokoro-82M/blob/main/VOICES.md) | `496dba11` |
         
     | 
| 43 | 
         
            +
            | [v0.19](https://huggingface.co/hexgrad/kLegacy/tree/main/v0.19) | 2024 Dec 25 | <100 hrs | 1 & 10 | `3b0c392f` |
         
     | 
| 44 | 
         
            +
             
     | 
| 45 | 
         
            +
            | Training Costs | v0.19 | v1.0 | **Total** |
         
     | 
| 46 | 
         
            +
            | -------------- | ----- | ---- | ----- |
         
     | 
| 47 | 
         
            +
            | in A100 80GB GPU hours | 500 | 500 | **1000** |
         
     | 
| 48 | 
         
            +
            | average hourly rate | $0.80/h | $1.20/h | **$1/h** |
         
     | 
| 49 | 
         
            +
            | in USD | $400 | $600 | **$1000** |
         
     | 
| 50 | 
         
            +
             
     | 
| 51 | 
         
            +
            ### Usage
         
     | 
| 52 | 
         
            +
            You can run this basic cell on [Google Colab](https://colab.research.google.com/). [Listen to samples](https://huggingface.co/hexgrad/Kokoro-82M/blob/main/SAMPLES.md). For more languages and details, see [Advanced Usage](https://github.com/hexgrad/kokoro?tab=readme-ov-file#advanced-usage).
         
     | 
| 53 | 
         
            +
            ```py
         
     | 
| 54 | 
         
            +
!pip install -q "kokoro>=0.9.2" soundfile
         
     | 
| 55 | 
         
            +
            !apt-get -qq -y install espeak-ng > /dev/null 2>&1
         
     | 
| 56 | 
         
            +
            from kokoro import KPipeline
         
     | 
| 57 | 
         
            +
            from IPython.display import display, Audio
         
     | 
| 58 | 
         
            +
            import soundfile as sf
         
     | 
| 59 | 
         
            +
            import torch
         
     | 
| 60 | 
         
            +
            pipeline = KPipeline(lang_code='a')
         
     | 
| 61 | 
         
            +
            text = '''
         
     | 
| 62 | 
         
            +
            [Kokoro](/kˈOkəɹO/) is an open-weight TTS model with 82 million parameters. Despite its lightweight architecture, it delivers comparable quality to larger models while being significantly faster and more cost-efficient. With Apache-licensed weights, [Kokoro](/kˈOkəɹO/) can be deployed anywhere from production environments to personal projects.
         
     | 
| 63 | 
         
            +
            '''
         
     | 
| 64 | 
         
            +
            generator = pipeline(text, voice='af_heart')
         
     | 
| 65 | 
         
            +
            for i, (gs, ps, audio) in enumerate(generator):
         
     | 
| 66 | 
         
            +
                print(i, gs, ps)
         
     | 
| 67 | 
         
            +
                display(Audio(data=audio, rate=24000, autoplay=i==0))
         
     | 
| 68 | 
         
            +
                sf.write(f'{i}.wav', audio, 24000)
         
     | 
| 69 | 
         
            +
            ```
         
     | 
| 70 | 
         
            +
            Under the hood, `kokoro` uses [`misaki`](https://pypi.org/project/misaki/), a G2P library at https://github.com/hexgrad/misaki
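If a single audio file is preferred instead of one file per segment, the generated chunks can be concatenated before writing. Below is a minimal sketch reusing the same pipeline, voice, and 24 kHz sample rate as the example above; the input text and output filename are arbitrary.

```py
import numpy as np
import soundfile as sf
from kokoro import KPipeline

pipeline = KPipeline(lang_code='a')  # American English, as in the example above
text = "Kokoro is an open-weight TTS model with 82 million parameters."
# Collect every generated segment, then write one combined 24 kHz wav file.
segments = [np.asarray(audio) for _, _, audio in pipeline(text, voice='af_heart')]
sf.write('combined.wav', np.concatenate(segments), 24000)
```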
         
     | 
| 71 | 
         
            +
             
     | 
| 72 | 
         
            +
            ### Model Facts
         
     | 
| 73 | 
         
            +
             
     | 
| 74 | 
         
            +
            **Architecture:**
         
     | 
| 75 | 
         
            +
            - StyleTTS 2: https://arxiv.org/abs/2306.07691
         
     | 
| 76 | 
         
            +
            - ISTFTNet: https://arxiv.org/abs/2203.02395
         
     | 
| 77 | 
         
            +
            - Decoder only: no diffusion, no encoder release
         
     | 
| 78 | 
         
            +
             
     | 
| 79 | 
         
            +
            **Architected by:** Li et al @ https://github.com/yl4579/StyleTTS2
         
     | 
| 80 | 
         
            +
             
     | 
| 81 | 
         
            +
            **Trained by**: `@rzvzn` on Discord
         
     | 
| 82 | 
         
            +
             
     | 
| 83 | 
         
            +
            **Languages:** Multiple
         
     | 
| 84 | 
         
            +
             
     | 
| 85 | 
         
            +
            **Model SHA256 Hash:** `496dba118d1a58f5f3db2efc88dbdc216e0483fc89fe6e47ee1f2c53f18ad1e4`
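To confirm a downloaded checkpoint matches this hash, the following minimal sketch can be used; the local path to `kokoro-v1_0.pth` is an assumption and should point to wherever the file was saved.

```py
import hashlib

EXPECTED = "496dba118d1a58f5f3db2efc88dbdc216e0483fc89fe6e47ee1f2c53f18ad1e4"
h = hashlib.sha256()
with open("kokoro-v1_0.pth", "rb") as f:  # assumed local filename
    # Hash the checkpoint in 1 MiB chunks to avoid loading it all into memory.
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
print("hash OK" if h.hexdigest() == EXPECTED else f"hash mismatch: {h.hexdigest()}")
```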
         
     | 
| 86 | 
         
            +
             
     | 
| 87 | 
         
            +
            ### Training Details
         
     | 
| 88 | 
         
            +
             
     | 
| 89 | 
         
            +
            **Data:** Kokoro was trained exclusively on **permissive/non-copyrighted audio data** and IPA phoneme labels. Examples of permissive/non-copyrighted audio include:
         
     | 
| 90 | 
         
            +
            - Public domain audio
         
     | 
| 91 | 
         
            +
            - Audio licensed under Apache, MIT, etc
         
     | 
| 92 | 
         
            +
            - Synthetic audio<sup>[1]</sup> generated by closed<sup>[2]</sup> TTS models from large providers<br/>
         
     | 
| 93 | 
         
            +
            [1] https://copyright.gov/ai/ai_policy_guidance.pdf<br/>
         
     | 
| 94 | 
         
            +
            [2] No synthetic audio from open TTS models or ""custom voice clones""
         
     | 
| 95 | 
         
            +
             
     | 
| 96 | 
         
            +
            **Total Dataset Size:** A few hundred hours of audio
         
     | 
| 97 | 
         
            +
             
     | 
| 98 | 
         
            +
**Total Training Cost:** About $1000 for roughly 1000 A100 80GB GPU hours
         
     | 
| 99 | 
         
            +
             
     | 
| 100 | 
         
            +
            ### Creative Commons Attribution
         
     | 
| 101 | 
         
            +
             
     | 
| 102 | 
         
            +
            The following CC BY audio was part of the dataset used to train Kokoro v1.0.
         
     | 
| 103 | 
         
            +
             
     | 
| 104 | 
         
            +
            | Audio Data | Duration Used | License | Added to Training Set After |
         
     | 
| 105 | 
         
            +
            | ---------- | ------------- | ------- | --------------------------- |
         
     | 
| 106 | 
         
            +
            | [Koniwa](https://github.com/koniwa/koniwa) `tnc` | <1h | [CC BY 3.0](https://creativecommons.org/licenses/by/3.0/deed.ja) | v0.19 / 22 Nov 2024 |
         
     | 
| 107 | 
         
            +
            | [SIWIS](https://datashare.ed.ac.uk/handle/10283/2353) | <11h | [CC BY 4.0](https://datashare.ed.ac.uk/bitstream/handle/10283/2353/license_text) | v0.19 / 22 Nov 2024 |
         
     | 
| 108 | 
         
            +
             
     | 
| 109 | 
         
            +
            ### Acknowledgements
         
     | 
| 110 | 
         
            +
             
     | 
| 111 | 
         
            +
            - 🛠️ [@yl4579](https://huggingface.co/yl4579) for architecting StyleTTS 2.
         
     | 
| 112 | 
         
            +
            - 🏆 [@Pendrokar](https://huggingface.co/Pendrokar) for adding Kokoro as a contender in the TTS Spaces Arena.
         
     | 
| 113 | 
         
            +
            - 📊 Thank you to everyone who contributed synthetic training data.
         
     | 
| 114 | 
         
            +
            - ❤️ Special thanks to all compute sponsors.
         
     | 
| 115 | 
         
            +
            - 👾 Discord server: https://discord.gg/QuGxSWBfQy
         
     | 
| 116 | 
         
            +
            - 🪽 Kokoro is a Japanese word that translates to ""heart"" or ""spirit"". It is also the name of an [AI in the Terminator franchise](https://terminator.fandom.com/wiki/Kokoro).
         
     | 
| 117 | 
         
            +
             
     | 
| 118 | 
         
            +
            <img src=""https://static0.gamerantimages.com/wordpress/wp-content/uploads/2024/08/terminator-zero-41-1.jpg"" width=""400"" alt=""kokoro"" />
         
     | 
| 119 | 
         
            +
            ","{""id"": ""hexgrad/Kokoro-82M"", ""author"": ""hexgrad"", ""sha"": ""f3ff3571791e39611d31c381e3a41a3af07b4987"", ""last_modified"": ""2025-04-10 18:12:48+00:00"", ""created_at"": ""2024-12-26 00:20:08+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1981704, ""downloads_all_time"": null, ""likes"": 4086, ""library_name"": null, ""gguf"": null, ""inference"": null, ""tags"": [""text-to-speech"", ""en"", ""arxiv:2306.07691"", ""arxiv:2203.02395"", ""base_model:yl4579/StyleTTS2-LJSpeech"", ""base_model:finetune:yl4579/StyleTTS2-LJSpeech"", ""doi:10.57967/hf/4329"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": ""text-to-speech"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- yl4579/StyleTTS2-LJSpeech\nlanguage:\n- en\nlicense: apache-2.0\npipeline_tag: text-to-speech"", ""widget_data"": null, ""model_index"": null, ""config"": {}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DONATE.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='EVAL.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='SAMPLES.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='VOICES.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='eval/ArtificialAnalysis-2025-02-26.jpeg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='eval/TTS_Arena-2025-02-26.jpeg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='eval/TTS_Spaces_Arena-2025-02-26.jpeg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='kokoro-v1_0.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples/HEARME.wav', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples/af_heart_0.wav', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples/af_heart_1.wav', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples/af_heart_2.wav', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples/af_heart_3.wav', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples/af_heart_4.wav', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples/af_heart_5.wav', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/af_alloy.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/af_aoede.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/af_bella.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/af_heart.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/af_jessica.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/af_kore.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/af_nicole.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/af_nova.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/af_river.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/af_sarah.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/af_sky.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/am_adam.pt', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='voices/am_echo.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/am_eric.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/am_fenrir.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/am_liam.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/am_michael.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/am_onyx.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/am_puck.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/am_santa.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/bf_alice.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/bf_emma.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/bf_isabella.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/bf_lily.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/bm_daniel.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/bm_fable.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/bm_george.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/bm_lewis.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/ef_dora.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/em_alex.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/em_santa.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/ff_siwis.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/hf_alpha.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/hf_beta.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/hm_omega.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/hm_psi.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/if_sara.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/im_nicola.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/jf_alpha.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/jf_gongitsune.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/jf_nezumi.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/jf_tebukuro.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/jm_kumo.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/pf_dora.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/pm_alex.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/pm_santa.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/zf_xiaobei.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/zf_xiaoni.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/zf_xiaoxiao.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/zf_xiaoyi.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/zm_yunjian.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/zm_yunxi.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/zm_yunxia.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='voices/zm_yunyang.pt', size=None, blob_id=None, lfs=None)""], ""spaces"": 
[""hexgrad/Kokoro-TTS"", ""TTS-AGI/TTS-Arena"", ""Pendrokar/TTS-Spaces-Arena"", ""Remsky/Kokoro-TTS-Zero"", ""ginipick/DeepSeekR1-LIVE"", ""mozilla-ai/document-to-podcast"", ""NeuralFalcon/KOKORO-TTS-1.0"", ""sagar007/DeepSeekR1_Search"", ""Wismut/Kokoro_TTS_Compare"", ""NeuralFalcon/Kokoro-TTS"", ""RobinsAIWorld/Kokoro-TTS-cpu"", ""RORONovaLuffy/hexgrad-Kokoro-82M"", ""shukdevdatta123/Kokoro-TTS"", ""ngxson/kokoro-podcast-backend"", ""Remsky/FastKoko"", ""RobinsAIWorld/KokoZero"", ""rahul7star/Kokoro-TTS-Hindi"", ""sdafd/Kokoro-TTS"", ""sdafd/KOKORO-TTS-1.0"", ""BasToTheMax/FREE-KokoroTTS"", ""Badger123t/Kokoro-TTS"", ""Libra8ed-Tech/Kokoro-TTS"", ""Ogo123/Kokoro-TTS"", ""tgu6/hexgrad-Kokoro-82M"", ""Masterdqqq/vepp-tts"", ""brainzcode/hexgrad-Kokoro-82M"", ""joseph6377/hexgrad-Kokoro-82M9"", ""piopio88/hexgrad-Kokoro-82M"", ""traghav/hexgrad-Kokoro-82M"", ""joaocarloscruz/hexgrad-Kokoro-82M"", ""ahmedabdelali/hexgrad-Kokoro-82M"", ""liaskos/hexgrad-Kokoro-82M"", ""kunkun8888666/hexgrad-Kokoro-82M"", ""REL9X/hexgrad-Kokoro-82M"", ""bertglo/hexgrad-Kokoro-82M"", ""liuliuha/hexgrad-Kokoro-82M"", ""RTCode-ai/hexgrad-Kokoro-82M"", ""jhon823/hexgrad-Kokoro-82M"", ""ChrisWren/hexgrad-Kokoro-82M"", ""jayhust/hexgrad-Kokoro-82M"", ""kukikoki/hexgrad-Kokoro-82M"", ""hiteshganjoo/hexgrad-Kokoro-82M"", ""Shahzadasghar/hexgrad-Kokoro-82M"", ""realviligant/hexgrad-Kokoro-82M"", ""imrnh/AutiMate_tts_hexgrad-Kokoro-82M"", ""TrueGoat/hexgrad-Kokoro-82M"", ""xiex/hexgrad-Kokoro-82M"", ""segelyang/hexgrad-Kokoro-82M"", ""ClickAI/hexgrad-Kokoro-82M"", ""practice22/hexgrad-Kokoro-82M"", ""HeyiAgency/hexgrad-Kokoro-82M"", ""Messer185/hexgrad-Kokoro-82M"", ""peixl/hexgrad-Kokoro-82M"", ""rtzti2000/hexgrad-Kokoro-82M"", ""LGBTjews/hexgrad-Kokoro-82M"", ""Marcel637838383/hexgrad-Kokoro-82M"", ""Ahmadbagzada/hexgrad-Kokoro-82M"", ""snoopsy/hexgrad-Kokoro-82M"", ""xofal40967/hexgrad-Kokoro-82M"", ""fatwang2/hexgrad-Kokoro-82M"", ""Aranzo/hexgrad-Kokoro-82M"", ""vikaswakde/hexgrad-Kokoro-82M"", ""sajidanwar/hexgrad-Kokoro-82M"", ""jallenjia/Kokoro-TTS-Zero"", ""Abdalmohsen/hexgrad-Kokoro-82M"", ""Houkii/hexgrad-Kokoro-82M"", ""Krass/hexgrad-Kokoro-82M"", ""Toby12woolsey/hexgrad-Kokoro-82M"", ""Rybackmasikalass1998/hexgrad-Kokoro-82M"", ""AmpleBasis/Kokoro-TTS-Zero"", ""IamNotChris/hexgrad-Kokoro-82M"", ""Scorpjr9/hexgrad-Kokoro-82M"", ""pengaturan/hexgrad-Kokoro-82M"", ""Jay1012/hexgrad-Kokoro-82M"", ""Sinisterj12/hexgrad-Kokoro-82M"", ""Artifex1/hexgrad-Kokoro-82M"", ""Nialluytrruuee/hexgrad-Kokoro-82M"", ""nwent/hexgrad-Kokoro-82M"", ""alperall/hexgrad-Kokoro-82M"", ""cheshireterminal/kokottsolana"", ""jhay555/hexgrad-Kokoro-82M"", ""jacobwjx/hexgrad-Kokoro-82M"", ""eddiego2017/hexgrad-Kokoro-82M"", ""prembhai/Voice-Generator"", ""fdsgfdvbf/hexgrad-Kokoro-82M"", ""sahilviolet/hexgrad-Kokoro-82M"", ""Weswise/hexgrad-Kokoro-82M"", ""MYY007/hexgrad-Kokoro-82M"", ""mediaguild/Text2Speech"", ""marsyao/Kokoro-TTS"", ""doyaf99486/Kokoro-TTS"", ""eric-cli/Kokoro-TTS-Local"", ""un4b0mer/hexgrad-Kokoro-82M"", ""Maznichka/hexgrad-Kokoro-82M"", ""gudao119/hexgrad-Kokoro-82M"", ""Neyvan001/Ney007"", ""versantus/hexgrad-Kokoro-82M"", ""xMPB/hexgrad-Kokoro-82M"", ""Lokya1/hexgrad-Kokoro-82M"", ""wore99/hexgrad-Kokoro-82M""], ""safetensors"": null, ""security_repo_status"": null, ""lastModified"": ""2025-04-10 18:12:48+00:00"", ""cardData"": ""base_model:\n- yl4579/StyleTTS2-LJSpeech\nlanguage:\n- en\nlicense: apache-2.0\npipeline_tag: text-to-speech"", ""transformersInfo"": null, ""_id"": ""676ca1388118866906abbd7c"", 
""modelId"": ""hexgrad/Kokoro-82M"", ""usedStorage"": 1234555949}",0,"https://huggingface.co/hexgrad/Kokoro-82M-v1.1-zh, https://huggingface.co/ThornoDen/Sonya, https://huggingface.co/Oscuridad/aaaa, https://huggingface.co/chernikov1/test, https://huggingface.co/GabrielMedinaTorres/genshin, https://huggingface.co/Khanhho/mai, https://huggingface.co/Hasani-ali-reza/Iran, https://huggingface.co/Aicraftar/Tharo.G-Neo, https://huggingface.co/maj666/test1, https://huggingface.co/Princehart1234/Sonia, https://huggingface.co/umersattar/girl, https://huggingface.co/David856412/David89675, https://huggingface.co/crossyroadgame/unblocked",13,"https://huggingface.co/Aicraftar/Tharo.G-Eco, https://huggingface.co/9w092wi/Photos, https://huggingface.co/budxeedev/akseAI, https://huggingface.co/Farzinam30/Manpower, https://huggingface.co/Carlos728/Tinder, https://huggingface.co/alfered2000/anna, https://huggingface.co/Maryyssa998/Model998, https://huggingface.co/Sanchesavg1/Elona, https://huggingface.co/Kooshaghaedi/Koosha",9,"https://huggingface.co/onnx-community/Kokoro-82M-v1.0-ONNX, https://huggingface.co/Remsky/kokoro-82m-mirror, https://huggingface.co/onnx-community/Kokoro-82M-ONNX, https://huggingface.co/NeuML/kokoro-fp16-onnx, https://huggingface.co/NeuML/kokoro-int8-onnx, https://huggingface.co/anuragsingh922/VocRT, https://huggingface.co/onnx-community/Kokoro-82M-v1.0-ONNX-timestamped, https://huggingface.co/NeuML/kokoro-base-onnx, https://huggingface.co/adrianlyjak/kokoro-onnx",9,,0,"NeuralFalcon/KOKORO-TTS-1.0, NeuralFalcon/Kokoro-TTS, Pendrokar/TTS-Spaces-Arena, RORONovaLuffy/hexgrad-Kokoro-82M, Remsky/Kokoro-TTS-Zero, RobinsAIWorld/Kokoro-TTS-cpu, TTS-AGI/TTS-Arena, Wismut/Kokoro_TTS_Compare, ginipick/DeepSeekR1-LIVE, hexgrad/Kokoro-TTS, huggingface/InferenceSupport/discussions/63, sagar007/DeepSeekR1_Search, shukdevdatta123/Kokoro-TTS",13
         
     | 
| 120 | 
         
            +
            https://huggingface.co/hexgrad/Kokoro-82M-v1.1-zh,N/A,N/A,1,,0,,0,,0,,0,,0
         
     | 
| 121 | 
         
            +
            ThornoDen/Sonya,"---
         
     | 
| 122 | 
         
            +
            license: cc-by-nc-nd-4.0
         
     | 
| 123 | 
         
            +
            datasets:
         
     | 
| 124 | 
         
            +
            - fka/awesome-chatgpt-prompts
         
     | 
| 125 | 
         
            +
            language:
         
     | 
| 126 | 
         
            +
            - ab
         
     | 
| 127 | 
         
            +
            metrics:
         
     | 
| 128 | 
         
            +
            - bleu
         
     | 
| 129 | 
         
            +
            - character
         
     | 
| 130 | 
         
            +
            base_model:
         
     | 
| 131 | 
         
            +
            - hexgrad/Kokoro-82M
         
     | 
| 132 | 
         
            +
            new_version: deepseek-ai/DeepSeek-R1
         
     | 
| 133 | 
         
            +
            pipeline_tag: text-classification
         
     | 
| 134 | 
         
            +
            library_name: fairseq
         
     | 
| 135 | 
         
            +
            ---","{""id"": ""ThornoDen/Sonya"", ""author"": ""ThornoDen"", ""sha"": ""200ae5dece5a776986e2fb69cb111b4fe42b9a8d"", ""last_modified"": ""2025-01-27 22:57:03+00:00"", ""created_at"": ""2025-01-27 22:45:37+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 1, ""library_name"": ""fairseq"", ""gguf"": null, ""inference"": null, ""tags"": [""fairseq"", ""text-classification"", ""ab"", ""dataset:fka/awesome-chatgpt-prompts"", ""base_model:hexgrad/Kokoro-82M"", ""base_model:finetune:hexgrad/Kokoro-82M"", ""license:cc-by-nc-nd-4.0"", ""region:us""], ""pipeline_tag"": ""text-classification"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- hexgrad/Kokoro-82M\ndatasets:\n- fka/awesome-chatgpt-prompts\nlanguage:\n- ab\nlibrary_name: fairseq\nlicense: cc-by-nc-nd-4.0\nmetrics:\n- bleu\n- character\npipeline_tag: text-classification\nnew_version: deepseek-ai/DeepSeek-R1"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Screenshot_2025-01-17-02-11-11-22_68e74cc0265513bba3f4cf8e2c9891fe.jpg', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""lastModified"": ""2025-01-27 22:57:03+00:00"", ""cardData"": ""base_model:\n- hexgrad/Kokoro-82M\ndatasets:\n- fka/awesome-chatgpt-prompts\nlanguage:\n- ab\nlibrary_name: fairseq\nlicense: cc-by-nc-nd-4.0\nmetrics:\n- bleu\n- character\npipeline_tag: text-classification\nnew_version: deepseek-ai/DeepSeek-R1"", ""transformersInfo"": null, ""_id"": ""67980c91e05ca91d7e01c1a4"", ""modelId"": ""ThornoDen/Sonya"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=ThornoDen/Sonya&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BThornoDen%2FSonya%5D(%2FThornoDen%2FSonya)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
     | 
| 136 | 
         
            +
            Oscuridad/aaaa,"---
         
     | 
| 137 | 
         
            +
            license: unknown
         
     | 
| 138 | 
         
            +
            datasets:
         
     | 
| 139 | 
         
            +
            - HumanLLMs/Human-Like-DPO-Dataset
         
     | 
| 140 | 
         
            +
            language:
         
     | 
| 141 | 
         
            +
            - aa
         
     | 
| 142 | 
         
            +
            metrics:
         
     | 
| 143 | 
         
            +
            - bleurt
         
     | 
| 144 | 
         
            +
            base_model:
         
     | 
| 145 | 
         
            +
            - hexgrad/Kokoro-82M
         
     | 
| 146 | 
         
            +
            new_version: hexgrad/Kokoro-82M
         
     | 
| 147 | 
         
            +
            pipeline_tag: audio-to-audio
         
     | 
| 148 | 
         
            +
            library_name: asteroid
         
     | 
| 149 | 
         
            +
            tags:
         
     | 
| 150 | 
         
            +
            - music
         
     | 
| 151 | 
         
            +
            ---","{""id"": ""Oscuridad/aaaa"", ""author"": ""Oscuridad"", ""sha"": ""fd4abf3a33e5b35c7ceccfec6201f88caabf9b8d"", ""last_modified"": ""2025-01-21 02:09:19+00:00"", ""created_at"": ""2025-01-21 02:07:03+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""asteroid"", ""gguf"": null, ""inference"": null, ""tags"": [""asteroid"", ""music"", ""audio-to-audio"", ""aa"", ""dataset:HumanLLMs/Human-Like-DPO-Dataset"", ""base_model:hexgrad/Kokoro-82M"", ""base_model:finetune:hexgrad/Kokoro-82M"", ""license:unknown"", ""region:us""], ""pipeline_tag"": ""audio-to-audio"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- hexgrad/Kokoro-82M\ndatasets:\n- HumanLLMs/Human-Like-DPO-Dataset\nlanguage:\n- aa\nlibrary_name: asteroid\nlicense: unknown\nmetrics:\n- bleurt\npipeline_tag: audio-to-audio\ntags:\n- music\nnew_version: hexgrad/Kokoro-82M"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""lastModified"": ""2025-01-21 02:09:19+00:00"", ""cardData"": ""base_model:\n- hexgrad/Kokoro-82M\ndatasets:\n- HumanLLMs/Human-Like-DPO-Dataset\nlanguage:\n- aa\nlibrary_name: asteroid\nlicense: unknown\nmetrics:\n- bleurt\npipeline_tag: audio-to-audio\ntags:\n- music\nnew_version: hexgrad/Kokoro-82M"", ""transformersInfo"": null, ""_id"": ""678f01474253b8500c91f6dd"", ""modelId"": ""Oscuridad/aaaa"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=Oscuridad/aaaa&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BOscuridad%2Faaaa%5D(%2FOscuridad%2Faaaa)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
chernikov1/test,"---
license: apache-2.0
base_model:
- hexgrad/Kokoro-82M
pipeline_tag: image-to-3d
---

# Model Card for Model ID

<!-- Provide a quick summary of what the model is/does. -->

This modelcard aims to be a base template for new models. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/modelcard_template.md?plain=1).

## Model Details

### Model Description

<!-- Provide a longer summary of what this model is. -->

- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]

### Model Sources [optional]

<!-- Provide the basic links for the model. -->

- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]

## Uses

<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->

### Direct Use

<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->

[More Information Needed]

### Downstream Use [optional]

<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->

[More Information Needed]

### Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->

[More Information Needed]

## Bias, Risks, and Limitations

<!-- This section is meant to convey both technical and sociotechnical limitations. -->

[More Information Needed]

### Recommendations

<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->

Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.

## How to Get Started with the Model

Use the code below to get started with the model.

[More Information Needed]

## Training Details

### Training Data

<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->

[More Information Needed]

### Training Procedure

<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->

#### Preprocessing [optional]

[More Information Needed]

#### Training Hyperparameters

- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->

#### Speeds, Sizes, Times [optional]

<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->

[More Information Needed]

## Evaluation

<!-- This section describes the evaluation protocols and provides the results. -->

### Testing Data, Factors & Metrics

#### Testing Data

<!-- This should link to a Dataset Card if possible. -->

[More Information Needed]

#### Factors

<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->

[More Information Needed]

#### Metrics

<!-- These are the evaluation metrics being used, ideally with a description of why. -->

[More Information Needed]

### Results

[More Information Needed]

#### Summary

## Model Examination [optional]

<!-- Relevant interpretability work for the model goes here -->

[More Information Needed]

## Environmental Impact

<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->

Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).

- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]

## Technical Specifications [optional]

### Model Architecture and Objective

[More Information Needed]

### Compute Infrastructure

[More Information Needed]

#### Hardware

[More Information Needed]

#### Software

[More Information Needed]

## Citation [optional]

<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->

**BibTeX:**

[More Information Needed]

**APA:**

[More Information Needed]

## Glossary [optional]

<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->

[More Information Needed]

## More Information [optional]

[More Information Needed]

## Model Card Authors [optional]

[More Information Needed]

## Model Card Contact

            [More Information Needed]","{""id"": ""chernikov1/test"", ""author"": ""chernikov1"", ""sha"": ""12f21b35b05a92e37485bbbd9fa46994d0fa4f1a"", ""last_modified"": ""2025-01-23 06:18:43+00:00"", ""created_at"": ""2025-01-23 06:14:43+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""tags"": [""image-to-3d"", ""arxiv:1910.09700"", ""base_model:hexgrad/Kokoro-82M"", ""base_model:finetune:hexgrad/Kokoro-82M"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": ""image-to-3d"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- hexgrad/Kokoro-82M\nlicense: apache-2.0\npipeline_tag: image-to-3d"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""lastModified"": ""2025-01-23 06:18:43+00:00"", ""cardData"": ""base_model:\n- hexgrad/Kokoro-82M\nlicense: apache-2.0\npipeline_tag: image-to-3d"", ""transformersInfo"": null, ""_id"": ""6791de532cb4bcc37396e8cb"", ""modelId"": ""chernikov1/test"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=chernikov1/test&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bchernikov1%2Ftest%5D(%2Fchernikov1%2Ftest)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
GabrielMedinaTorres/genshin,"---
language:
- es
base_model:
- hexgrad/Kokoro-82M
            ---","{""id"": ""GabrielMedinaTorres/genshin"", ""author"": ""GabrielMedinaTorres"", ""sha"": ""df4d5fc1fc115e416e3976af3b80dbce40e11c89"", ""last_modified"": ""2025-01-24 12:43:40+00:00"", ""created_at"": ""2025-01-24 12:43:07+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""tags"": [""es"", ""base_model:hexgrad/Kokoro-82M"", ""base_model:finetune:hexgrad/Kokoro-82M"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- hexgrad/Kokoro-82M\nlanguage:\n- es"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""lastModified"": ""2025-01-24 12:43:40+00:00"", ""cardData"": ""base_model:\n- hexgrad/Kokoro-82M\nlanguage:\n- es"", ""transformersInfo"": null, ""_id"": ""67938adb82eb457aba83de02"", ""modelId"": ""GabrielMedinaTorres/genshin"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=GabrielMedinaTorres/genshin&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BGabrielMedinaTorres%2Fgenshin%5D(%2FGabrielMedinaTorres%2Fgenshin)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
Khanhho/mai,"---
language:
- vi
base_model:
- hexgrad/Kokoro-82M
            ---","{""id"": ""Khanhho/mai"", ""author"": ""Khanhho"", ""sha"": ""f3b7740350fbc7c69baed0e9c2c0a4d8c00a16a5"", ""last_modified"": ""2025-01-24 16:37:59+00:00"", ""created_at"": ""2025-01-24 16:36:17+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""tags"": [""vi"", ""base_model:hexgrad/Kokoro-82M"", ""base_model:finetune:hexgrad/Kokoro-82M"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- hexgrad/Kokoro-82M\nlanguage:\n- vi"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""lastModified"": ""2025-01-24 16:37:59+00:00"", ""cardData"": ""base_model:\n- hexgrad/Kokoro-82M\nlanguage:\n- vi"", ""transformersInfo"": null, ""_id"": ""6793c18139a0cc8662af3560"", ""modelId"": ""Khanhho/mai"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=Khanhho/mai&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BKhanhho%2Fmai%5D(%2FKhanhho%2Fmai)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
Hasani-ali-reza/Iran,"---
license: openrail
datasets:
- fka/awesome-chatgpt-prompts
metrics:
- bleu
base_model:
- hexgrad/Kokoro-82M
new_version: deepseek-ai/DeepSeek-R1
pipeline_tag: question-answering
library_name: asteroid
tags:
- biology
            ---","{""id"": ""Hasani-ali-reza/Iran"", ""author"": ""Hasani-ali-reza"", ""sha"": ""2691864b9f384ebe9c1215b0e4abf97e5bcdfadf"", ""last_modified"": ""2025-01-28 11:13:54+00:00"", ""created_at"": ""2025-01-28 11:12:11+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""asteroid"", ""gguf"": null, ""inference"": null, ""tags"": [""asteroid"", ""biology"", ""question-answering"", ""dataset:fka/awesome-chatgpt-prompts"", ""base_model:hexgrad/Kokoro-82M"", ""base_model:finetune:hexgrad/Kokoro-82M"", ""license:openrail"", ""region:us""], ""pipeline_tag"": ""question-answering"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- hexgrad/Kokoro-82M\ndatasets:\n- fka/awesome-chatgpt-prompts\nlibrary_name: asteroid\nlicense: openrail\nmetrics:\n- bleu\npipeline_tag: question-answering\ntags:\n- biology\nnew_version: deepseek-ai/DeepSeek-R1"", ""widget_data"": [{""text"": ""Where do I live?"", ""context"": ""My name is Wolfgang and I live in Berlin""}, {""text"": ""Where do I live?"", ""context"": ""My name is Sarah and I live in London""}, {""text"": ""What's my name?"", ""context"": ""My name is Clara and I live in Berkeley.""}, {""text"": ""Which name is also used to describe the Amazon rainforest in English?"", ""context"": ""The Amazon rainforest (Portuguese: Floresta Amaz\u00f4nica or Amaz\u00f4nia; Spanish: Selva Amaz\u00f3nica, Amazon\u00eda or usually Amazonia; French: For\u00eat amazonienne; Dutch: Amazoneregenwoud), also known in English as Amazonia or the Amazon Jungle, is a moist broadleaf forest that covers most of the Amazon basin of South America. This basin encompasses 7,000,000 square kilometres (2,700,000 sq mi), of which 5,500,000 square kilometres (2,100,000 sq mi) are covered by the rainforest. This region includes territory belonging to nine nations. The majority of the forest is contained within Brazil, with 60% of the rainforest, followed by Peru with 13%, Colombia with 10%, and with minor amounts in Venezuela, Ecuador, Bolivia, Guyana, Suriname and French Guiana. States or departments in four nations contain \""Amazonas\"" in their names. 
The Amazon represents over half of the planet's remaining rainforests, and comprises the largest and most biodiverse tract of tropical rainforest in the world, with an estimated 390 billion individual trees divided into 16,000 species.""}], ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""lastModified"": ""2025-01-28 11:13:54+00:00"", ""cardData"": ""base_model:\n- hexgrad/Kokoro-82M\ndatasets:\n- fka/awesome-chatgpt-prompts\nlibrary_name: asteroid\nlicense: openrail\nmetrics:\n- bleu\npipeline_tag: question-answering\ntags:\n- biology\nnew_version: deepseek-ai/DeepSeek-R1"", ""transformersInfo"": null, ""_id"": ""6798bb8b39bf18bcca5b59e3"", ""modelId"": ""Hasani-ali-reza/Iran"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=Hasani-ali-reza/Iran&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BHasani-ali-reza%2FIran%5D(%2FHasani-ali-reza%2FIran)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
            https://huggingface.co/Aicraftar/Tharo.G-Neo,N/A,N/A,1,,0,,0,,0,,0,,0
         
maj666/test1,"---
datasets:
- fka/awesome-chatgpt-prompts
language:
- ar
metrics:
- bertscore
base_model:
- hexgrad/Kokoro-82M
new_version: deepseek-ai/DeepSeek-R1
pipeline_tag: video-text-to-text
library_name: allennlp
            ---","{""id"": ""maj666/test1"", ""author"": ""maj666"", ""sha"": ""e828105ce54d949ca7d19772ababf26df8b28993"", ""last_modified"": ""2025-02-17 15:23:22+00:00"", ""created_at"": ""2025-02-17 15:18:19+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""allennlp"", ""gguf"": null, ""inference"": null, ""tags"": [""allennlp"", ""video-text-to-text"", ""ar"", ""dataset:fka/awesome-chatgpt-prompts"", ""base_model:hexgrad/Kokoro-82M"", ""base_model:finetune:hexgrad/Kokoro-82M"", ""region:us""], ""pipeline_tag"": ""video-text-to-text"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- hexgrad/Kokoro-82M\ndatasets:\n- fka/awesome-chatgpt-prompts\nlanguage:\n- ar\nlibrary_name: allennlp\nmetrics:\n- bertscore\npipeline_tag: video-text-to-text\nnew_version: deepseek-ai/DeepSeek-R1"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""lastModified"": ""2025-02-17 15:23:22+00:00"", ""cardData"": ""base_model:\n- hexgrad/Kokoro-82M\ndatasets:\n- fka/awesome-chatgpt-prompts\nlanguage:\n- ar\nlibrary_name: allennlp\nmetrics:\n- bertscore\npipeline_tag: video-text-to-text\nnew_version: deepseek-ai/DeepSeek-R1"", ""transformersInfo"": null, ""_id"": ""67b3533b730b84a4b58b57d2"", ""modelId"": ""maj666/test1"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=maj666/test1&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bmaj666%2Ftest1%5D(%2Fmaj666%2Ftest1)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
Princehart1234/Sonia,"---
license: mit
datasets:
- fka/awesome-chatgpt-prompts
language:
- ak
metrics:
- bleurt
base_model:
- hexgrad/Kokoro-82M
new_version: deepseek-ai/Janus-Pro-7B
pipeline_tag: zero-shot-classification
library_name: asteroid
            ---","{""id"": ""Princehart1234/Sonia"", ""author"": ""Princehart1234"", ""sha"": ""faa426d12f9a9ab91e84f02cf24bc4309583b715"", ""last_modified"": ""2025-02-21 11:58:14+00:00"", ""created_at"": ""2025-02-21 11:56:35+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""asteroid"", ""gguf"": null, ""inference"": null, ""tags"": [""asteroid"", ""zero-shot-classification"", ""ak"", ""dataset:fka/awesome-chatgpt-prompts"", ""base_model:hexgrad/Kokoro-82M"", ""base_model:finetune:hexgrad/Kokoro-82M"", ""license:mit"", ""region:us""], ""pipeline_tag"": ""zero-shot-classification"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- hexgrad/Kokoro-82M\ndatasets:\n- fka/awesome-chatgpt-prompts\nlanguage:\n- ak\nlibrary_name: asteroid\nlicense: mit\nmetrics:\n- bleurt\npipeline_tag: zero-shot-classification\nnew_version: deepseek-ai/Janus-Pro-7B"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""lastModified"": ""2025-02-21 11:58:14+00:00"", ""cardData"": ""base_model:\n- hexgrad/Kokoro-82M\ndatasets:\n- fka/awesome-chatgpt-prompts\nlanguage:\n- ak\nlibrary_name: asteroid\nlicense: mit\nmetrics:\n- bleurt\npipeline_tag: zero-shot-classification\nnew_version: deepseek-ai/Janus-Pro-7B"", ""transformersInfo"": null, ""_id"": ""67b869f3c5dfba0d121be754"", ""modelId"": ""Princehart1234/Sonia"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=Princehart1234/Sonia&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BPrincehart1234%2FSonia%5D(%2FPrincehart1234%2FSonia)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
umersattar/girl,"---
license: artistic-2.0
datasets:
- open-thoughts/OpenThoughts-114k
metrics:
- character
base_model:
- hexgrad/Kokoro-82M
new_version: deepseek-ai/DeepSeek-R1
pipeline_tag: image-to-video
library_name: flair
tags:
- music
            ---","{""id"": ""umersattar/girl"", ""author"": ""umersattar"", ""sha"": ""39a1d9c70b85aa443289fee51f9a82392f94e0cc"", ""last_modified"": ""2025-02-24 16:22:44+00:00"", ""created_at"": ""2025-02-24 16:19:18+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""flair"", ""gguf"": null, ""inference"": null, ""tags"": [""flair"", ""music"", ""image-to-video"", ""dataset:open-thoughts/OpenThoughts-114k"", ""base_model:hexgrad/Kokoro-82M"", ""base_model:finetune:hexgrad/Kokoro-82M"", ""license:artistic-2.0"", ""region:us""], ""pipeline_tag"": ""image-to-video"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- hexgrad/Kokoro-82M\ndatasets:\n- open-thoughts/OpenThoughts-114k\nlibrary_name: flair\nlicense: artistic-2.0\nmetrics:\n- character\npipeline_tag: image-to-video\ntags:\n- music\nnew_version: deepseek-ai/DeepSeek-R1"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""lastModified"": ""2025-02-24 16:22:44+00:00"", ""cardData"": ""base_model:\n- hexgrad/Kokoro-82M\ndatasets:\n- open-thoughts/OpenThoughts-114k\nlibrary_name: flair\nlicense: artistic-2.0\nmetrics:\n- character\npipeline_tag: image-to-video\ntags:\n- music\nnew_version: deepseek-ai/DeepSeek-R1"", ""transformersInfo"": null, ""_id"": ""67bc9c0687f197cb897f22ef"", ""modelId"": ""umersattar/girl"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=umersattar/girl&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bumersattar%2Fgirl%5D(%2Fumersattar%2Fgirl)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
David856412/David89675,"---
datasets:
- FreedomIntelligence/medical-o1-reasoning-SFT
language:
- ae
metrics:
- bleu
base_model:
- hexgrad/Kokoro-82M
new_version: Wan-AI/Wan2.1-T2V-14B
pipeline_tag: translation
            ---","{""id"": ""David856412/David89675"", ""author"": ""David856412"", ""sha"": ""437e4756d5d3dd8a8fdb02b35bb3b34ff589e4d6"", ""last_modified"": ""2025-03-04 02:12:50+00:00"", ""created_at"": ""2025-03-04 02:10:24+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""tags"": [""translation"", ""ae"", ""dataset:FreedomIntelligence/medical-o1-reasoning-SFT"", ""base_model:hexgrad/Kokoro-82M"", ""base_model:finetune:hexgrad/Kokoro-82M"", ""region:us""], ""pipeline_tag"": ""translation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- hexgrad/Kokoro-82M\ndatasets:\n- FreedomIntelligence/medical-o1-reasoning-SFT\nlanguage:\n- ae\nmetrics:\n- bleu\npipeline_tag: translation\nnew_version: Wan-AI/Wan2.1-T2V-14B"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""lastModified"": ""2025-03-04 02:12:50+00:00"", ""cardData"": ""base_model:\n- hexgrad/Kokoro-82M\ndatasets:\n- FreedomIntelligence/medical-o1-reasoning-SFT\nlanguage:\n- ae\nmetrics:\n- bleu\npipeline_tag: translation\nnew_version: Wan-AI/Wan2.1-T2V-14B"", ""transformersInfo"": null, ""_id"": ""67c6611090a284952e34c619"", ""modelId"": ""David856412/David89675"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=David856412/David89675&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BDavid856412%2FDavid89675%5D(%2FDavid856412%2FDavid89675)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
crossyroadgame/unblocked,"---
license: openrail
datasets:
- facebook/natural_reasoning
language:
- af
metrics:
- bertscore
base_model:
- hexgrad/Kokoro-82M
new_version: perplexity-ai/r1-1776
pipeline_tag: sentence-similarity
library_name: flair
tags:
- not-for-all-audiences
- code
- legal
            ---","{""id"": ""crossyroadgame/unblocked"", ""author"": ""crossyroadgame"", ""sha"": ""79fcebe6cd5b99017bffa6b684676c1ea6a14774"", ""last_modified"": ""2025-03-04 08:13:15+00:00"", ""created_at"": ""2025-03-04 08:12:04+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""flair"", ""gguf"": null, ""inference"": null, ""tags"": [""flair"", ""not-for-all-audiences"", ""code"", ""legal"", ""sentence-similarity"", ""af"", ""dataset:facebook/natural_reasoning"", ""base_model:hexgrad/Kokoro-82M"", ""base_model:finetune:hexgrad/Kokoro-82M"", ""license:openrail"", ""region:us""], ""pipeline_tag"": ""sentence-similarity"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- hexgrad/Kokoro-82M\ndatasets:\n- facebook/natural_reasoning\nlanguage:\n- af\nlibrary_name: flair\nlicense: openrail\nmetrics:\n- bertscore\npipeline_tag: sentence-similarity\ntags:\n- not-for-all-audiences\n- code\n- legal\nnew_version: perplexity-ai/r1-1776"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""lastModified"": ""2025-03-04 08:13:15+00:00"", ""cardData"": ""base_model:\n- hexgrad/Kokoro-82M\ndatasets:\n- facebook/natural_reasoning\nlanguage:\n- af\nlibrary_name: flair\nlicense: openrail\nmetrics:\n- bertscore\npipeline_tag: sentence-similarity\ntags:\n- not-for-all-audiences\n- code\n- legal\nnew_version: perplexity-ai/r1-1776"", ""transformersInfo"": null, ""_id"": ""67c6b5d43f47007858d912c9"", ""modelId"": ""crossyroadgame/unblocked"", ""usedStorage"": 0}",1,,0,,0,,0,,0,,0
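Each repository listed above is tagged on the Hub with `base_model:finetune:hexgrad/Kokoro-82M`, which is how these rows are identified as (claimed) finetunes of that base model. Below is a minimal, hypothetical sketch of how such a listing could be pulled with `huggingface_hub`; the filter string is taken from the `tags` fields shown above, and this is not necessarily how these CSVs were actually generated.

```python
# Hypothetical sketch: list Hub models tagged as finetunes of hexgrad/Kokoro-82M.
# The tag format "base_model:finetune:<repo>" comes from the "tags" fields above.
from huggingface_hub import HfApi

api = HfApi()
for m in api.list_models(filter="base_model:finetune:hexgrad/Kokoro-82M", full=True):
    print(m.id, m.pipeline_tag, m.likes)
```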
         
Llama-2-7b-chat-hf_finetunes_20250422_180448.csv
ADDED
The diff for this file is too large to render. See raw diff

Llama-2-7b-chat-hf_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv
ADDED
The diff for this file is too large to render. See raw diff

Mistral-NeMo-12B-Instruct_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv
ADDED
@@ -0,0 +1,70 @@
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
nvidia/Mistral-NeMo-12B-Instruct,"---
license: apache-2.0
tags:
- nvidia
---

## Mistral-NeMo-12B-Instruct

[](#model-architecture)[](#model-architecture)[](#datasets)

### Model Overview:

Mistral-NeMo-12B-Instruct is a Large Language Model (LLM) composed of 12B parameters, trained jointly by NVIDIA and Mistral AI. It significantly outperforms existing models smaller or similar in size.

**Key features**
- Released under the Apache 2 License
- Pre-trained and instructed versions
- Trained with a 128k context window
- Comes with a FP8 quantized version with no accuracy loss
- Trained on a large proportion of multilingual and code data

### Intended use

Mistral-NeMo-12B-Instruct is a chat model intended for use for the English language.

The instruct model itself can be further customized using the [NeMo Framework](https://docs.nvidia.com/nemo-framework/index.html) suite of customization tools including Parameter-Efficient Fine-Tuning (P-tuning, Adapters, LoRA, and more), and Model Alignment (SFT, SteerLM, RLHF, and more) using [NeMo-Aligner](https://github.com/NVIDIA/NeMo-Aligner).

**Model Developer:** [NVIDIA](https://www.nvidia.com/en-us/) and [MistralAI](https://mistral.ai/)

**Model Dates:** Mistral-NeMo-12B-Instruct was trained between June 2024 and July 2024.

**Data Freshness:** The pretraining data has a cutoff of April 2024.

**Transformers format:** https://huggingface.co/mistralai/Mistral-Nemo-Instruct-2407

### Model Architecture:

Mistral-NeMo-12B-Instruct is a transformer model, with the following architecture choices:

- Layers: 40
- Dim: 5,120
- Head dim: 128
- Hidden dim: 14,436
- Activation Function: SwiGLU
- Number of heads: 32
- Number of kv-heads: 8 (GQA)
- Rotary embeddings (theta = 1M)
- Vocabulary size: 2**17 ~= 128k

**Architecture Type:** Transformer Decoder (auto-regressive language model)

### Evaluation Results

- MT Bench (dev): 7.84
- MixEval Hard: 0.534
- IFEval-v5: 0.629
- Wildbench: 42.57

### Limitations

The model was trained on data that contains toxic language, unsafe content, and societal biases originally crawled from the internet. Therefore, the model may amplify those biases and return toxic responses especially when prompted with toxic prompts. The model may generate answers that may be inaccurate, omit key information, or include irrelevant or redundant text producing socially unacceptable or undesirable text, even if the prompt itself does not include anything explicitly offensive.

### Ethical Considerations

NVIDIA believes Trustworthy AI is a shared responsibility and we have established policies and practices to enable development for a wide array of AI applications.  When downloaded or used in accordance with our terms of service, developers should work with their internal model team to ensure this model meets requirements for the relevant industry and use case and addresses unforeseen product misuse. Please report security vulnerabilities or NVIDIA AI Concerns [here](https://www.nvidia.com/en-us/support/submit-security-vulnerability/).

            ","{""id"": ""nvidia/Mistral-NeMo-12B-Instruct"", ""author"": ""nvidia"", ""sha"": ""d776f0fa6af3e9abd1de92b8a17cfbee7b072e46"", ""last_modified"": ""2024-07-18 15:35:02+00:00"", ""created_at"": ""2024-07-18 00:47:51+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 250, ""downloads_all_time"": null, ""likes"": 156, ""library_name"": ""nemo"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""nemo"", ""nvidia"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: apache-2.0\ntags:\n- nvidia"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Mistral-NeMo-12B-Instruct.nemo', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Mistral-NeMo-12B-tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-07-18 15:35:02+00:00"", ""cardData"": ""license: apache-2.0\ntags:\n- nvidia"", ""transformersInfo"": null, ""_id"": ""669866372ca9a02afe563164"", ""modelId"": ""nvidia/Mistral-NeMo-12B-Instruct"", ""usedStorage"": 49018220167}",0,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=nvidia/Mistral-NeMo-12B-Instruct&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bnvidia%2FMistral-NeMo-12B-Instruct%5D(%2Fnvidia%2FMistral-NeMo-12B-Instruct)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
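The header row of each of these CSVs (`model_id,card,metadata,depth,children,...`) pairs the raw README text (`card`) with a JSON dump of the repository info (`metadata`). Below is a minimal sketch of loading one of the files from this commit and decoding that JSON column; it assumes pandas is installed and the CSV is available locally under the name shown above.

```python
# Minimal sketch: read a finetune CSV from this commit and decode its JSON
# "metadata" column. Column names follow the header row shown above.
import json
import pandas as pd

df = pd.read_csv(
    "Mistral-NeMo-12B-Instruct_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv"
)

for raw in df["metadata"].dropna():
    try:
        meta = json.loads(raw)
    except json.JSONDecodeError:
        continue  # some rows store "N/A" instead of JSON
    print(meta["id"], meta.get("likes", 0), meta.get("pipeline_tag"))
```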
Mistral-Trismegistus-7B_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv
ADDED
@@ -0,0 +1,78 @@
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
teknium/Mistral-Trismegistus-7B,"---
base_model: mistralai/Mistral-7B-v0.1
tags:
- mistral-7b
- instruct
- finetune
- gpt4
- synthetic data
- distillation
model-index:
- name: Mistral-Trismegistus-7B
  results: []
license: apache-2.0
language:
- en
---
**Mistral Trismegistus 7B**

<div style=""display: flex; justify-content: center;"">
  <img src=""https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/3VJvztFDB1XOWfShuHnb6.png"" alt=""Mistral Trismegistus"" width=""50%"" style=""display: block; margin: 0 auto;"">
</div>

## Model Description:

Transcendence is All You Need! Mistral Trismegistus is a model made for people interested in the esoteric, occult, and spiritual.

Here are some outputs:

Answer questions about occult artifacts:

Play the role of a hypnotist:

## Special Features:
- **The First Powerful Occult Expert Model**: ~10,000 high quality, deep, rich, instructions on the occult, esoteric, and spiritual.
- **Fast**: Trained on Mistral, a state of the art 7B parameter model, you can run this model FAST on even a cpu.
- **Not a positivity-nazi**: This model was trained on all forms of esoteric tasks and knowledge, and is not burdened by the flowery nature of many other models, who chose positivity over creativity.

## Acknowledgements:

Special thanks to @a16z.

## Dataset:
         
     | 
| 47 | 
         
            +
             
     | 
| 48 | 
         
            +
            This model was trained on a 100% synthetic, gpt-4 generated dataset, about ~10,000 examples, on a wide and diverse set of both tasks and knowledge about the esoteric, occult, and spiritual.
         
     | 
| 49 | 
         
            +
             
     | 
| 50 | 
         
            +
            The dataset will be released soon!
         
     | 
| 51 | 
         
            +
             
     | 
| 52 | 
         
            +
            ## Usage:
         
     | 
| 53 | 
         
            +
             
     | 
| 54 | 
         
            +
            Prompt Format:
         
     | 
| 55 | 
         
            +
            ```
         
     | 
| 56 | 
         
            +
            USER: <prompt>
         
     | 
| 57 | 
         
            +
            ASSISTANT:
         
     | 
| 58 | 
         
            +
            ```
         
     | 
| 59 | 
         
            +
            OR
         
     | 
| 60 | 
         
            +
            ```
         
     | 
| 61 | 
         
            +
            <system message>
         
     | 
| 62 | 
         
            +
            USER: <prompt>
         
     | 
| 63 | 
         
            +
            ASSISTANT:
         
     | 
| 64 | 
         
            +
            ```
         
     | 
| 65 | 
         
            +
             
     | 
| 66 | 
         
            +
            ## Benchmarks:
         
     | 
| 67 | 
         
            +
             
     | 
| 68 | 
         
            +
            No benchmark can capture the nature and essense of the quality of spirituality and esoteric knowledge and tasks. You will have to try testing it yourself!
         
     | 
| 69 | 
         
            +
             
     | 
| 70 | 
         
            +
            Training run on wandb here: https://wandb.ai/teknium1/occult-expert-mistral-7b/runs/coccult-expert-mistral-6/overview
         
     | 
| 71 | 
         
            +
             
     | 
| 72 | 
         
            +
            ## Licensing:
         
     | 
| 73 | 
         
            +
             
     | 
| 74 | 
         
            +
            Apache 2.0
         
     | 
| 75 | 
         
            +
             
     | 
| 76 | 
         
            +
            ---
         
     | 
| 77 | 
         
            +
             
     | 
| 78 | 
         
            +
            ","{""id"": ""teknium/Mistral-Trismegistus-7B"", ""author"": ""teknium"", ""sha"": ""ed4e3768713480eb977716d236df864f4fff63dc"", ""last_modified"": ""2023-11-12 08:42:23+00:00"", ""created_at"": ""2023-10-07 00:21:46+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 60, ""downloads_all_time"": null, ""likes"": 218, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""mistral"", ""text-generation"", ""mistral-7b"", ""instruct"", ""finetune"", ""gpt4"", ""synthetic data"", ""distillation"", ""en"", ""base_model:mistralai/Mistral-7B-v0.1"", ""base_model:finetune:mistralai/Mistral-7B-v0.1"", ""license:apache-2.0"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: mistralai/Mistral-7B-v0.1\nlanguage:\n- en\nlicense: apache-2.0\ntags:\n- mistral-7b\n- instruct\n- finetune\n- gpt4\n- synthetic data\n- distillation\nmodel-index:\n- name: Mistral-Trismegistus-7B\n  results: []"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": [{""name"": ""Mistral-Trismegistus-7B"", ""results"": []}], ""config"": {""architectures"": [""MistralForCausalLM""], ""model_type"": ""mistral"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": null, ""unk_token"": ""<unk>"", ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00002.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00002.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Libia/teknium-Mistral-Trismegistus-7B"", ""liangsu9988/teknium-Mistral-Trismegistus-7B"", ""Elias-Artista/teknium-Mistral-Trismegistus-7B"", ""Andrewxalexander/teknium-Mistral-Trismegistus-7B"", ""angelteng/teknium-Mistral-Trismegistus-7B"", ""angelteng/teknium-Mistral-Trismegistus-7B2"", ""de2/teknium-Mistral-Trismegistus-7B"", ""pooroligarch/Mistral-Trismegistus-7B"", ""Tiroff/teknium-Mistral-Trismegistus-7B"", ""nonyabidnus/teknium-Mistral-Trismegistus-7B"", ""happy610/teknium-Mistral-Trismegistus-7B"", ""promachina/teknium-Mistral-Trismegistus-7B"", ""TabLevitas/teknium-Mistral-Trismegistus-7B"", ""FreddieSpaghetti/teknium-Mistral-Trismegistus-7B"", ""ndehouche/teknium-Mistral-Trismegistus-7B"", ""MagicalAci/teknium-Mistral-Trismegistus-7B"", 
""Rexzhang2023/teknium-Mistral-Trismegistus-7B"", ""billy123che/teknium-Mistral-Trismegistus-7B"", ""hk1730/teknium-Mistral-Trismegistus-7B"", ""Ron78/teknium-Mistral-Trismegistus-7B"", ""m-pajew-ski/teknium-Mistral-Trismegistus-7B"", ""MPLebron/teknium-Mistral-Trismegistus-7B"", ""luckli/teknium-Mistral-Trismegistus-7B"", ""aaddfff/teknium-Mistral-Trismegistus-7B"", ""timtimtimshen/teknium-Mistral-Trismegistus-7B"", ""sunnyzhu12/teknium-Mistral-Trismegistus-7B"", ""yinhou3/mt"", ""waytoAGI/teknium-Mistral-Trismegistus-7B"", ""ameliayi/teknium-Mistral-Trismegistus-7B"", ""KabutX/teknium-Mistral-Trismegistus-7B"", ""Lynch2023/teknium-Mistral-Trismegistus-7B"", ""Jackgelove/teknium-Mistral-Trismegistus-7B""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-11-12 08:42:23+00:00"", ""cardData"": ""base_model: mistralai/Mistral-7B-v0.1\nlanguage:\n- en\nlicense: apache-2.0\ntags:\n- mistral-7b\n- instruct\n- finetune\n- gpt4\n- synthetic data\n- distillation\nmodel-index:\n- name: Mistral-Trismegistus-7B\n  results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6520a49a2a16045c0924ecea"", ""modelId"": ""teknium/Mistral-Trismegistus-7B"", ""usedStorage"": 28967558918}",0,,0,,0,"https://huggingface.co/TheBloke/Mistral-Trismegistus-7B-GGUF, https://huggingface.co/TheBloke/Mistral-Trismegistus-7B-GPTQ, https://huggingface.co/TheBloke/Mistral-Trismegistus-7B-AWQ, https://huggingface.co/DevQuasar/teknium.Mistral-Trismegistus-7B-GGUF",4,"https://huggingface.co/nbeerbower/Flammen-Trismegistus-7B, https://huggingface.co/Kukedlc/Trascendental-Bot-7B, https://huggingface.co/Aryanne/MixSwap, https://huggingface.co/xalkoi/Flammen-Trismegistus-7B-Q6_K-GGUF",4,"Andrewxalexander/teknium-Mistral-Trismegistus-7B, Elias-Artista/teknium-Mistral-Trismegistus-7B, Libia/teknium-Mistral-Trismegistus-7B, Tiroff/teknium-Mistral-Trismegistus-7B, angelteng/teknium-Mistral-Trismegistus-7B, angelteng/teknium-Mistral-Trismegistus-7B2, de2/teknium-Mistral-Trismegistus-7B, happy610/teknium-Mistral-Trismegistus-7B, huggingface/InferenceSupport/discussions/new?title=teknium/Mistral-Trismegistus-7B&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bteknium%2FMistral-Trismegistus-7B%5D(%2Fteknium%2FMistral-Trismegistus-7B)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, liangsu9988/teknium-Mistral-Trismegistus-7B, nonyabidnus/teknium-Mistral-Trismegistus-7B, pooroligarch/Mistral-Trismegistus-7B, promachina/teknium-Mistral-Trismegistus-7B",13
         
Molmo-7B-D-0924_finetunes_20250425_143346.csv_finetunes_20250425_143346.csv
ADDED
@@ -0,0 +1,211 @@
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
allenai/Molmo-7B-D-0924,"---
license: apache-2.0
language:
- en
base_model:
- openai/clip-vit-large-patch14-336
- Qwen/Qwen2-7B
pipeline_tag: image-text-to-text
tags:
- multimodal
- olmo
- molmo
- pixmo
library_name: transformers
---

<img src=""molmo_logo.png"" alt=""Logo for the Molmo Project"" style=""width: auto; height: 50px;"">

# Molmo 7B-D

Molmo is a family of open vision-language models developed by the Allen Institute for AI. Molmo models are trained on PixMo, a dataset of 1 million highly curated image-text pairs. Molmo achieves state-of-the-art performance among multimodal models of similar size while being fully open-source. You can find all models in the Molmo family [here](https://huggingface.co/collections/allenai/molmo-66f379e6fe3b8ef090a8ca19).
**Learn more** about the Molmo family [in our announcement blog post](https://molmo.allenai.org/blog) or the [paper](https://huggingface.co/papers/2409.17146).

Molmo 7B-D is based on [Qwen2-7B](https://huggingface.co/Qwen/Qwen2-7B) and uses [OpenAI CLIP](https://huggingface.co/openai/clip-vit-large-patch14-336) as its vision backbone.
It performs comfortably between GPT-4V and GPT-4o on both academic benchmarks and human evaluation.
It powers the **Molmo demo at** [**molmo.allenai.org**](https://molmo.allenai.org).

This checkpoint is a **preview** of the Molmo release. All artifacts used in creating Molmo (the PixMo dataset, training code, evaluations, and intermediate checkpoints) will be made available at a later date, furthering our commitment to open-source AI development and reproducibility.

[**Sign up here**](https://docs.google.com/forms/d/e/1FAIpQLSdML1MhNNBDsCHpgWG65Oydg2SjZzVasyqlP08nBrWjZp_c7A/viewform) to be the first to know when artifacts are released.

Quick links:
- 💬 [Demo](https://molmo.allenai.org/)
- 📂 [All Models](https://huggingface.co/collections/allenai/molmo-66f379e6fe3b8ef090a8ca19)
- 📃 [Paper](https://molmo.allenai.org/paper.pdf)
- 🎥 [Blog with Videos](https://molmo.allenai.org/blog)


## Quick Start

To run Molmo, first install dependencies:

```bash
pip install einops torchvision
```

Then, follow these steps:

```python
from transformers import AutoModelForCausalLM, AutoProcessor, GenerationConfig
from PIL import Image
import requests

# load the processor
processor = AutoProcessor.from_pretrained(
    'allenai/Molmo-7B-D-0924',
    trust_remote_code=True,
    torch_dtype='auto',
    device_map='auto'
)

# load the model
model = AutoModelForCausalLM.from_pretrained(
    'allenai/Molmo-7B-D-0924',
    trust_remote_code=True,
    torch_dtype='auto',
    device_map='auto'
)

# process the image and text
inputs = processor.process(
    images=[Image.open(requests.get(""https://picsum.photos/id/237/536/354"", stream=True).raw)],
    text=""Describe this image.""
)

# move inputs to the correct device and make a batch of size 1
inputs = {k: v.to(model.device).unsqueeze(0) for k, v in inputs.items()}

# generate output; maximum 200 new tokens; stop generation when <|endoftext|> is generated
output = model.generate_from_batch(
    inputs,
    GenerationConfig(max_new_tokens=200, stop_strings=""<|endoftext|>""),
    tokenizer=processor.tokenizer
)

# only get generated tokens; decode them to text
generated_tokens = output[0, inputs['input_ids'].size(1):]
generated_text = processor.tokenizer.decode(generated_tokens, skip_special_tokens=True)

# print the generated text
print(generated_text)

# >>>  This image features an adorable black Labrador puppy, captured from a top-down
#      perspective. The puppy is sitting on a wooden deck, which is composed ...
```

To make inference more efficient, run with autocast:

```python
import torch

with torch.autocast(device_type=""cuda"", enabled=True, dtype=torch.bfloat16):
  output = model.generate_from_batch(
      inputs,
      GenerationConfig(max_new_tokens=200, stop_strings=""<|endoftext|>""),
      tokenizer=processor.tokenizer
  )
```

We did most of our evaluation in this setting (autocast on, but float32 weights).

To further reduce memory requirements, the model can be run with bfloat16 weights:

```python
model.to(dtype=torch.bfloat16)
inputs[""images""] = inputs[""images""].to(torch.bfloat16)
output = model.generate_from_batch(
    inputs,
    GenerationConfig(max_new_tokens=200, stop_strings=""<|endoftext|>""),
    tokenizer=processor.tokenizer
)
```

Note that we have observed that this can change the output of the model compared to running with float32 weights.

## Evaluations

| Model                       | Average Score on 11 Academic Benchmarks | Human Preference Elo Rating |
|-----------------------------|-----------------------------------------|-----------------------------|
| Molmo 72B                   | 81.2                                    | 1077                        |
| **Molmo 7B-D (this model)** | **77.3**                                | **1056**                    |
| Molmo 7B-O                  | 74.6                                    | 1051                        |
| MolmoE 1B                   | 68.6                                    | 1032                        |
| GPT-4o                      | 78.5                                    | 1079                        |
| GPT-4V                      | 71.1                                    | 1041                        |
| Gemini 1.5 Pro              | 78.3                                    | 1074                        |
| Gemini 1.5 Flash            | 75.1                                    | 1054                        |
| Claude 3.5 Sonnet           | 76.7                                    | 1069                        |
| Claude 3 Opus               | 66.4                                    |  971                        |
| Claude 3 Haiku              | 65.3                                    |  999                        |
| Qwen VL2 72B                | 79.4                                    | 1037                        |
| Qwen VL2 7B                 | 73.7                                    | 1025                        |
| Intern VL2 LLAMA 76B        | 77.1                                    | 1018                        |
| Intern VL2 8B               | 69.4                                    |  953                        |
| Pixtral 12B                 | 69.5                                    | 1016                        |
| Phi3.5-Vision 4B            | 59.7                                    |  982                        |
| PaliGemma 3B                | 50.0                                    |  937                        |
| LLAVA OneVision 72B         | 76.6                                    | 1051                        |
| LLAVA OneVision 7B          | 72.0                                    | 1024                        |
| Cambrian-1 34B              | 66.8                                    |  953                        |
| Cambrian-1 8B               | 63.4                                    |  952                        |
| xGen - MM - Interleave 4B   | 59.5                                    |  979                        |
| LLAVA-1.5 13B               | 43.9                                    |  960                        |
| LLAVA-1.5 7B                | 40.7                                    |  951                        |

*Benchmarks: AI2D test, ChartQA test, VQA v2.0 test, DocQA test, InfographicVQA test, TextVQA val, RealWorldQA, MMMU val, MathVista testmini, CountBenchQA, Flickr Count (we collected this new dataset, which is significantly harder than CountBenchQA).*

## FAQs

### I'm getting a broadcast error when processing images!

Your image might not be in RGB format. You can convert it using the following code snippet:

```python
from PIL import Image

image = Image.open(...)

if image.mode != ""RGB"":
    image = image.convert(""RGB"")
```

### Molmo doesn't work great with transparent images!

We have received reports that Molmo models might struggle with transparent images.
For the time being, we recommend adding a white or dark background to your images before passing them to the model. The code snippet below shows how to do this using the Python Imaging Library (PIL):

```python
from PIL import Image, ImageStat
import requests

# Load the image
url = ""...""
image = Image.open(requests.get(url, stream=True).raw)

# Convert the image to grayscale to calculate brightness
gray_image = image.convert('L')  # Convert to grayscale

# Calculate the average brightness
stat = ImageStat.Stat(gray_image)
average_brightness = stat.mean[0]  # Get the average value

# Define background color based on brightness (threshold can be adjusted)
bg_color = (0, 0, 0) if average_brightness > 127 else (255, 255, 255)

# Create a new image with the same size as the original, filled with the background color
new_image = Image.new('RGB', image.size, bg_color)

# Paste the original image on top of the background (use image as a mask if needed)
new_image.paste(image, (0, 0), image if image.mode == 'RGBA' else None)

# Now you can pass the new_image to Molmo
processor = AutoProcessor.from_pretrained(
    'allenai/Molmo-7B-D-0924',
    trust_remote_code=True,
    torch_dtype='auto',
    device_map='auto'
)
```

## License and Use

This model is licensed under Apache 2.0. It is intended for research and educational use.
            For more information, please see our [Responsible Use Guidelines](https://allenai.org/responsible-use).","{""id"": ""allenai/Molmo-7B-D-0924"", ""author"": ""allenai"", ""sha"": ""ac032b93b84a7f10c9578ec59f9f20ee9a8990a2"", ""last_modified"": ""2025-04-04 20:01:44+00:00"", ""created_at"": ""2024-09-25 01:48:22+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 569560, ""downloads_all_time"": null, ""likes"": 524, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""molmo"", ""text-generation"", ""multimodal"", ""olmo"", ""pixmo"", ""image-text-to-text"", ""conversational"", ""custom_code"", ""en"", ""arxiv:2409.17146"", ""base_model:Qwen/Qwen2-7B"", ""base_model:finetune:Qwen/Qwen2-7B"", ""license:apache-2.0"", ""autotrain_compatible"", ""region:us""], ""pipeline_tag"": ""image-text-to-text"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- openai/clip-vit-large-patch14-336\n- Qwen/Qwen2-7B\nlanguage:\n- en\nlibrary_name: transformers\nlicense: apache-2.0\npipeline_tag: image-text-to-text\ntags:\n- multimodal\n- olmo\n- molmo\n- pixmo"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""MolmoForCausalLM""], ""auto_map"": {""AutoConfig"": ""config_molmo.MolmoConfig"", ""AutoModelForCausalLM"": ""modeling_molmo.MolmoForCausalLM""}, ""model_type"": ""molmo"", ""tokenizer_config"": {""bos_token"": null, ""chat_template"": ""{% for message in messages -%}\n        {%- if (loop.index % 2 == 1 and message['role'] != 'user') or \n          (loop.index % 2 == 0 and message['role'].lower() != 'assistant') -%}\n        {{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}\n        {%- endif -%}\n        {{ message['role'].capitalize() + ': ' + message['content'] }}\n        {%- if not loop.last -%}\n        {{ ' ' }}\n        {%- endif %}\n        {%- endfor -%}\n        {%- if add_generation_prompt -%}\n        {{ ' Assistant:' }}\n        {%- endif %}"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": null}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_molmo.MolmoForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config_molmo.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='image_preprocessing_molmo.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00007.safetensors', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_molmo.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='molmo_logo.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessing_molmo.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='processor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""akhaliq/Molmo-7B-D-0924"", ""TIGER-Lab/MEGA-Bench"", ""KBaba7/Quant"", ""bhaskartripathi/LLM_Quantization"", ""totolook/Quant"", ""FallnAI/Quantize-HF-Models"", ""yasserrmd/MolmoVision"", ""sflindrs/vlm_comparer"", ""ruslanmv/convert_to_gguf"", ""DDDamon/test"", ""quarterturn/molmo-natural-language-image-captioner"", ""awacke1/Leaderboard-Deepseek-Gemini-Grok-GPT-Qwen"", ""sflindrs/Molmo-7B-D-0924-extended-tokens"", ""stoefln/playground2"", ""dkisb/Molmo-7B-D-0924"", ""gaur3009/ColPali-Query-Generator"", ""zainimam/Ss-mol"", ""TheVixhal/OPPE"", ""srawalll/quizzz"", ""vpssud/molmo2"", ""TLPython/MolmoVision"", ""cburtin/ocr"", ""mastercallum/MolmoVision-noCUDAerror"", ""brunocota/Molmo-7B-D-0924"", ""K00B404/LLM_Quantization""], ""safetensors"": {""parameters"": {""F32"": 8021025280}, ""total"": 8021025280}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-04 20:01:44+00:00"", ""cardData"": ""base_model:\n- openai/clip-vit-large-patch14-336\n- Qwen/Qwen2-7B\nlanguage:\n- en\nlibrary_name: transformers\nlicense: apache-2.0\npipeline_tag: image-text-to-text\ntags:\n- multimodal\n- olmo\n- molmo\n- pixmo"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_molmo.MolmoForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""66f36be6af8609e9ef8198e5"", ""modelId"": ""allenai/Molmo-7B-D-0924"", ""usedStorage"": 35884012544}",0,,0,,0,"https://huggingface.co/cyan2k/molmo-7B-D-bnb-4bit, https://huggingface.co/impactframes/molmo-7B-D-bnb-4bit, https://huggingface.co/detect-tech/molmo-7B-D-bnb-4bit, https://huggingface.co/ctranslate2-4you/molmo-7B-D-0924-bnb-4bit, https://huggingface.co/OPEA/Molmo-7B-D-0924-int4-sym-inc, https://huggingface.co/Scoolar/Molmo-7B-D-0924-NF4",6,,0,"FallnAI/Quantize-HF-Models, K00B404/LLM_Quantization, KBaba7/Quant, TIGER-Lab/MEGA-Bench, akhaliq/Molmo-7B-D-0924, awacke1/Leaderboard-Deepseek-Gemini-Grok-GPT-Qwen, bhaskartripathi/LLM_Quantization, 
huggingface/InferenceSupport/discussions/new?title=allenai/Molmo-7B-D-0924&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Ballenai%2FMolmo-7B-D-0924%5D(%2Fallenai%2FMolmo-7B-D-0924)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, quarterturn/molmo-natural-language-image-captioner, ruslanmv/convert_to_gguf, sflindrs/vlm_comparer, totolook/Quant, yasserrmd/MolmoVision",13
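Both FAQ entries in the Molmo card above amount to normalizing the input image before calling processor.process. The sketch below folds the card's two snippets into one helper; the function name prepare_image is a hypothetical convenience, and the brightness threshold of 127 simply carries over the value used in the card's example.

```python
# A minimal sketch combining the Molmo card's FAQ advice: convert non-RGB images to RGB
# and flatten transparent images onto a solid background before passing them to the model.
# prepare_image is a hypothetical helper name; the 127 threshold follows the card's example.
from PIL import Image, ImageStat

def prepare_image(image: Image.Image) -> Image.Image:
    if image.mode == "RGBA":
        # Pick a background color from the image's average brightness,
        # as suggested in the card's FAQ on transparent images.
        gray = image.convert("L")
        avg_brightness = ImageStat.Stat(gray).mean[0]
        bg_color = (0, 0, 0) if avg_brightness > 127 else (255, 255, 255)
        background = Image.new("RGB", image.size, bg_color)
        background.paste(image, (0, 0), image)  # use the alpha channel as the paste mask
        return background
    if image.mode != "RGB":
        # Covers palette, grayscale, CMYK, etc.
        image = image.convert("RGB")
    return image
```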
         
NVLM-D-72B_finetunes_20250425_041137.csv_finetunes_20250425_041137.csv
ADDED
@@ -0,0 +1,732 @@
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
nvidia/NVLM-D-72B,"---
license: cc-by-nc-4.0
language:
- en
pipeline_tag: image-text-to-text
tags:
- nvidia
- NVLM
- pytorch
- multimodal
- conversational
library_name: transformers
---

<p align=""center"">
  <img src=""nvlm-logo-light.png"" alt=""Image Description"" width=""300"" >
</p>


# Model Overview

## Description
This family of models performs vision-language and text-only tasks including optical character recognition, multimodal reasoning, localization, common sense reasoning, world knowledge utilization, and coding.

This model is ready for non-commercial use.

## License/Terms of Use

Governing Terms: Deed - [Attribution-NonCommercial 4.0 International - Creative Commons](https://creativecommons.org/licenses/by-nc/4.0/deed.en).

Additional Information: [LICENSE · Qwen/Qwen2-72B-Instruct at main](https://huggingface.co/Qwen/Qwen2-72B-Instruct/blob/main/LICENSE) for Qwen2-72B-Instruct and [The MIT License – Open Source Initiative](https://opensource.org/license/mit) for InternViT-6B-448px-V1-2.

# Model Details

Today (September 17th, 2024), we introduce [NVLM 1.0](https://arxiv.org/abs/2409.11402), a family of frontier-class multimodal large language models (LLMs) that achieve state-of-the-art results on vision-language tasks, rivaling the leading proprietary models (e.g., GPT-4o) and open-access models (e.g., Llama 3-V 405B and InternVL 2). Remarkably, NVLM 1.0 shows improved text-only performance over its LLM backbone after multimodal training.

In this repo, we are open-sourcing NVLM-1.0-D-72B (decoder-only architecture): the model weights and code for the community.



## Reference(s)
[Paper](https://arxiv.org/abs/2409.11402)   [Inference Code (HF)](https://huggingface.co/nvidia/NVLM-D-72B/tree/main)   [Training Code](https://github.com/NVIDIA/Megatron-LM/tree/NVLM-1.0/examples/multimodal/nvlm)   [Website](https://research.nvidia.com/labs/adlr/NVLM-1/)

## Benchmark Results
We train our model with legacy [Megatron-LM](https://github.com/NVIDIA/Megatron-LM/tree/main/megatron/legacy) and adapt the codebase to Huggingface for model hosting, reproducibility, and inference.
We observe numerical differences between the Megatron and Huggingface codebases, which are within the expected range of variation.
We provide the results from both the Huggingface codebase and the Megatron codebase for reproducibility and comparison with other models.

Results (as of September 17th, 2024) in the multimodal benchmarks are as follows:

### Vision-language Benchmarks

| Benchmark                    | MMMU (val / test) | MathVista | OCRBench | AI2D | ChartQA | DocVQA | TextVQA | RealWorldQA | VQAv2 |
|------------------------------|-------------------|-----------|----------|------|---------|--------|---------|-------------|-------|
| NVLM-D 1.0 72B (Huggingface) | 58.7 / 54.9       | 65.2      | 852      | 94.2 | 86.0    | 92.6   | 82.6    | 69.5        | 85.4  |
| NVLM-D 1.0 72B (Megatron)    | 59.7 / 54.6       | 65.2      | 853      | 94.2 | 86.0    | 92.6   | 82.1    | 69.7        | 85.4  |
| Llama 3.2 90B                | 60.3 / -          | 57.3      | -        | 92.3 | 85.5    | 90.1   | -       | -           | 78.1  |
| Llama 3-V 70B                | 60.6 / -          | -         | -        | 93.0 | 83.2    | 92.2   | 83.4    | -           | 79.1  |
| Llama 3-V 405B               | 64.5 / -          | -         | -        | 94.1 | 85.8    | 92.6   | 84.8    | -           | 80.2  |
| InternVL2-Llama3-76B         | 55.2 / -          | 65.5      | 839      | 94.8 | 88.4    | 94.1   | 84.4    | 72.2        | -     |
| GPT-4V                       | 56.8 / 55.7       | 49.9      | 645      | 78.2 | 78.5    | 88.4   | 78.0    | 61.4        | 77.2  |
| GPT-4o                       | 69.1 / -          | 63.8      | 736      | 94.2 | 85.7    | 92.8   | -       | -           | -     |
| Claude 3.5 Sonnet            | 68.3 / -          | 67.7      | 788      | 94.7 | 90.8    | 95.2   | -       | -           | -     |
| Gemini 1.5 Pro (Aug 2024)    | 62.2 / -          | 63.9      | 754      | 94.4 | 87.2    | 93.1   | 78.7    | 70.4        | 80.2  |

### Text-only Benchmarks

| Tasks                        | Backbone LLM | MMLU | GSM8K | MATH | HumanEval | Avg. Accuracy    |
|------------------------------|--------------|------|-------|------|-----------|------------------|
| **Proprietary**              |              |      |       |      |           |                  |
| GPT-4.0                      | N/A          | 88.7 | -     | 76.6 | 90.2      | -                |
| Gemini Pro 1.5 (Aug 2024)    | N/A          | 85.9 | 90.8  | 67.7 | 84.1      | 82.1             |
| Claude 3.5 Sonnet            | N/A          | 88.7 | 96.4  | 71.1 | 92.0      | 87.0             |
| **Open LLM**                 |              |      |       |      |           |                  |
| (a) Nous-Hermes-2-Yi-34B     | N/A          | 75.5 | 78.6  | 21.8 | 43.3      | 54.8             |
| (b) Qwen-72B-Instruct        | N/A          | 82.3 | 91.1  | 59.7 | 86.0      | 79.8             |
| (c) Llama-3-70B-Instruct     | N/A          | 82.0 | 93.0  | 51.0 | 81.7      | 76.6             |
| (d) Llama-3.1-70B-Instruct   | N/A          | 83.6 | 95.1  | 68.0 | 80.5      | 81.8             |
| (e) Llama-3.1-405B-Instruct  | N/A          | 87.3 | 96.8  | 73.8 | 89.0      | 86.7             |
| **Open Multimodal LLM**      |              |      |       |      |           |                  |
| VILA-1.5 40B                 | (a)          | 73.3 | 67.5  | 16.8 | 34.1      | 🥶 47.9   (-6.9) |
| LLaVA-OneVision 72B          | (b)          | 80.6 | 89.9  | 49.2 | 74.4      | 🥶 73.5   (-6.3) |
| InternVL-2-Llama3-76B        | (c)          | 78.5 | 87.1  | 42.5 | 71.3      | 🥶 69.9   (-6.7) |
| *Llama 3-V 70B               | (d)          | 83.6 | 95.1  | 68.0 | 80.5      | 🙂 81.8   (0)    |
| *Llama 3-V 405B              | (e)          | 87.3 | 96.8  | 73.8 | 89.0      | 🙂 86.7   (0)    |
| NVLM-D 1.0 72B (Megatron)    | (b)          | 82.0 | 92.9  | 73.1 | 88.4      | 🥳 84.1   (+4.3) |
| NVLM-D 1.0 72B (Huggingface) | (b)          | 81.7 | 93.2  | 73.1 | 89.0      | 🥳 84.3   (+4.5) |


## Model Architectures

**Network Architecture:** Decoder-Only Transformer

**Text-only LLM backbone:** [Qwen2-72B-Instruct](https://huggingface.co/Qwen/Qwen2-72B-Instruct)

**Vision encoder:** [InternViT-6B](https://huggingface.co/OpenGVLab/InternViT-6B-448px-V1-2)

### Robustness

The model trained on this dataset cannot regenerate its training data:

1. The model has no image generation capability since its output is only text. Hence it cannot regenerate any image it would have seen during training.

2. The model cannot regenerate training text data: during training, the model takes text and images as inputs, and the model output (text) is conditioned on both inputs. During inference, without training images as input, the models would not be able to reproduce any part of the training text data.


### Input
**Input Type(s):** Text, Image <br>
**Input Format(s):** String, [Pillow Library-Supported Formats](https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html) <br>
**Input Dimensions:** One-Dimensional (1D), Two Dimensional (2D) <br>
**Other Properties Related to Input:** Maximum Token Length = 128K Tokens <br>

### Output
**Output Type(s):** Text <br>
**Output Format:** String <br>
**Model Output:** 1D <br>
**Other Properties Related to Output:** None <br>

## How to use

When converting the Megatron checkpoint to Huggingface, we adapt the [InternVL codebase](https://huggingface.co/OpenGVLab/InternVL2-Llama3-76B) to support model loading and multi-GPU inference in HF.
We also use the tokenizer from [Qwen2.5-72B-Instruct](https://huggingface.co/Qwen/Qwen2.5-72B-Instruct/tree/main) when adapting the tokenizer to Huggingface, as it contains extra special tokens for vision tasks, e.g., `<|vision_pad|>`.
We train NVLM-1.0-D-72B based on the [Qwen2-72B-Instruct](https://huggingface.co/Qwen/Qwen2-72B-Instruct/tree/main) text-only model and the [InternViT-6B-448px-V1-5](https://huggingface.co/OpenGVLab/InternViT-6B-448px-V1-5) ViT model with our large-scale, high-quality multimodal dataset.
For training code, please refer to [Megatron-Core](https://github.com/NVIDIA/Megatron-LM/tree/NVLM-1.0/examples/multimodal/nvlm).
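As a quick, hedged check (a minimal sketch; the exact list of extra special tokens is an assumption, not taken from the card), one can load the adapted tokenizer and confirm that the vision token resolves to an id:

```python
# Minimal sketch: verify the adapted tokenizer exposes the vision special tokens
# mentioned above (e.g., <|vision_pad|>). Assumes the tokenizer files are pulled
# from nvidia/NVLM-D-72B, as in the loading examples that follow.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("nvidia/NVLM-D-72B", trust_remote_code=True, use_fast=False)
print(tok.convert_tokens_to_ids("<|vision_pad|>"))                 # resolves to a valid id when the token is registered
print([t for t in tok.additional_special_tokens if "vision" in t])  # lists vision-related special tokens, if any
```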


### Prepare the environment

We provide a docker build file in the [Dockerfile](Dockerfile) for reproduction.

The docker image is based on `nvcr.io/nvidia/pytorch:23.09-py3`.

*Note: We observe that different transformer versions / CUDA versions / docker versions can lead to slight benchmark number differences. We recommend using the Dockerfile above for precise reproduction.*

### Model loading

```python
import torch
from transformers import AutoModel

path = ""nvidia/NVLM-D-72B""
model = AutoModel.from_pretrained(
    path,
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=True,
    use_flash_attn=False,
    trust_remote_code=True).eval()
```

### Multiple GPUs

The model can be loaded on multiple GPUs as follows:

```python
import torch
import math
from transformers import AutoModel

def split_model():
    device_map = {}
    world_size = torch.cuda.device_count()
    num_layers = 80
    # Since the first GPU will be used for ViT, treat it as half a GPU.
    num_layers_per_gpu = math.ceil(num_layers / (world_size - 0.5))
    num_layers_per_gpu = [num_layers_per_gpu] * world_size
    num_layers_per_gpu[0] = math.ceil(num_layers_per_gpu[0] * 0.5)
    layer_cnt = 0
    for i, num_layer in enumerate(num_layers_per_gpu):
        for j in range(num_layer):
            device_map[f'language_model.model.layers.{layer_cnt}'] = i
            layer_cnt += 1
    device_map['vision_model'] = 0
    device_map['mlp1'] = 0
    device_map['language_model.model.tok_embeddings'] = 0
    device_map['language_model.model.embed_tokens'] = 0
    device_map['language_model.output'] = 0
    device_map['language_model.model.norm'] = 0
    device_map['language_model.lm_head'] = 0
    device_map['language_model.model.rotary_emb'] = 0
    device_map[f'language_model.model.layers.{num_layers - 1}'] = 0

    return device_map

path = ""nvidia/NVLM-D-72B""
device_map = split_model()
model = AutoModel.from_pretrained(
    path,
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=True,
    use_flash_attn=False,
    trust_remote_code=True,
    device_map=device_map).eval()
```
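Before committing to loading the full 72B checkpoint, a short sketch like the following (reusing the `split_model` helper defined just above; the printout format is illustrative) can show how the decoder layers are distributed across the visible GPUs:

```python
# Minimal sanity-check sketch (assumes the split_model() helper defined above):
# count how many decoder layers land on each GPU before loading the weights.
from collections import Counter

device_map = split_model()
layer_counts = Counter(v for k, v in device_map.items()
                       if k.startswith('language_model.model.layers.'))
for gpu, n_layers in sorted(layer_counts.items()):
    print(f'GPU {gpu}: {n_layers} decoder layers')
# The vision tower, embeddings, final norm and lm_head are all pinned to GPU 0,
# which is why split_model() treats GPU 0 as roughly half a GPU.
```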


### Inference

```python
import torch
from transformers import AutoTokenizer, AutoModel
import math
from PIL import Image
import torchvision.transforms as T
from torchvision.transforms.functional import InterpolationMode


def split_model():
    device_map = {}
    world_size = torch.cuda.device_count()
    num_layers = 80
    # Since the first GPU will be used for ViT, treat it as half a GPU.
    num_layers_per_gpu = math.ceil(num_layers / (world_size - 0.5))
    num_layers_per_gpu = [num_layers_per_gpu] * world_size
    num_layers_per_gpu[0] = math.ceil(num_layers_per_gpu[0] * 0.5)
    layer_cnt = 0
    for i, num_layer in enumerate(num_layers_per_gpu):
        for j in range(num_layer):
            device_map[f'language_model.model.layers.{layer_cnt}'] = i
            layer_cnt += 1
    device_map['vision_model'] = 0
    device_map['mlp1'] = 0
    device_map['language_model.model.tok_embeddings'] = 0
    device_map['language_model.model.embed_tokens'] = 0
    device_map['language_model.output'] = 0
    device_map['language_model.model.norm'] = 0
    device_map['language_model.lm_head'] = 0
    device_map['language_model.model.rotary_emb'] = 0
    device_map[f'language_model.model.layers.{num_layers - 1}'] = 0

    return device_map


IMAGENET_MEAN = (0.485, 0.456, 0.406)
IMAGENET_STD = (0.229, 0.224, 0.225)


def build_transform(input_size):
    MEAN, STD = IMAGENET_MEAN, IMAGENET_STD
    transform = T.Compose([
        T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
        T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
        T.ToTensor(),
        T.Normalize(mean=MEAN, std=STD)
    ])
    return transform


def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
    best_ratio_diff = float('inf')
    best_ratio = (1, 1)
    area = width * height
    for ratio in target_ratios:
        target_aspect_ratio = ratio[0] / ratio[1]
        ratio_diff = abs(aspect_ratio - target_aspect_ratio)
        if ratio_diff < best_ratio_diff:
            best_ratio_diff = ratio_diff
            best_ratio = ratio
        elif ratio_diff == best_ratio_diff:
            if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
                best_ratio = ratio
    return best_ratio


def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=False):
    orig_width, orig_height = image.size
    aspect_ratio = orig_width / orig_height

    # calculate the existing image aspect ratio
    target_ratios = set(
        (i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
        i * j <= max_num and i * j >= min_num)
    target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])

    # find the closest aspect ratio to the target
    target_aspect_ratio = find_closest_aspect_ratio(
        aspect_ratio, target_ratios, orig_width, orig_height, image_size)

    # calculate the target width and height
    target_width = image_size * target_aspect_ratio[0]
    target_height = image_size * target_aspect_ratio[1]
    blocks = target_aspect_ratio[0] * target_aspect_ratio[1]

    # resize the image
    resized_img = image.resize((target_width, target_height))
    processed_images = []
    for i in range(blocks):
        box = (
            (i % (target_width // image_size)) * image_size,
            (i // (target_width // image_size)) * image_size,
            ((i % (target_width // image_size)) + 1) * image_size,
            ((i // (target_width // image_size)) + 1) * image_size
        )
        # split the image
        split_img = resized_img.crop(box)
        processed_images.append(split_img)
    assert len(processed_images) == blocks
    if use_thumbnail and len(processed_images) != 1:
        thumbnail_img = image.resize((image_size, image_size))
        processed_images.append(thumbnail_img)
    return processed_images


def load_image(image_file, input_size=448, max_num=12):
    image = Image.open(image_file).convert('RGB')
    transform = build_transform(input_size=input_size)
    images = dynamic_preprocess(image, image_size=input_size, use_thumbnail=True, max_num=max_num)
    pixel_values = [transform(image) for image in images]
    pixel_values = torch.stack(pixel_values)
    return pixel_values

path = ""nvidia/NVLM-D-72B""
device_map = split_model()
model = AutoModel.from_pretrained(
    path,
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=True,
    use_flash_attn=False,
    trust_remote_code=True,
    device_map=device_map).eval()

print(model)

tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast=False)
generation_config = dict(max_new_tokens=1024, do_sample=False)

# pure-text conversation
question = 'Hello, who are you?'
response, history = model.chat(tokenizer, None, question, generation_config, history=None, return_history=True)
print(f'User: {question}\nAssistant: {response}')

# single-image single-round conversation
pixel_values = load_image('path/to/your/example/image.jpg', max_num=6).to(
    torch.bfloat16)
question = '<image>\nPlease describe the image shortly.'
response = model.chat(tokenizer, pixel_values, question, generation_config)
print(f'User: {question}\nAssistant: {response}')
```
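Since `chat` returns the conversation state when `return_history=True`, a follow-up turn can reuse it. A minimal sketch, assuming the `model`, `tokenizer`, `generation_config`, and pure-text `history` created above (the follow-up question is illustrative):

```python
# Minimal follow-up sketch (reuses model, tokenizer, generation_config and the
# pure-text history from the example above; the question text is illustrative).
question = 'Can you summarize your previous answer in one sentence?'
response, history = model.chat(tokenizer, None, question, generation_config,
                               history=history, return_history=True)
print(f'User: {question}\nAssistant: {response}')
```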

### Benchmark Evaluation

To test our NVLM-1.0 model on the benchmark datasets, you can use the following code:

```bash
python run_eval.py --config-path eval/full_eval.yaml \
 --result-save-path path/to/eval_results/ \
 --zero-shot-eval-tasks chartqa coco_caption flickr30k_caption vqav2 mmmu textvqa mathvista mmbench chartqa docvqa realworldqa ocrbench ai2diagram ai2diagram_nomask mmmu_pro docvqa_test
```

Specifically,
- `--config-path eval/full_eval.yaml` points to the file containing the evaluation configurations, including the evaluation prompt, the evaluation dataset paths, and the generation hyper-parameters.
- `--result-save-path path/to/eval_results/` specifies the path to save the evaluation results.
- `--zero-shot-eval-tasks` specifies the tasks to evaluate on.


## Software Integration
**Runtime Engine(s)**
* PyTorch <br>

**Supported Hardware Microarchitecture Compatibility:** <br>
* NVIDIA Hopper <br>

**[Preferred/Supported] Operating System(s):** <br>
* Linux <br>

## Inference
**Engine:** PyTorch <br>
**Test Hardware:** <br>
* H100 <br>

## Model Version(s)
* v1.0-D (NVLM-D)

## Training, Testing, and Evaluation Datasets

### Pre-Training Dataset

**Link** <br>
* [See Table 4](https://arxiv.org/abs/2409.11402) <br>

**Data Collection Method by dataset** <br>
* Hybrid: Automated, Human, Synthetic, Unknown <br>

**Labeling Method by dataset** <br>
* Hybrid: Automated, Human, Synthetic, Unknown <br>

**Properties**
* Trained on image captions, image-text pairs, natural images, charts, documents, scene descriptions, and mathematical reasoning. <br>

### Supervised Fine-Tuning Dataset
**Link** <br>
* [See Table 6](https://arxiv.org/abs/2409.11402) <br>

**Data Collection Method by dataset** <br>
* Hybrid: Automated, Human, Synthetic, Unknown <br>

**Labeling Method by dataset** <br>
* Hybrid: Automated, Human, Synthetic, Unknown <br>

**Properties**
* Trained on image captions; general knowledge; image-text pairs; natural images; charts; diagrams; documents; scene descriptions; science diagrams, lessons, textbook data, and question-answer pairs; visual instruction tuning; and mathematical reasoning. <br>

### Evaluation Dataset
**Link** <br>
* [See Section 6.1, ""Benchmark""](https://arxiv.org/abs/2409.11402) <br>

**Data collection method by dataset** <br>
* Human <br>

**Labeling method by dataset** <br>
* Human <br>

**Properties** <br>
* Evaluated on general knowledge, visual answering, chart understanding, table, optical character recognition, and mathematical reasoning. <br>


## Correspondence to
Wenliang Dai* (wdai@nvidia.com), Nayeon Lee* (nayeonl@nvidia.com), Boxin Wang* (boxinw@nvidia.com), Zhuolin Yang* (zhuoliny@nvidia.com), Wei Ping* (wping@nvidia.com)

*Equal contribution

## Citation
<pre>
@article{nvlm2024,
  title={NVLM: Open Frontier-Class Multimodal LLMs},
  author={Dai, Wenliang and Lee, Nayeon and Wang, Boxin and Yang, Zhuolin and Liu, Zihan and Barker, Jon and Rintamaki, Tuomas and Shoeybi, Mohammad and Catanzaro, Bryan and Ping, Wei},
  journal={arXiv preprint},
  year={2024}}
</pre>


## Ethical Considerations
NVIDIA believes Trustworthy AI is a shared responsibility and we have established policies and practices to enable development for a wide array of AI applications. When downloaded or used in accordance with our terms of service, developers should work with their supporting model team to ensure this model meets requirements for the relevant industry and use case and addresses unforeseen product misuse.

Please report security vulnerabilities or NVIDIA AI Concerns [here](https://www.nvidia.com/en-us/support/submit-security-vulnerability/).
             ","{""id"": ""nvidia/NVLM-D-72B"", ""author"": ""nvidia"", ""sha"": ""11e9e0f8d265d65ddaa8f9445b6f55fdec9d686f"", ""last_modified"": ""2025-01-14 18:34:52+00:00"", ""created_at"": ""2024-09-30 18:24:49+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 15011, ""downloads_all_time"": null, ""likes"": 769, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""NVLM_D"", ""nvidia"", ""NVLM"", ""pytorch"", ""multimodal"", ""conversational"", ""image-text-to-text"", ""custom_code"", ""en"", ""arxiv:2409.11402"", ""license:cc-by-nc-4.0"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""image-text-to-text"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\nlibrary_name: transformers\nlicense: cc-by-nc-4.0\npipeline_tag: image-text-to-text\ntags:\n- nvidia\n- NVLM\n- pytorch\n- multimodal\n- conversational"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""NVLM_D""], ""auto_map"": {""AutoConfig"": ""configuration_nvlm_d.NVLM_D_Config"", ""AutoModel"": ""modeling_nvlm_d.NVLM_D_Model"", ""AutoModelForCausalLM"": ""modeling_nvlm_d.NVLM_D_Model""}, ""model_type"": ""NVLM_D"", ""tokenizer_config"": {""bos_token"": null, ""chat_template"": ""{%- if tools %}\n    {{- '<|im_start|>system\\n' }}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- messages[0]['content'] }}\n    {%- else %}\n        {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n    {%- endif %}\n    {{- \""\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\"" }}\n    {%- for tool in tools %}\n        {{- \""\\n\"" }}\n        {{- tool | tojson }}\n    {%- endfor %}\n    {{- \""\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\""name\\\"": <function-name>, \\\""arguments\\\"": <args-json-object>}\\n</tool_call><|im_end|>\\n\"" }}\n{%- else %}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n    {%- else %}\n        {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. 
You are a helpful assistant.<|im_end|>\\n' }}\n    {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n    {%- if (message.role == \""user\"") or (message.role == \""system\"" and not loop.first) or (message.role == \""assistant\"" and not message.tool_calls) %}\n        {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n    {%- elif message.role == \""assistant\"" %}\n        {{- '<|im_start|>' + message.role }}\n        {%- if message.content %}\n            {{- '\\n' + message.content }}\n        {%- endif %}\n        {%- for tool_call in message.tool_calls %}\n            {%- if tool_call.function is defined %}\n                {%- set tool_call = tool_call.function %}\n            {%- endif %}\n            {{- '\\n<tool_call>\\n{\""name\"": \""' }}\n            {{- tool_call.name }}\n            {{- '\"", \""arguments\"": ' }}\n            {{- tool_call.arguments | tojson }}\n            {{- '}\\n</tool_call>' }}\n        {%- endfor %}\n        {{- '<|im_end|>\\n' }}\n    {%- elif message.role == \""tool\"" %}\n        {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \""tool\"") %}\n            {{- '<|im_start|>user' }}\n        {%- endif %}\n        {{- '\\n<tool_response>\\n' }}\n        {{- message.content }}\n        {{- '\\n</tool_response>' }}\n        {%- if loop.last or (messages[loop.index0 + 1].role != \""tool\"") %}\n            {{- '<|im_end|>\\n' }}\n        {%- endif %}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n"", ""eos_token"": ""<|im_end|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": null}}, ""transformers_info"": {""auto_model"": ""NVLM_D"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Dockerfile', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Notice.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_intern_vit.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_nvlm_d.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='conversation.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='eval/conversation.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='eval/eval_dataset.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='eval/full_eval.yaml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='eval/mmmu_utils.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='eval/requirements.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='eval/vqa_utils.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='incl_licenses/LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='incl_licenses/LICENSE_2', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00004-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00012-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00013-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00014-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00015-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00016-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00017-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00018-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00019-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00020-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00021-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00022-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00023-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00024-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00025-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00026-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00027-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00028-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00029-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00030-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00031-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00032-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00033-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00034-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00035-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00036-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00037-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00038-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00039-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00040-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00041-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00042-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00043-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00044-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00045-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00046-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_intern_vit.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_nvlm_d.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='nvlm-logo-light.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='run_eval.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""merve/vision_papers"", ""TIGER-Lab/MEGA-Bench"", ""awacke1/Leaderboard-Deepseek-Gemini-Grok-GPT-Qwen"", ""Leo8613/AI_Nvidia"", ""Abhi-22/NVLM-D-Demo""], ""safetensors"": {""parameters"": {""F32"": 9071370240, ""BF16"": 70308223104}, ""total"": 79379593344}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-01-14 18:34:52+00:00"", ""cardData"": ""language:\n- en\nlibrary_name: transformers\nlicense: cc-by-nc-4.0\npipeline_tag: image-text-to-text\ntags:\n- nvidia\n- NVLM\n- pytorch\n- multimodal\n- conversational"", ""transformersInfo"": {""auto_model"": ""NVLM_D"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""66faecf1a15bc0b3f0c55924"", ""modelId"": ""nvidia/NVLM-D-72B"", ""usedStorage"": 354049622570}",0,"https://huggingface.co/peter0000/newrobot, https://huggingface.co/shriramprabhu/final, https://huggingface.co/renia2954/chatbot, https://huggingface.co/A1wayzbeenrea1/New_one, https://huggingface.co/AdelfelteR/Harold, https://huggingface.co/PIOTRsus123/BUMCYKCYK, https://huggingface.co/Arunic/Arunia, https://huggingface.co/laligaa/365, https://huggingface.co/midhyaraj/voiceclone, https://huggingface.co/CapitaLeny/ShadowBoy, https://huggingface.co/hussam05/yolov8",11,https://huggingface.co/PrakashReddy77/NLP,1,"https://huggingface.co/SeanScripts/NVLM-D-72B-nf4, https://huggingface.co/nm-testing/NVLM-D-72B-FP8-dynamic",2,https://huggingface.co/bkbj/Atest,1,"Abhi-22/NVLM-D-Demo, Leo8613/AI_Nvidia, TIGER-Lab/MEGA-Bench, awacke1/Leaderboard-Deepseek-Gemini-Grok-GPT-Qwen, huggingface/InferenceSupport/discussions/new?title=nvidia/NVLM-D-72B&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bnvidia%2FNVLM-D-72B%5D(%2Fnvidia%2FNVLM-D-72B)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, merve/vision_papers",6
         
peter0000/newrobot,"---
license: openrail
datasets:
- openai/MMMLU
language:
- aa
metrics:
- character
base_model:
- nvidia/NVLM-D-72B
new_version: nvidia/NVLM-D-72B
pipeline_tag: question-answering
tags:
- legal
            ---","{""id"": ""peter0000/newrobot"", ""author"": ""peter0000"", ""sha"": ""7203425a9e3f1e525444c0d016ce44ef3d51d803"", ""last_modified"": ""2024-10-07 08:28:42+00:00"", ""created_at"": ""2024-10-07 08:01:10+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""legal"", ""question-answering"", ""aa"", ""dataset:openai/MMMLU"", ""base_model:nvidia/NVLM-D-72B"", ""base_model:finetune:nvidia/NVLM-D-72B"", ""license:openrail"", ""region:us""], ""pipeline_tag"": ""question-answering"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- nvidia/NVLM-D-72B\ndatasets:\n- openai/MMMLU\nlanguage:\n- aa\nlicense: openrail\nmetrics:\n- character\npipeline_tag: question-answering\ntags:\n- legal\nnew_version: nvidia/NVLM-D-72B"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-07 08:28:42+00:00"", ""cardData"": ""base_model:\n- nvidia/NVLM-D-72B\ndatasets:\n- openai/MMMLU\nlanguage:\n- aa\nlicense: openrail\nmetrics:\n- character\npipeline_tag: question-answering\ntags:\n- legal\nnew_version: nvidia/NVLM-D-72B"", ""transformersInfo"": null, ""_id"": ""6703954622f1034f87e12dff"", ""modelId"": ""peter0000/newrobot"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=peter0000/newrobot&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bpeter0000%2Fnewrobot%5D(%2Fpeter0000%2Fnewrobot)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
shriramprabhu/final,"---
license: mit
datasets:
- openai/MMMLU
language:
- ta
- en
metrics:
- accuracy
base_model:
- nvidia/NVLM-D-72B
new_version: meta-llama/Llama-3.2-11B-Vision-Instruct
pipeline_tag: text2text-generation
library_name: diffusers
tags:
- code
            ---","{""id"": ""shriramprabhu/final"", ""author"": ""shriramprabhu"", ""sha"": ""42e6764b35d3d14c3684959ced99c13e7f734920"", ""last_modified"": ""2024-10-14 09:28:46+00:00"", ""created_at"": ""2024-10-14 09:26:27+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""code"", ""text2text-generation"", ""ta"", ""en"", ""dataset:openai/MMMLU"", ""base_model:nvidia/NVLM-D-72B"", ""base_model:finetune:nvidia/NVLM-D-72B"", ""license:mit"", ""region:us""], ""pipeline_tag"": ""text2text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- nvidia/NVLM-D-72B\ndatasets:\n- openai/MMMLU\nlanguage:\n- ta\n- en\nlibrary_name: diffusers\nlicense: mit\nmetrics:\n- accuracy\npipeline_tag: text2text-generation\ntags:\n- code\nnew_version: meta-llama/Llama-3.2-11B-Vision-Instruct"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-14 09:28:46+00:00"", ""cardData"": ""base_model:\n- nvidia/NVLM-D-72B\ndatasets:\n- openai/MMMLU\nlanguage:\n- ta\n- en\nlibrary_name: diffusers\nlicense: mit\nmetrics:\n- accuracy\npipeline_tag: text2text-generation\ntags:\n- code\nnew_version: meta-llama/Llama-3.2-11B-Vision-Instruct"", ""transformersInfo"": null, ""_id"": ""670ce3c3c4ceb91ac8f557bc"", ""modelId"": ""shriramprabhu/final"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=shriramprabhu/final&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bshriramprabhu%2Ffinal%5D(%2Fshriramprabhu%2Ffinal)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
renia2954/chatbot,"---
license: apache-2.0
datasets:
- nvidia/OpenMathInstruct-2
language:
- el
- en
base_model:
- nvidia/NVLM-D-72B
library_name: fastai
            ---","{""id"": ""renia2954/chatbot"", ""author"": ""renia2954"", ""sha"": ""a8f848d67be14ece2e1ef33cae6e529ad4352720"", ""last_modified"": ""2024-10-17 15:09:35+00:00"", ""created_at"": ""2024-10-17 15:08:53+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""fastai"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""fastai"", ""el"", ""en"", ""dataset:nvidia/OpenMathInstruct-2"", ""base_model:nvidia/NVLM-D-72B"", ""base_model:finetune:nvidia/NVLM-D-72B"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- nvidia/NVLM-D-72B\ndatasets:\n- nvidia/OpenMathInstruct-2\nlanguage:\n- el\n- en\nlibrary_name: fastai\nlicense: apache-2.0"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-17 15:09:35+00:00"", ""cardData"": ""base_model:\n- nvidia/NVLM-D-72B\ndatasets:\n- nvidia/OpenMathInstruct-2\nlanguage:\n- el\n- en\nlibrary_name: fastai\nlicense: apache-2.0"", ""transformersInfo"": null, ""_id"": ""671128853b830303f03af830"", ""modelId"": ""renia2954/chatbot"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=renia2954/chatbot&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Brenia2954%2Fchatbot%5D(%2Frenia2954%2Fchatbot)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
A1wayzbeenrea1/New_one,"---
license: cc-by-nc-sa-3.0
datasets:
- fka/awesome-chatgpt-prompts
- google/frames-benchmark
base_model:
- openai/whisper-large-v3-turbo
- nvidia/NVLM-D-72B
            ---","{""id"": ""A1wayzbeenrea1/New_one"", ""author"": ""A1wayzbeenrea1"", ""sha"": ""93318d6a82e1ba5faae3723c4f3b8f3b770f6b25"", ""last_modified"": ""2024-10-20 02:42:27+00:00"", ""created_at"": ""2024-10-20 02:38:38+00:00"", ""private"": false, ""gated"": ""auto"", ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""dataset:fka/awesome-chatgpt-prompts"", ""dataset:google/frames-benchmark"", ""base_model:nvidia/NVLM-D-72B"", ""base_model:finetune:nvidia/NVLM-D-72B"", ""doi:10.57967/hf/3287"", ""license:cc-by-nc-sa-3.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- openai/whisper-large-v3-turbo\n- nvidia/NVLM-D-72B\ndatasets:\n- fka/awesome-chatgpt-prompts\n- google/frames-benchmark\nlicense: cc-by-nc-sa-3.0"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='1', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-20 02:42:27+00:00"", ""cardData"": ""base_model:\n- openai/whisper-large-v3-turbo\n- nvidia/NVLM-D-72B\ndatasets:\n- fka/awesome-chatgpt-prompts\n- google/frames-benchmark\nlicense: cc-by-nc-sa-3.0"", ""transformersInfo"": null, ""_id"": ""67146d2e3db2ca408458d653"", ""modelId"": ""A1wayzbeenrea1/New_one"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=A1wayzbeenrea1/New_one&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BA1wayzbeenrea1%2FNew_one%5D(%2FA1wayzbeenrea1%2FNew_one)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
AdelfelteR/Harold,"---
license: openrail++
datasets:
- nvidia/OpenMathInstruct-2
language:
- es
base_model:
- nvidia/NVLM-D-72B
            ---","{""id"": ""AdelfelteR/Harold"", ""author"": ""AdelfelteR"", ""sha"": ""939c599db8ec640f2ffe3738cad9cb1f88000cb2"", ""last_modified"": ""2024-10-20 14:40:58+00:00"", ""created_at"": ""2024-10-20 14:39:58+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""es"", ""dataset:nvidia/OpenMathInstruct-2"", ""base_model:nvidia/NVLM-D-72B"", ""base_model:finetune:nvidia/NVLM-D-72B"", ""license:openrail++"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- nvidia/NVLM-D-72B\ndatasets:\n- nvidia/OpenMathInstruct-2\nlanguage:\n- es\nlicense: openrail++"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-20 14:40:58+00:00"", ""cardData"": ""base_model:\n- nvidia/NVLM-D-72B\ndatasets:\n- nvidia/OpenMathInstruct-2\nlanguage:\n- es\nlicense: openrail++"", ""transformersInfo"": null, ""_id"": ""6715163efd0aa609d1e127f7"", ""modelId"": ""AdelfelteR/Harold"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=AdelfelteR/Harold&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BAdelfelteR%2FHarold%5D(%2FAdelfelteR%2FHarold)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
PIOTRsus123/BUMCYKCYK,"---
datasets:
- PIOTRsus123/SUS
language:
- pl
- en
- es
- de
metrics:
- accuracy
- bertscore
- bleu
- bleurt
- brier_score
- cer
- character
- charcut_mt
- chrf
- code_eval
base_model:
- nvidia/NVLM-D-72B
pipeline_tag: any-to-any
            ---","{""id"": ""PIOTRsus123/BUMCYKCYK"", ""author"": ""PIOTRsus123"", ""sha"": ""3338abc7ecf9d25089473a5f69e79dbe7d9b8b7f"", ""last_modified"": ""2024-10-21 17:48:18+00:00"", ""created_at"": ""2024-10-21 17:43:38+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 1, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""any-to-any"", ""pl"", ""en"", ""es"", ""de"", ""dataset:PIOTRsus123/SUS"", ""base_model:nvidia/NVLM-D-72B"", ""base_model:finetune:nvidia/NVLM-D-72B"", ""region:us""], ""pipeline_tag"": ""any-to-any"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- nvidia/NVLM-D-72B\ndatasets:\n- PIOTRsus123/SUS\nlanguage:\n- pl\n- en\n- es\n- de\nmetrics:\n- accuracy\n- bertscore\n- bleu\n- bleurt\n- brier_score\n- cer\n- character\n- charcut_mt\n- chrf\n- code_eval\npipeline_tag: any-to-any"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-21 17:48:18+00:00"", ""cardData"": ""base_model:\n- nvidia/NVLM-D-72B\ndatasets:\n- PIOTRsus123/SUS\nlanguage:\n- pl\n- en\n- es\n- de\nmetrics:\n- accuracy\n- bertscore\n- bleu\n- bleurt\n- brier_score\n- cer\n- character\n- charcut_mt\n- chrf\n- code_eval\npipeline_tag: any-to-any"", ""transformersInfo"": null, ""_id"": ""671692ca1cc3a3bb3b334070"", ""modelId"": ""PIOTRsus123/BUMCYKCYK"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=PIOTRsus123/BUMCYKCYK&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BPIOTRsus123%2FBUMCYKCYK%5D(%2FPIOTRsus123%2FBUMCYKCYK)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
Arunic/Arunia,"---
license: apache-2.0
datasets:
- nvidia/OpenMathInstruct-2
- google/frames-benchmark
- openai/MMMLU
language:
- en
metrics:
- accuracy
base_model:
- nvidia/NVLM-D-72B
new_version: nvidia/NVLM-D-72B
tags:
- code
- Developing
- Help_With_Physics
- Physics
- Coding_With_Physics
- OpenGL
library_name: asteroid
            ---","{""id"": ""Arunic/Arunia"", ""author"": ""Arunic"", ""sha"": ""407228e6ac6b746b4b27de14f29c424f2a857c2a"", ""last_modified"": ""2024-10-23 07:56:52+00:00"", ""created_at"": ""2024-10-23 07:48:12+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""asteroid"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""asteroid"", ""code"", ""Developing"", ""Help_With_Physics"", ""Physics"", ""Coding_With_Physics"", ""OpenGL"", ""en"", ""dataset:nvidia/OpenMathInstruct-2"", ""dataset:google/frames-benchmark"", ""dataset:openai/MMMLU"", ""base_model:nvidia/NVLM-D-72B"", ""base_model:finetune:nvidia/NVLM-D-72B"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- nvidia/NVLM-D-72B\ndatasets:\n- nvidia/OpenMathInstruct-2\n- google/frames-benchmark\n- openai/MMMLU\nlanguage:\n- en\nlibrary_name: asteroid\nlicense: apache-2.0\nmetrics:\n- accuracy\ntags:\n- code\n- Developing\n- Help_With_Physics\n- Physics\n- Coding_With_Physics\n- OpenGL\nnew_version: nvidia/NVLM-D-72B"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-23 07:56:52+00:00"", ""cardData"": ""base_model:\n- nvidia/NVLM-D-72B\ndatasets:\n- nvidia/OpenMathInstruct-2\n- google/frames-benchmark\n- openai/MMMLU\nlanguage:\n- en\nlibrary_name: asteroid\nlicense: apache-2.0\nmetrics:\n- accuracy\ntags:\n- code\n- Developing\n- Help_With_Physics\n- Physics\n- Coding_With_Physics\n- OpenGL\nnew_version: nvidia/NVLM-D-72B"", ""transformersInfo"": null, ""_id"": ""6718aa3c811f95f326396d04"", ""modelId"": ""Arunic/Arunia"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=Arunic/Arunia&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BArunic%2FArunia%5D(%2FArunic%2FArunia)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
laligaa/365,"---
license: wtfpl
language:
- id
base_model:
- nvidia/NVLM-D-72B
pipeline_tag: text-classification
datasets:
- fka/awesome-chatgpt-prompts
metrics:
- accuracy
new_version: nvidia/Llama-3.1-Nemotron-70B-Instruct-HF
library_name: flair
            ---","{""id"": ""laligaa/365"", ""author"": ""laligaa"", ""sha"": ""446c204949626d6cbade6bc827e498dbc2fb3813"", ""last_modified"": ""2024-10-25 11:28:06+00:00"", ""created_at"": ""2024-10-25 11:25:26+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""flair"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""flair"", ""text-classification"", ""id"", ""dataset:fka/awesome-chatgpt-prompts"", ""base_model:nvidia/NVLM-D-72B"", ""base_model:finetune:nvidia/NVLM-D-72B"", ""license:wtfpl"", ""region:us""], ""pipeline_tag"": ""text-classification"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- nvidia/NVLM-D-72B\ndatasets:\n- fka/awesome-chatgpt-prompts\nlanguage:\n- id\nlibrary_name: flair\nlicense: wtfpl\nmetrics:\n- accuracy\npipeline_tag: text-classification\nnew_version: nvidia/Llama-3.1-Nemotron-70B-Instruct-HF"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-25 11:28:06+00:00"", ""cardData"": ""base_model:\n- nvidia/NVLM-D-72B\ndatasets:\n- fka/awesome-chatgpt-prompts\nlanguage:\n- id\nlibrary_name: flair\nlicense: wtfpl\nmetrics:\n- accuracy\npipeline_tag: text-classification\nnew_version: nvidia/Llama-3.1-Nemotron-70B-Instruct-HF"", ""transformersInfo"": null, ""_id"": ""671b8026ee22338297eddafc"", ""modelId"": ""laligaa/365"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=laligaa/365&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Blaligaa%2F365%5D(%2Flaligaa%2F365)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
midhyaraj/voiceclone,"---
license: apache-2.0
base_model:
- nvidia/NVLM-D-72B
---
!pip install -U scipy
!git clone https://github.com/neonbjb/tortoise-tts.git
%cd tortoise-tts
!pip install -r requirements.txt
!python setup.py install
!pip install gradio

import os
import gradio as gr
import torchaudio
import time
from datetime import datetime
from tortoise.api import TextToSpeech
from tortoise.utils.audio import load_audio, load_voice, load_voices
import os

# Set the Gradio queue flag to disabled
os.environ[""COMMANDLINE_ARGS""] = ""--no-gradio-queue""
VOICE_OPTIONS = [
    ""random"",  # special option for random voice
    ""custom_voice"",  # special option for custom voice
    ""disabled"",  # special option for disabled voice
]

def inference(text, emotion, prompt, voice, mic_audio, voice_b, voice_c, preset, seed):
    if voice != ""custom_voice"":
        voices = [voice]
    else:
        voices = []

    if voice_b != ""disabled"":
        voices.append(voice_b)
    if voice_c != ""disabled"":
        voices.append(voice_c)

    if emotion != ""None/Custom"":
        text = f""[I am really {emotion.lower()},] {text}""
    elif prompt.strip() != """":
        text = f""[{prompt},] {text}""

    c = None
    if voice == ""custom_voice"":
        if mic_audio is None:
            raise gr.Error(""Please provide audio from mic when choosing custom voice"")
        c = load_audio(mic_audio, 22050)

    if len(voices) == 1 or len(voices) == 0:
        if voice == ""custom_voice"":
            voice_samples, conditioning_latents = [c], None
        else:
            voice_samples, conditioning_latents = load_voice(voice)
    else:
        voice_samples, conditioning_latents = load_voices(voices)
        if voice == ""custom_voice"":
            voice_samples.extend([c])

    sample_voice = voice_samples[0] if len(voice_samples) else None

    start_time = time.time()
    gen, _ = tts.tts_with_preset(
        text,
        voice_samples=voice_samples,
        conditioning_latents=conditioning_latents,
        preset=preset,
        use_deterministic_seed=seed,
        return_deterministic_state=True,
        k=3,
    )

    with open(""Tortoise_TTS_Runs.log"", ""a"") as f:
        f.write(
            f""{datetime.now()} | Voice: {','.join(voices)} | Text: {text} | Quality: {preset} | Time Taken (s): {time.time()-start_time} | Seed: {seed}\n""
        )

    return (
        (22050, sample_voice.squeeze().cpu().numpy()),
        (24000, gen[0].squeeze().cpu().numpy()),
        (24000, gen[1].squeeze().cpu().numpy()),
        (24000, gen[2].squeeze().cpu().numpy()),
    )

def main():
    # Custom HTML for the title
    title_html = ""<h1 style='text-align: center; color: orange; font-weight: bold;'>RJ VOICE CLONING</h1>""

    # Interface components
    text = gr.Textbox(lines=4, label=""Text:"")
    emotion = gr.Radio(
        [""None/Custom"", ""Happy"", ""Sad"", ""Angry"", ""Disgusted"", ""Arrogant""],
        value=""None/Custom"",
        label=""Select emotion:"",
        type=""value"",
    )
    prompt = gr.Textbox(lines=1, label=""Enter prompt if [Custom] emotion:"")
    preset = gr.Radio(
        [""ultra_fast"", ""fast"", ""standard"", ""high_quality""],
        value=""fast"",
        label=""Preset mode (determines quality with tradeoff over speed):"",
        type=""value"",
    )
    voice = gr.Dropdown(
        os.listdir(os.path.join(""tortoise"", ""voices"")) + VOICE_OPTIONS,
        value=""angie"",  # Default voice
        label=""Select voice:"",
        type=""value"",
    )
    mic_audio = gr.Audio(
        label=""Record voice (when selected custom_voice):"",
        type=""filepath""
    )
    voice_b = gr.Dropdown(
        os.listdir(os.path.join(""tortoise"", ""voices"")) + VOICE_OPTIONS,
        value=""disabled"",
        label=""(Optional) Select second voice:"",
        type=""value"",
    )
    voice_c = gr.Dropdown(
        os.listdir(os.path.join(""tortoise"", ""voices"")) + VOICE_OPTIONS,
        value=""disabled"",
        label=""(Optional) Select third voice:"",
        type=""value"",
    )
    seed = gr.Number(value=0, precision=0, label=""Seed (for reproducibility):"")

    selected_voice = gr.Audio(label=""Sample of selected voice (first):"")
    output_audio_1 = gr.Audio(label=""Output [Candidate 1]:"")
    output_audio_2 = gr.Audio(label=""Output [Candidate 2]:"")
    output_audio_3 = gr.Audio(label=""Output [Candidate 3]:"")

    # Create the Gradio interface
    interface = gr.Interface(
        fn=inference,
        inputs=[text, emotion, prompt, voice, mic_audio, voice_b, voice_c, preset, seed],
        outputs=[selected_voice, output_audio_1, output_audio_2, output_audio_3],
        title=""RJ VOICE CLONING"",
        description=title_html,
        css="".gradio-container { background-color: black; color: orange; }""
    )

    # Launch the interface
    interface.launch(share=True)

if __name__ == ""__main__"":
    tts = TextToSpeech()

    with open(""Tortoise_TTS_Runs.log"", ""a"") as f:
        f.write(
            f""\n\n-------------------------Tortoise TTS Logs, {datetime.now()}-------------------------\n""
        )

                main()","{""id"": ""midhyaraj/voiceclone"", ""author"": ""midhyaraj"", ""sha"": ""a446a91f6b6e3f04f1b6a9e6f47cdc64494cf9ee"", ""last_modified"": ""2024-10-28 06:27:31+00:00"", ""created_at"": ""2024-10-26 08:01:57+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""base_model:nvidia/NVLM-D-72B"", ""base_model:finetune:nvidia/NVLM-D-72B"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- nvidia/NVLM-D-72B\nlicense: apache-2.0"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='app.py', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-28 06:27:31+00:00"", ""cardData"": ""base_model:\n- nvidia/NVLM-D-72B\nlicense: apache-2.0"", ""transformersInfo"": null, ""_id"": ""671ca1f56472cd2bde36bbbf"", ""modelId"": ""midhyaraj/voiceclone"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=midhyaraj/voiceclone&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bmidhyaraj%2Fvoiceclone%5D(%2Fmidhyaraj%2Fvoiceclone)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
CapitaLeny/ShadowBoy,"---
license: afl-3.0
base_model:
- nvidia/NVLM-D-72B
            ---","{""id"": ""CapitaLeny/ShadowBoy"", ""author"": ""CapitaLeny"", ""sha"": ""ad267ada86282067cdfce03b5bf35841e545a804"", ""last_modified"": ""2024-10-28 03:08:11+00:00"", ""created_at"": ""2024-10-28 02:56:16+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""base_model:nvidia/NVLM-D-72B"", ""base_model:finetune:nvidia/NVLM-D-72B"", ""license:afl-3.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- nvidia/NVLM-D-72B\nlicense: afl-3.0"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-28 03:08:11+00:00"", ""cardData"": ""base_model:\n- nvidia/NVLM-D-72B\nlicense: afl-3.0"", ""transformersInfo"": null, ""_id"": ""671efd50ebb68cf25160dbd7"", ""modelId"": ""CapitaLeny/ShadowBoy"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=CapitaLeny/ShadowBoy&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BCapitaLeny%2FShadowBoy%5D(%2FCapitaLeny%2FShadowBoy)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
hussam05/yolov8,"---
datasets:
- nvidia/OpenMathInstruct-2
language:
- ar
metrics:
- accuracy
base_model:
- nvidia/NVLM-D-72B
new_version: nvidia/Llama-3.1-Nemotron-70B-Instruct-HF
pipeline_tag: image-to-image
library_name: flair
tags:
- code
            ---","{""id"": ""hussam05/yolov8"", ""author"": ""hussam05"", ""sha"": ""19190766d8958b2ff480cdca8730fe82591751e6"", ""last_modified"": ""2024-11-02 18:48:39+00:00"", ""created_at"": ""2024-11-02 18:45:29+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""flair"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""flair"", ""code"", ""image-to-image"", ""ar"", ""dataset:nvidia/OpenMathInstruct-2"", ""base_model:nvidia/NVLM-D-72B"", ""base_model:finetune:nvidia/NVLM-D-72B"", ""region:us""], ""pipeline_tag"": ""image-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- nvidia/NVLM-D-72B\ndatasets:\n- nvidia/OpenMathInstruct-2\nlanguage:\n- ar\nlibrary_name: flair\nmetrics:\n- accuracy\npipeline_tag: image-to-image\ntags:\n- code\nnew_version: nvidia/Llama-3.1-Nemotron-70B-Instruct-HF"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-02 18:48:39+00:00"", ""cardData"": ""base_model:\n- nvidia/NVLM-D-72B\ndatasets:\n- nvidia/OpenMathInstruct-2\nlanguage:\n- ar\nlibrary_name: flair\nmetrics:\n- accuracy\npipeline_tag: image-to-image\ntags:\n- code\nnew_version: nvidia/Llama-3.1-Nemotron-70B-Instruct-HF"", ""transformersInfo"": null, ""_id"": ""6726734988599b098d7dfdb3"", ""modelId"": ""hussam05/yolov8"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=hussam05/yolov8&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bhussam05%2Fyolov8%5D(%2Fhussam05%2Fyolov8)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
Nemotron-4-340B-Instruct_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv ADDED
The diff for this file is too large to render. See raw diff.

Nous-Hermes-Llama2-13b_finetunes_20250426_121513.csv_finetunes_20250426_121513.csv ADDED
The diff for this file is too large to render. See raw diff.

Phi-3-vision-128k-instruct_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv ADDED
@@ -0,0 +1,367 @@
| 1 | 
         
            +
            model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
         
     | 
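The header row above defines one record per model repository: the model id, the full model card text, serialized repository metadata, and counts of derived artifacts (finetunes, adapters, quantizations, merges, spaces). Purely as a hedged illustration (the file name is a placeholder and the parsing of the metadata column is an assumption, not part of the dataset), one way to load and inspect such a CSV:

```python
# Hypothetical sketch: load one of the finetunes CSVs and peek at the columns
# named in the header row. 'finetunes.csv' is a placeholder path.
import pandas as pd

df = pd.read_csv('finetunes.csv')
print(df[['model_id', 'children_count', 'adapters_count',
          'quantized_count', 'spaces_count']].head())

# The 'metadata' column holds one serialized dict per row; depending on how it
# was written, json.loads or ast.literal_eval may be needed to parse it.
```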
| 2 | 
         
            +
            microsoft/Phi-3-vision-128k-instruct,"---
         
     | 
| 3 | 
         
            +
            license: mit
         
     | 
| 4 | 
         
            +
            license_link: https://huggingface.co/microsoft/Phi-3-vision-128k-instruct/resolve/main/LICENSE
         
     | 
| 5 | 
         
            +
             
     | 
| 6 | 
         
            +
            language:
         
     | 
| 7 | 
         
            +
            - multilingual
         
     | 
| 8 | 
         
            +
            pipeline_tag: text-generation
         
     | 
| 9 | 
         
            +
            tags:
         
     | 
| 10 | 
         
            +
            - nlp
         
     | 
| 11 | 
         
            +
            - code
         
     | 
| 12 | 
         
            +
            - vision
         
     | 
| 13 | 
         
            +
            inference:
         
     | 
| 14 | 
         
            +
              parameters:
         
     | 
| 15 | 
         
            +
                temperature: 0.7
         
     | 
| 16 | 
         
            +
            widget:
         
     | 
| 17 | 
         
            +
              - messages:
         
     | 
| 18 | 
         
            +
                  - role: user
         
     | 
| 19 | 
         
            +
                    content: <|image_1|>Can you describe what you see in the image?
         
     | 
| 20 | 
         
            +
            ---
         
     | 
| 21 | 
         
            +
            🎉 **Phi-3.5**: [[mini-instruct]](https://huggingface.co/microsoft/Phi-3.5-mini-instruct); [[MoE-instruct]](https://huggingface.co/microsoft/Phi-3.5-MoE-instruct) ; [[vision-instruct]](https://huggingface.co/microsoft/Phi-3.5-vision-instruct)
         
     | 
| 22 | 
         
            +
             
     | 
| 23 | 
         
            +
            ## Model Summary
         
     | 
| 24 | 
         
            +
             
     | 
| 25 | 
         
            +
The Phi-3-Vision-128K-Instruct is a lightweight, state-of-the-art open multimodal model built upon datasets that include synthetic data and filtered publicly available websites, with a focus on very high-quality, reasoning-dense data in both text and vision. The model belongs to the Phi-3 model family, and this multimodal version supports a context length of 128K tokens. The model underwent a rigorous enhancement process, incorporating both supervised fine-tuning and direct preference optimization to ensure precise instruction adherence and robust safety measures.
         
     | 
| 26 | 
         
            +
             
     | 
| 27 | 
         
            +
            Resources and Technical Documentation:
         
     | 
| 28 | 
         
            +
             
     | 
| 29 | 
         
            +
            + [Phi-3 Microsoft Blog](https://aka.ms/Phi-3Build2024)
         
     | 
| 30 | 
         
            +
            + [Phi-3 Technical Report](https://aka.ms/phi3-tech-report)
         
     | 
| 31 | 
         
            +
            + [Phi-3 on Azure AI Studio](https://aka.ms/try-phi3vision)
         
     | 
| 32 | 
         
            +
            + [Phi-3 Cookbook](https://github.com/microsoft/Phi-3CookBook)
         
     | 
| 33 | 
         
            +
             
     | 
| 34 | 
         
            +
             
     | 
| 35 | 
         
            +
            |         | Short Context | Long Context |
         
     | 
| 36 | 
         
            +
            | ------- | ------------- | ------------ |
         
     | 
| 37 | 
         
            +
            | Mini    | 4K [[HF]](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct-onnx) ; [[GGUF]](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct-gguf) | 128K [[HF]](https://huggingface.co/microsoft/Phi-3-mini-128k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-mini-128k-instruct-onnx)|
         
     | 
| 38 | 
         
            +
            | Small   | 8K [[HF]](https://huggingface.co/microsoft/Phi-3-small-8k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-small-8k-instruct-onnx-cuda) | 128K [[HF]](https://huggingface.co/microsoft/Phi-3-small-128k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-small-128k-instruct-onnx-cuda)|
         
     | 
| 39 | 
         
            +
            | Medium  | 4K [[HF]](https://huggingface.co/microsoft/Phi-3-medium-4k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-medium-4k-instruct-onnx-cuda) | 128K [[HF]](https://huggingface.co/microsoft/Phi-3-medium-128k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-medium-128k-instruct-onnx-cuda)|
         
     | 
| 40 | 
         
            +
            | Vision  |  | 128K [[HF]](https://huggingface.co/microsoft/Phi-3-vision-128k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-vision-128k-instruct-onnx-cuda)|
         
     | 
| 41 | 
         
            +
             
     | 
| 42 | 
         
            +
            ## Intended Uses
         
     | 
| 43 | 
         
            +
             
     | 
| 44 | 
         
            +
            **Primary use cases**
         
     | 
| 45 | 
         
            +
             
     | 
| 46 | 
         
            +
The model is intended for broad commercial and research use in English. It is designed for general-purpose AI systems and applications with visual and text input capabilities that require 
         
     | 
| 47 | 
         
            +
             
     | 
| 48 | 
         
            +
            1) memory/compute constrained environments;
         
     | 
| 49 | 
         
            +
            2) latency bound scenarios;
         
     | 
| 50 | 
         
            +
            3) general image understanding;
         
     | 
| 51 | 
         
            +
            4) OCR;
         
     | 
| 52 | 
         
            +
            5) chart and table understanding.
         
     | 
| 53 | 
         
            +
             
     | 
| 54 | 
         
            +
            Our model is designed to accelerate research on efficient language and multimodal models, for use as a building block for generative AI powered features.
         
     | 
| 55 | 
         
            +
             
     | 
| 56 | 
         
            +
            **Use case considerations**
         
     | 
| 57 | 
         
            +
             
     | 
| 58 | 
         
            +
            Our models are not specifically designed or evaluated for all downstream purposes. Developers should consider common limitations of language models as they select use cases, and evaluate and mitigate for accuracy, safety, and fairness before using within a specific downstream use case, particularly for high-risk scenarios. 
         
     | 
| 59 | 
         
            +
            Developers should be aware of and adhere to applicable laws or regulations (including privacy, trade compliance laws, etc.) that are relevant to their use case. 
         
     | 
| 60 | 
         
            +
             
     | 
| 61 | 
         
            +
            Nothing contained in this Model Card should be interpreted as or deemed a restriction or modification to the license the model is released under.
         
     | 
| 62 | 
         
            +
             
     | 
| 63 | 
         
            +
            ## How to Use
         
     | 
| 64 | 
         
            +
             
     | 
| 65 | 
         
            +
Phi-3-Vision-128K-Instruct has been integrated into the development version (4.40.2) of `transformers`. Until the official version is released through `pip`, ensure that you are doing one of the following:
         
     | 
| 66 | 
         
            +
            * When loading the model, ensure that `trust_remote_code=True` is passed as an argument of the `from_pretrained()` function.
         
     | 
| 67 | 
         
            +
             
     | 
| 68 | 
         
            +
            * Update your local `transformers` to the development version: `pip uninstall -y transformers && pip install git+https://github.com/huggingface/transformers`. The previous command is an alternative to cloning and installing from the source.
         
     | 
| 69 | 
         
            +
             
     | 
| 70 | 
         
            +
            The current `transformers` version can be verified with: `pip list | grep transformers`.
         
     | 
| 71 | 
         
            +
             
     | 
| 72 | 
         
            +
            Examples of required packages:
         
     | 
| 73 | 
         
            +
            ```
         
     | 
| 74 | 
         
            +
            flash_attn==2.5.8
         
     | 
| 75 | 
         
            +
            numpy==1.24.4
         
     | 
| 76 | 
         
            +
            Pillow==10.3.0
         
     | 
| 77 | 
         
            +
            Requests==2.31.0
         
     | 
| 78 | 
         
            +
            torch==2.3.0
         
     | 
| 79 | 
         
            +
            torchvision==0.18.0
         
     | 
| 80 | 
         
            +
            transformers==4.40.2
         
     | 
| 81 | 
         
            +
            ```
         
     | 
| 82 | 
         
            +
             
     | 
| 83 | 
         
            +
            Phi-3-Vision-128K-Instruct is also available in [Azure AI Studio](https://aka.ms/phi3-azure-ai).
         
     | 
| 84 | 
         
            +
             
     | 
| 85 | 
         
            +
            ### Chat Format
         
     | 
| 86 | 
         
            +
             
     | 
| 87 | 
         
            +
Given the nature of the training data, the Phi-3-Vision-128K-Instruct model is best suited for a single image input with prompts using the chat format as follows. 
         
     | 
| 88 | 
         
            +
You can provide the prompt for a single image using the following generic template:
         
     | 
| 89 | 
         
            +
            ```markdown
         
     | 
| 90 | 
         
            +
            <|user|>\n<|image_1|>\n{prompt}<|end|>\n<|assistant|>\n 
         
     | 
| 91 | 
         
            +
            ```
         
     | 
| 92 | 
         
            +
             
     | 
| 93 | 
         
            +
where the model generates the text after `<|assistant|>`. In the case of a multi-turn conversation, the prompt can be formatted as follows:
         
     | 
| 94 | 
         
            +
             
     | 
| 95 | 
         
            +
            ```markdown
         
     | 
| 96 | 
         
            +
            <|user|>\n<|image_1|>\n{prompt_1}<|end|>\n<|assistant|>\n{response_1}<|end|>\n<|user|>\n{prompt_2}<|end|>\n<|assistant|>\n 
         
     | 
| 97 | 
         
            +
            ```
         
     | 
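As a minimal sketch (assuming the processor's built-in chat template, which the full inference example below also relies on), the multi-turn prompt above can be produced from a list of role/content messages instead of being assembled by hand:

```python
# Sketch: build the multi-turn prompt via the processor's chat template.
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained(
    'microsoft/Phi-3-vision-128k-instruct', trust_remote_code=True
)
messages = [
    {'role': 'user', 'content': '<|image_1|>\n{prompt_1}'},
    {'role': 'assistant', 'content': '{response_1}'},
    {'role': 'user', 'content': '{prompt_2}'},
]
prompt = processor.tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
# Yields: <|user|>\n<|image_1|>\n{prompt_1}<|end|>\n<|assistant|>\n{response_1}<|end|>\n<|user|>\n{prompt_2}<|end|>\n<|assistant|>\n
print(prompt)
```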
| 98 | 
         
            +
             
     | 
| 99 | 
         
            +
            ### Sample inference code
         
     | 
| 100 | 
         
            +
             
     | 
| 101 | 
         
            +
This code snippet shows how to quickly get started with running the model on a GPU:
         
     | 
| 102 | 
         
            +
             
     | 
| 103 | 
         
            +
            ```python
         
     | 
| 104 | 
         
            +
            from PIL import Image 
         
     | 
| 105 | 
         
            +
            import requests 
         
     | 
| 106 | 
         
            +
            from transformers import AutoModelForCausalLM 
         
     | 
| 107 | 
         
            +
            from transformers import AutoProcessor 
         
     | 
| 108 | 
         
            +
             
     | 
| 109 | 
         
            +
            model_id = ""microsoft/Phi-3-vision-128k-instruct"" 
         
     | 
| 110 | 
         
            +
             
     | 
| 111 | 
         
            +
            model = AutoModelForCausalLM.from_pretrained(model_id, device_map=""cuda"", trust_remote_code=True, torch_dtype=""auto"", _attn_implementation='flash_attention_2') # use _attn_implementation='eager' to disable flash attention
         
     | 
| 112 | 
         
            +
             
     | 
| 113 | 
         
            +
            processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True) 
         
     | 
| 114 | 
         
            +
             
     | 
| 115 | 
         
            +
            messages = [ 
         
     | 
| 116 | 
         
            +
                {""role"": ""user"", ""content"": ""<|image_1|>\nWhat is shown in this image?""}, 
         
     | 
| 117 | 
         
            +
                {""role"": ""assistant"", ""content"": ""The chart displays the percentage of respondents who agree with various statements about their preparedness for meetings. It shows five categories: 'Having clear and pre-defined goals for meetings', 'Knowing where to find the information I need for a meeting', 'Understanding my exact role and responsibilities when I'm invited', 'Having tools to manage admin tasks like note-taking or summarization', and 'Having more focus time to sufficiently prepare for meetings'. Each category has an associated bar indicating the level of agreement, measured on a scale from 0% to 100%.""}, 
         
     | 
| 118 | 
         
            +
                {""role"": ""user"", ""content"": ""Provide insightful questions to spark discussion.""} 
         
     | 
| 119 | 
         
            +
            ] 
         
     | 
| 120 | 
         
            +
             
     | 
| 121 | 
         
            +
            url = ""https://assets-c4akfrf5b4d3f4b7.z01.azurefd.net/assets/2024/04/BMDataViz_661fb89f3845e.png"" 
         
     | 
| 122 | 
         
            +
            image = Image.open(requests.get(url, stream=True).raw) 
         
     | 
| 123 | 
         
            +
             
     | 
| 124 | 
         
            +
            prompt = processor.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
         
     | 
| 125 | 
         
            +
             
     | 
| 126 | 
         
            +
            inputs = processor(prompt, [image], return_tensors=""pt"").to(""cuda:0"") 
         
     | 
| 127 | 
         
            +
             
     | 
| 128 | 
         
            +
            generation_args = { 
         
     | 
| 129 | 
         
            +
                ""max_new_tokens"": 500, 
         
     | 
| 130 | 
         
            +
                ""temperature"": 0.0, 
         
     | 
| 131 | 
         
            +
                ""do_sample"": False, 
         
     | 
| 132 | 
         
            +
            } 
         
     | 
| 133 | 
         
            +
             
     | 
| 134 | 
         
            +
            generate_ids = model.generate(**inputs, eos_token_id=processor.tokenizer.eos_token_id, **generation_args) 
         
     | 
| 135 | 
         
            +
             
     | 
| 136 | 
         
            +
            # remove input tokens 
         
     | 
| 137 | 
         
            +
            generate_ids = generate_ids[:, inputs['input_ids'].shape[1]:]
         
     | 
| 138 | 
         
            +
            response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] 
         
     | 
| 139 | 
         
            +
             
     | 
| 140 | 
         
            +
            print(response) 
         
     | 
| 141 | 
         
            +
            ```
         
     | 
| 142 | 
         
            +
             
     | 
| 143 | 
         
            +
            Additional basic examples are provided [here](https://huggingface.co/microsoft/Phi-3-vision-128k-instruct/blob/main/sample_inference.py).
         
     | 
| 144 | 
         
            +
             
     | 
| 145 | 
         
            +
            ### How to finetune?
         
     | 
| 146 | 
         
            +
We recommend users take a look at the [Phi-3 CookBook finetuning recipe for Vision](https://github.com/microsoft/Phi-3CookBook/blob/main/md/04.Fine-tuning/FineTuning_Vision.md).
         
     | 
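The CookBook recipe is the authoritative reference; purely for orientation, a minimal LoRA-style sketch is given below. It is not the CookBook recipe: the PEFT usage, target module names, and hyperparameters are assumptions and should be checked against the model's custom code.

```python
# Hypothetical sketch only (not the Phi-3 CookBook recipe): wrap the model with
# LoRA adapters via PEFT before supervised fine-tuning. The module names
# 'qkv_proj' and 'o_proj' are assumptions about the language-model blocks.
from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model

model = AutoModelForCausalLM.from_pretrained(
    'microsoft/Phi-3-vision-128k-instruct',
    trust_remote_code=True,
    torch_dtype='auto',
)
lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    target_modules=['qkv_proj', 'o_proj'],
    task_type='CAUSAL_LM',
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()  # only the adapter weights are trainable
```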
| 147 | 
         
            +
             
     | 
| 148 | 
         
            +
             
     | 
| 149 | 
         
            +
            ## Responsible AI Considerations
         
     | 
| 150 | 
         
            +
             
     | 
| 151 | 
         
            +
            Like other models, the Phi family of models can potentially behave in ways that are unfair, unreliable, or offensive. Some of the limiting behaviors to be aware of include:   
         
     | 
| 152 | 
         
            +
             
     | 
| 153 | 
         
            +
            + Quality of Service: The Phi models are trained primarily on English text. Languages other than English will experience worse performance. English language varieties with less representation in the training data might experience worse performance than standard American English.    
         
     | 
| 154 | 
         
            +
            + Representation of Harms & Perpetuation of Stereotypes: These models can over- or under-represent groups of people, erase representation of some groups, or reinforce demeaning or negative stereotypes. Despite safety post-training, these limitations may still be present due to differing levels of representation of different groups or prevalence of examples of negative stereotypes in training data that reflect real-world patterns and societal biases.  
         
     | 
| 155 | 
         
            +
            + Inappropriate or Offensive Content: These models may produce other types of inappropriate or offensive content, which may make it inappropriate to deploy for sensitive contexts without additional mitigations that are specific to the use case.  
         
     | 
| 156 | 
         
            +
            + Information Reliability: Language models can generate nonsensical content or fabricate content that might sound reasonable but is inaccurate or outdated.   
         
     | 
| 157 | 
         
            +
+ Limited Scope for Code: The majority of Phi-3 training data is based on Python and uses common packages such as ""typing, math, random, collections, datetime, itertools"". If the model generates Python scripts that utilize other packages or scripts in other languages, we strongly recommend users manually verify all API uses.      
         
     | 
| 158 | 
         
            +
             
     | 
| 159 | 
         
            +
            Developers should apply responsible AI best practices and are responsible for ensuring that a specific use case complies with relevant laws and regulations (e.g. privacy, trade, etc.). Important areas for consideration include:  
         
     | 
| 160 | 
         
            +
             
     | 
| 161 | 
         
            +
            + Allocation: Models may not be suitable for scenarios that could have consequential impact on legal status or the allocation of resources or life opportunities (ex: housing, employment, credit, etc.) without further assessments and additional debiasing techniques.
         
     | 
| 162 | 
         
            +
            + High-Risk Scenarios: Developers should assess suitability of using models in high-risk scenarios where unfair, unreliable or offensive outputs might be extremely costly or lead to harm. This includes providing advice in sensitive or expert domains where accuracy and reliability are critical (ex: legal or health advice). Additional safeguards should be implemented at the application level according to the deployment context.
         
     | 
| 163 | 
         
            +
            + Misinformation: Models may produce inaccurate information. Developers should follow transparency best practices and inform end-users they are interacting with an AI system. At the application level, developers can build feedback mechanisms and pipelines to ground responses in use-case specific, contextual information, a technique known as Retrieval Augmented Generation (RAG).
         
     | 
| 164 | 
         
            +
            + Generation of Harmful Content: Developers should assess outputs for their context and use available safety classifiers or custom solutions appropriate for their use case.
         
     | 
| 165 | 
         
            +
            + Misuse: Other forms of misuse such as fraud, spam, or malware production may be possible, and developers should ensure that their applications do not violate applicable laws and regulations.
         
     | 
| 166 | 
         
            +
+ Identification of individuals: models with vision capabilities may have the potential to uniquely identify individuals in images. Safety post-training steers the model to refuse such requests, but developers should consider and implement, as appropriate, additional mitigations or user consent flows as required in their respective jurisdiction (e.g., building measures to blur faces in image inputs before processing).
         
     | 
| 167 | 
         
            +
              
         
     | 
| 168 | 
         
            +
            ## Training
         
     | 
| 169 | 
         
            +
             
     | 
| 170 | 
         
            +
            ### Model
         
     | 
| 171 | 
         
            +
             
     | 
| 172 | 
         
            +
* Architecture: Phi-3-Vision-128K-Instruct has 4.2B parameters and contains an image encoder, connector, projector, and the Phi-3 Mini language model.
         
     | 
| 173 | 
         
            +
            * Inputs: Text and Image. It’s best suited for prompts using the chat format. 
         
     | 
| 174 | 
         
            +
            * Context length: 128K tokens
         
     | 
| 175 | 
         
            +
            * GPUs: 512 H100-80G
         
     | 
| 176 | 
         
            +
            * Training time: 1.5 days
         
     | 
| 177 | 
         
            +
            * Training data: 500B vision and text tokens
         
     | 
| 178 | 
         
            +
            * Outputs: Generated text in response to the input
         
     | 
| 179 | 
         
            +
            * Dates: Our models were trained between February and April 2024
         
     | 
| 180 | 
         
            +
            * Status: This is a static model trained on an offline text dataset with cutoff date Mar 15, 2024. Future versions of the tuned models may be released as we improve models.
         
     | 
| 181 | 
         
            +
            * Release Type: Open weight release
         
     | 
| 182 | 
         
            +
* Release dates: The model weights were released on May 21, 2024.
         
     | 
| 183 | 
         
            +
             
     | 
| 184 | 
         
            +
            ### Datasets
         
     | 
| 185 | 
         
            +
             
     | 
| 186 | 
         
            +
            Our training data includes a wide variety of sources, and is a combination of 
         
     | 
| 187 | 
         
            +
             
     | 
| 188 | 
         
            +
            1) publicly available documents filtered rigorously for quality, selected high-quality educational data and code;
         
     | 
| 189 | 
         
            +
2) selected high-quality image-text interleaved data;
         
     | 
| 190 | 
         
            +
            3) newly created synthetic, “textbook-like” data for the purpose of teaching math, coding, common sense reasoning, general knowledge of the world (science, daily activities, theory of mind, etc.), newly created image data, e.g., chart/table/diagram/slides;
         
     | 
| 191 | 
         
            +
4) high-quality chat-format supervised data covering various topics to reflect human preferences on different aspects such as instruction-following, truthfulness, honesty, and helpfulness.
         
     | 
| 192 | 
         
            +
             
     | 
| 193 | 
         
            +
            The data collection process involved sourcing information from publicly available documents, with a meticulous approach to filtering out undesirable documents and images. To safeguard privacy, we carefully filtered various image and text data sources to remove or scrub any potentially personal data from the training data.
         
     | 
| 194 | 
         
            +
             
         
     | 
| 195 | 
         
            +
            More details can be found in the [Phi-3 Technical Report](https://aka.ms/phi3-tech-report).
         
     | 
| 196 | 
         
            +
             
     | 
| 197 | 
         
            +
            ## Benchmarks
         
     | 
| 198 | 
         
            +
             
     | 
| 199 | 
         
            +
            To understand the capabilities, we compare Phi-3-Vision-128K-Instruct with a set of models over a variety of zero-shot benchmarks using our internal benchmark platform.
         
     | 
| 200 | 
         
            +
             
     | 
| 201 | 
         
            +
            |Benchmark|Phi-3 Vision-128K-In|LlaVA-1.6 Vicuna-7B|QWEN-VL Chat|Llama3-Llava-Next-8B|Claude-3 Haiku|Gemini 1.0 Pro V|GPT-4V-Turbo|
         
     | 
| 202 | 
         
            +
            |---------|---------------------|------------------|------------|--------------------|--------------|----------------|------------|
         
     | 
| 203 | 
         
            +
            |MMMU|40.4|34.2|39.0|36.4|40.7|42.0|55.5| 
         
     | 
| 204 | 
         
            +
            |MMBench|80.5|76.3|75.8|79.4|62.4|80.0|86.1|
         
     | 
| 205 | 
         
            +
            |ScienceQA|90.8|70.6|67.2|73.7|72.0|79.7|75.7|
         
     | 
| 206 | 
         
            +
            |MathVista|44.5|31.5|29.4|34.8|33.2|35.0|47.5|
         
     | 
| 207 | 
         
            +
            |InterGPS|38.1|20.5|22.3|24.6|32.1|28.6|41.0|
         
     | 
| 208 | 
         
            +
            |AI2D|76.7|63.1|59.8|66.9|60.3|62.8|74.7|
         
     | 
| 209 | 
         
            +
            |ChartQA|81.4|55.0|50.9|65.8|59.3|58.0|62.3|
         
     | 
| 210 | 
         
            +
            |TextVQA|70.9|64.6|59.4|55.7|62.7|64.7|68.1|
         
     | 
| 211 | 
         
            +
            |POPE|85.8|87.2|82.6|87.0|74.4|84.2|83.7|
         
     | 
| 212 | 
         
            +
             
     | 
| 213 | 
         
            +
             
     | 
| 214 | 
         
            +
            ## Software
         
     | 
| 215 | 
         
            +
             
     | 
| 216 | 
         
            +
            * [PyTorch](https://github.com/pytorch/pytorch)
         
     | 
| 217 | 
         
            +
            * [Transformers](https://github.com/huggingface/transformers)
         
     | 
| 218 | 
         
            +
            * [Flash-Attention](https://github.com/HazyResearch/flash-attention)
         
     | 
| 219 | 
         
            +
             
     | 
| 220 | 
         
            +
            ## Hardware
         
     | 
| 221 | 
         
            +
            Note that by default, the Phi-3-Vision-128K model uses flash attention, which requires certain types of GPU hardware to run. We have tested on the following GPU types:
         
     | 
| 222 | 
         
            +
            * NVIDIA A100
         
     | 
| 223 | 
         
            +
            * NVIDIA A6000
         
     | 
| 224 | 
         
            +
            * NVIDIA H100
         
     | 
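If none of the GPUs above (or another flash-attention-capable card) is available, the model card's own suggestion is to disable flash attention; a minimal sketch of the same loading call with eager attention:

```python
# Sketch: load without flash attention, as suggested in the inference example
# above (`_attn_implementation='eager'`). Useful on GPUs without flash_attn.
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    'microsoft/Phi-3-vision-128k-instruct',
    device_map='cuda',
    trust_remote_code=True,
    torch_dtype='auto',
    _attn_implementation='eager',
)
```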
| 225 | 
         
            +
             
     | 
| 226 | 
         
            +
            ## License
         
     | 
| 227 | 
         
            +
             
     | 
| 228 | 
         
            +
            The model is licensed under the [MIT license](https://huggingface.co/microsoft/Phi-3-vision-128k-instruct/resolve/main/LICENSE).
         
     | 
| 229 | 
         
            +
             
     | 
| 230 | 
         
            +
            ## Trademarks
         
     | 
| 231 | 
         
            +
             
     | 
| 232 | 
         
            +
            This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft trademarks or logos is subject to and must follow [Microsoft’s Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks). Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. Any use of third-party trademarks or logos are subject to those third-party’s policies.
         
     | 
| 233 | 
         
            +
            ","{""id"": ""microsoft/Phi-3-vision-128k-instruct"", ""author"": ""microsoft"", ""sha"": ""c45209e90a4c4f7d16b2e9d48503c7f3e83623ed"", ""last_modified"": ""2024-08-20 19:56:22+00:00"", ""created_at"": ""2024-05-19 15:07:39+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 26258, ""downloads_all_time"": null, ""likes"": 958, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""phi3_v"", ""text-generation"", ""nlp"", ""code"", ""vision"", ""conversational"", ""custom_code"", ""multilingual"", ""license:mit"", ""autotrain_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- multilingual\nlicense: mit\nlicense_link: https://huggingface.co/microsoft/Phi-3-vision-128k-instruct/resolve/main/LICENSE\npipeline_tag: text-generation\ntags:\n- nlp\n- code\n- vision\ninference:\n  parameters:\n    temperature: 0.7\nwidget:\n- messages:\n  - role: user\n    content: <|image_1|>Can you describe what you see in the image?"", ""widget_data"": [{""messages"": [{""role"": ""user"", ""content"": ""<|image_1|>Can you describe what you see in the image?""}]}], ""model_index"": null, ""config"": {""architectures"": [""Phi3VForCausalLM""], ""auto_map"": {""AutoConfig"": ""configuration_phi3_v.Phi3VConfig"", ""AutoModelForCausalLM"": ""modeling_phi3_v.Phi3VForCausalLM""}, ""model_type"": ""phi3_v"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{% for message in messages %}{{'<|' + message['role'] + '|>' + '\n' + message['content'] + '<|end|>\n' }}{% endfor %}{% if add_generation_prompt and messages[-1]['role'] != 'assistant' %}{{- '<|assistant|>\n' -}}{% endif %}"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_phi3_v.Phi3VForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='CODE_OF_CONDUCT.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='SECURITY.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='SUPPORT.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_phi3_v.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='image_embedding_phi3_v.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='image_processing_phi3_v.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_phi3_v.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='processing_phi3_v.py', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='sample_inference.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""ysharma/Microsoft_Phi-3-Vision-128k"", ""MaziyarPanahi/Phi-3-vision-128k"", ""KBaba7/Quant"", ""dwb2023/omniscience"", ""simonraj/PersonalTrainer-Arnold"", ""dwb2023/hf_extractor"", ""bhaskartripathi/LLM_Quantization"", ""totolook/Quant"", ""FallnAI/Quantize-HF-Models"", ""dwb2023/model_explorer2"", ""TypeGPT/Microsoft_Phi-3-Vision-128k"", ""dwb2023/model_explorer4"", ""ruslanmv/convert_to_gguf"", ""PkyHallo/microsoft-Phi-3-vision-128k-instruct"", ""BoltzmannEntropy/vlms"", ""SharryOG/AI"", ""hf1024/Phi-3-vision-128k"", ""nikhilalgorithma/vision1"", ""zohadev/Microsoft-Phi-3-Vision-128k"", ""aggaranu/Microsoft_Phi-3-Vision-128k"", ""sergiopaniego/Microsoft_Phi-3-Vision-128k"", ""Shankarm08/phi3vision"", ""Aleksmorshen/ImageChatbot-Phi-3-Vision-128k"", ""saicharantej/design-golden-rules"", ""Npps/ChatwithLLM"", ""Aleksmorshen/Vision"", ""ashupanda123/AdAlly"", ""Gokulram2710/Phi-3-vision-test-api"", ""Gokulram2710/Microsoft_Phi-3-Vision-128k"", ""nikhilk197/microsoft-Phi-3-vision-128k-instruct"", ""namrata12/Image_Summarizer"", ""AlyxTeam/Phi-3-vision-128k-instruct"", ""srivatsavdamaraju/phi3-fastapi"", ""Abhilash0804/microsoft-Phi-3.5-vision-gradio"", ""KN123/Microsoft_Phi-3-Vision-128k"", ""revproxy/nv-vlm"", ""NEROTECHRB/clothing-segmentation-detection"", ""tomasruiz/multimodal-vibe-check"", ""K00B404/LLM_Quantization"", ""lbw18601752667/IDMR-demo"", ""darsoarafa/visi""], ""safetensors"": {""parameters"": {""BF16"": 4146621440}, ""total"": 4146621440}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-08-20 19:56:22+00:00"", ""cardData"": ""language:\n- multilingual\nlicense: mit\nlicense_link: https://huggingface.co/microsoft/Phi-3-vision-128k-instruct/resolve/main/LICENSE\npipeline_tag: text-generation\ntags:\n- nlp\n- code\n- vision\ninference:\n  parameters:\n    temperature: 0.7\nwidget:\n- messages:\n  - role: user\n    content: <|image_1|>Can you describe what you see in the image?"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_phi3_v.Phi3VForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""664a15bb0e5395fb529b1258"", ""modelId"": ""microsoft/Phi-3-vision-128k-instruct"", ""usedStorage"": 16586661776}",0,"https://huggingface.co/MuyeHuang/EvoChart, https://huggingface.co/SkyNetdev/Phi3v-Finetuned, https://huggingface.co/aschirru34/results, https://huggingface.co/MCG-NJU/VideoChatOnline-4B",4,"https://huggingface.co/farshadafx/results, https://huggingface.co/puar-playground/Col-Phi-3-V, https://huggingface.co/NTT-hil-insight/VDocRetriever-Phi3-vision, https://huggingface.co/NTT-hil-insight/VDocGenerator-Phi3-vision",4,"https://huggingface.co/onnx-community/Phi-3-vision-128k-instruct, https://huggingface.co/llmware/phi-3-vision-onnx, https://huggingface.co/RedHatAI/Phi-3-vision-128k-instruct-W4A16-G128",3,,0,"BoltzmannEntropy/vlms, FallnAI/Quantize-HF-Models, K00B404/LLM_Quantization, KBaba7/Quant, MaziyarPanahi/Phi-3-vision-128k, TypeGPT/Microsoft_Phi-3-Vision-128k, bhaskartripathi/LLM_Quantization, dwb2023/hf_extractor, dwb2023/omniscience, 
huggingface/InferenceSupport/discussions/new?title=microsoft/Phi-3-vision-128k-instruct&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bmicrosoft%2FPhi-3-vision-128k-instruct%5D(%2Fmicrosoft%2FPhi-3-vision-128k-instruct)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, lbw18601752667/IDMR-demo, revproxy/nv-vlm, ysharma/Microsoft_Phi-3-Vision-128k",13
         
     | 
| 234 | 
         
            +
            MuyeHuang/EvoChart,"---
         
     | 
| 235 | 
         
            +
            license: gpl-3.0
         
     | 
| 236 | 
         
            +
            language:
         
     | 
| 237 | 
         
            +
            - en
         
     | 
| 238 | 
         
            +
            base_model: microsoft/Phi-3-vision-128k-instruct
         
     | 
| 239 | 
         
            +
            ---
         
     | 
| 240 | 
         
            +
             
     | 
| 241 | 
         
            +
            # EvoChart Model
         
     | 
| 242 | 
         
            +
             
     | 
| 243 | 
         
            +
This model is based on Phi3-Vision and self-trained using the EvoChart method for chart understanding. 
         
     | 
| 244 | 
         
            +
             
     | 
| 245 | 
         
            +
            ## Usage
         
     | 
| 246 | 
         
            +
             
     | 
| 247 | 
         
            +
            For usage instructions, please refer to the original Phi3-Vision-Instruct documentation.
         
     | 
| 248 | 
         
            +
             
     | 
| 249 | 
         
            +
            ## Resources
         
     | 
| 250 | 
         
            +
             
     | 
| 251 | 
         
            +
            * **GitHub:** https://github.com/MuyeHuang/EvoChart
         
     | 
| 252 | 
         
            +
            * **Arxiv:** https://arxiv.org/abs/2409.01577
         
     | 
| 253 | 
         
            +
             
     | 
| 254 | 
         
            +
            ## Citation
         
     | 
| 255 | 
         
            +
             
     | 
| 256 | 
         
            +
            If you use this model in your research, please cite the following paper:
         
     | 
| 257 | 
         
            +
             
     | 
| 258 | 
         
            +
            ```bibtex
         
     | 
| 259 | 
         
            +
            @misc{huang2024evochartbenchmarkselftrainingapproach,
         
     | 
| 260 | 
         
            +
                  title={EvoChart: A Benchmark and a Self-Training Approach Towards Real-World Chart Understanding}, 
         
     | 
| 261 | 
         
            +
                  author={Muye Huang and Lai Han and Xinyu Zhang and Wenjun Wu and Jie Ma and Lingling Zhang and Jun Liu},
         
     | 
| 262 | 
         
            +
                  year={2024},
         
     | 
| 263 | 
         
            +
                  eprint={2409.01577},
         
     | 
| 264 | 
         
            +
                  archivePrefix={arXiv},
         
     | 
| 265 | 
         
            +
                  primaryClass={cs.CV},
         
     | 
| 266 | 
         
            +
                  url={https://arxiv.org/abs/2409.01577}, 
         
     | 
| 267 | 
         
            +
            }","{""id"": ""MuyeHuang/EvoChart"", ""author"": ""MuyeHuang"", ""sha"": ""de198330629f0c378596eb95c1fe5e587900c7f1"", ""last_modified"": ""2025-01-28 09:51:55+00:00"", ""created_at"": ""2024-09-04 04:09:53+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 6, ""downloads_all_time"": null, ""likes"": 1, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""safetensors"", ""phi3_v"", ""custom_code"", ""en"", ""arxiv:2409.01577"", ""base_model:microsoft/Phi-3-vision-128k-instruct"", ""base_model:finetune:microsoft/Phi-3-vision-128k-instruct"", ""license:gpl-3.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: microsoft/Phi-3-vision-128k-instruct\nlanguage:\n- en\nlicense: gpl-3.0"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""Phi3VForCausalLM""], ""auto_map"": {""AutoConfig"": ""configuration_phi3_v.Phi3VConfig"", ""AutoModelForCausalLM"": ""modeling_phi3_v.Phi3VForCausalLM""}, ""model_type"": ""phi3_v"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{% for message in messages %}{{'<|' + message['role'] + '|>' + '\n' + message['content'] + '<|end|>\n' }}{% endfor %}{% if add_generation_prompt and messages[-1]['role'] != 'assistant' %}{{- '<|assistant|>\n' -}}{% endif %}"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_phi3_v.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='image_embedding_phi3_v.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='image_processing_phi3_v.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_phi3_v.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='processing_phi3_v.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00002.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00002.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-01-28 09:51:55+00:00"", ""cardData"": ""base_model: microsoft/Phi-3-vision-128k-instruct\nlanguage:\n- en\nlicense: gpl-3.0"", ""transformersInfo"": null, ""_id"": ""66d7dd9154ae22fbfaceff69"", ""modelId"": ""MuyeHuang/EvoChart"", ""usedStorage"": 
8293472191}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=MuyeHuang/EvoChart&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BMuyeHuang%2FEvoChart%5D(%2FMuyeHuang%2FEvoChart)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
     | 
| 268 | 
         
            +
            SkyNetdev/Phi3v-Finetuned,"---
         
     | 
| 269 | 
         
            +
            license: apache-2.0
         
     | 
| 270 | 
         
            +
            datasets:
         
     | 
| 271 | 
         
            +
            - HuggingFaceM4/the_cauldron
         
     | 
| 272 | 
         
            +
            language:
         
     | 
| 273 | 
         
            +
            - en
         
     | 
| 274 | 
         
            +
            base_model: microsoft/Phi-3-vision-128k-instruct
         
     | 
| 275 | 
         
            +
            ---","{""id"": ""SkyNetdev/Phi3v-Finetuned"", ""author"": ""SkyNetdev"", ""sha"": ""4ddf83e9bfc5a4ec84f885c8766f586567279dca"", ""last_modified"": ""2024-09-04 18:11:26+00:00"", ""created_at"": ""2024-09-04 15:51:43+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1, ""downloads_all_time"": null, ""likes"": 1, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""safetensors"", ""phi3_v"", ""custom_code"", ""en"", ""dataset:HuggingFaceM4/the_cauldron"", ""base_model:microsoft/Phi-3-vision-128k-instruct"", ""base_model:finetune:microsoft/Phi-3-vision-128k-instruct"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: microsoft/Phi-3-vision-128k-instruct\ndatasets:\n- HuggingFaceM4/the_cauldron\nlanguage:\n- en\nlicense: apache-2.0"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""Phi3VForCausalLM""], ""auto_map"": {""AutoConfig"": ""configuration_phi3_v.Phi3VConfig"", ""AutoModelForCausalLM"": ""modeling_phi3_v.Phi3VForCausalLM""}, ""model_type"": ""phi3_v"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{% for message in messages %}{{'<|' + message['role'] + '|>' + '\n' + message['content'] + '<|end|>\n' }}{% endfor %}{% if add_generation_prompt and messages[-1]['role'] != 'assistant' %}{{- '<|assistant|>\n' -}}{% endif %}"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_phi3_v.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='eval_after.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='eval_before.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='image_embedding_phi3_v.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='image_processing_phi3_v.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_phi3_v.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='processing_phi3_v.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='processor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F16"": 4245122048}, ""total"": 4245122048}, ""security_repo_status"": null, ""xet_enabled"": 
null, ""lastModified"": ""2024-09-04 18:11:26+00:00"", ""cardData"": ""base_model: microsoft/Phi-3-vision-128k-instruct\ndatasets:\n- HuggingFaceM4/the_cauldron\nlanguage:\n- en\nlicense: apache-2.0"", ""transformersInfo"": null, ""_id"": ""66d8820fe83856057c29a412"", ""modelId"": ""SkyNetdev/Phi3v-Finetuned"", ""usedStorage"": 8490338464}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=SkyNetdev/Phi3v-Finetuned&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BSkyNetdev%2FPhi3v-Finetuned%5D(%2FSkyNetdev%2FPhi3v-Finetuned)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
     | 
| 276 | 
         
            +
            aschirru34/results,"---
         
     | 
| 277 | 
         
            +
            base_model: microsoft/Phi-3-vision-128k-instruct
         
     | 
| 278 | 
         
            +
            library_name: transformers
         
     | 
| 279 | 
         
            +
            model_name: results
         
     | 
| 280 | 
         
            +
            tags:
         
     | 
| 281 | 
         
            +
            - generated_from_trainer
         
     | 
| 282 | 
         
            +
            - trl
         
     | 
| 283 | 
         
            +
            - sft
         
     | 
| 284 | 
         
            +
            licence: license
         
     | 
| 285 | 
         
            +
            ---
         
     | 
| 286 | 
         
            +
             
     | 
| 287 | 
         
            +
            # Model Card for results
         
     | 
| 288 | 
         
            +
             
     | 
| 289 | 
         
            +
            This model is a fine-tuned version of [microsoft/Phi-3-vision-128k-instruct](https://huggingface.co/microsoft/Phi-3-vision-128k-instruct).
         
     | 
| 290 | 
         
            +
            It has been trained using [TRL](https://github.com/huggingface/trl).
         
     | 
| 291 | 
         
            +
             
     | 
| 292 | 
         
            +
            ## Quick start
         
     | 
| 293 | 
         
            +
             
     | 
| 294 | 
         
            +
            ```python
         
     | 
| 295 | 
         
            +
            from transformers import pipeline
         
     | 
| 296 | 
         
            +
             
     | 
| 297 | 
         
            +
            question = ""If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?""
         
     | 
| 298 | 
         
            +
            generator = pipeline(""text-generation"", model=""aschirru34/results"", device=""cuda"")
         
     | 
| 299 | 
         
            +
            output = generator([{""role"": ""user"", ""content"": question}], max_new_tokens=128, return_full_text=False)[0]
         
     | 
| 300 | 
         
            +
            print(output[""generated_text""])
         
     | 
| 301 | 
         
            +
            ```
         
     | 
| 302 | 
         
            +
             
     | 
| 303 | 
         
            +
            ## Training procedure
         
     | 
| 304 | 
         
            +
             
     | 
| 305 | 
         
            +
            [<img src=""https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg"" alt=""Visualize in Weights & Biases"" width=""150"" height=""24""/>](https://wandb.ai/alessandroschirru04-university-of-cagliari/huggingface/runs/sznwrpmw) 
         
     | 
| 306 | 
         
            +
             
     | 
| 307 | 
         
            +
             
     | 
| 308 | 
         
            +
            This model was trained with SFT.
         
     | 
| 309 | 
         
            +
             
     | 
| 310 | 
         
            +
            ### Framework versions
         
     | 
| 311 | 
         
            +
             
     | 
| 312 | 
         
            +
            - TRL: 0.14.0
         
     | 
| 313 | 
         
            +
            - Transformers: 4.47.0
         
     | 
| 314 | 
         
            +
            - Pytorch: 2.5.1+cu121
         
     | 
| 315 | 
         
            +
            - Datasets: 3.2.0
         
     | 
| 316 | 
         
            +
            - Tokenizers: 0.21.0
         
     | 
| 317 | 
         
            +
             
     | 
| 318 | 
         
            +
            ## Citations
         
     | 
| 319 | 
         
            +
             
     | 
| 320 | 
         
            +
             
     | 
| 321 | 
         
            +
             
     | 
| 322 | 
         
            +
            Cite TRL as:
         
     | 
| 323 | 
         
            +
                
         
     | 
| 324 | 
         
            +
            ```bibtex
         
     | 
| 325 | 
         
            +
            @misc{vonwerra2022trl,
         
     | 
| 326 | 
         
            +
            	title        = {{TRL: Transformer Reinforcement Learning}},
         
     | 
| 327 | 
         
            +
            	author       = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec},
         
     | 
| 328 | 
         
            +
            	year         = 2020,
         
     | 
| 329 | 
         
            +
            	journal      = {GitHub repository},
         
     | 
| 330 | 
         
            +
            	publisher    = {GitHub},
         
     | 
| 331 | 
         
            +
            	howpublished = {\url{https://github.com/huggingface/trl}}
         
     | 
| 332 | 
         
            +
            }
         
     | 
| 333 | 
         
            +
            ```","{""id"": ""aschirru34/results"", ""author"": ""aschirru34"", ""sha"": ""0a6671d74ac157d187b04b3b6d4db131f1c72b17"", ""last_modified"": ""2025-02-13 11:35:44+00:00"", ""created_at"": ""2025-02-13 11:35:28+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""generated_from_trainer"", ""trl"", ""sft"", ""base_model:microsoft/Phi-3-vision-128k-instruct"", ""base_model:finetune:microsoft/Phi-3-vision-128k-instruct"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: microsoft/Phi-3-vision-128k-instruct\nlibrary_name: transformers\nmodel_name: results\ntags:\n- generated_from_trainer\n- trl\n- sft\nlicence: license"", ""widget_data"": null, ""model_index"": null, ""config"": {""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{% for message in messages %}{{'<|' + message['role'] + '|>' + '\n' + message['content'] + '<|end|>\n' }}{% endfor %}{% if add_generation_prompt and messages[-1]['role'] != 'assistant' %}{{- '<|assistant|>\n' -}}{% endif %}"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-02-13 11:35:44+00:00"", ""cardData"": ""base_model: microsoft/Phi-3-vision-128k-instruct\nlibrary_name: transformers\nmodel_name: results\ntags:\n- generated_from_trainer\n- trl\n- sft\nlicence: license"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""67add900ffd78255d6331749"", ""modelId"": ""aschirru34/results"", ""usedStorage"": 45138576}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=aschirru34/results&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Baschirru34%2Fresults%5D(%2Faschirru34%2Fresults)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
MCG-NJU/VideoChatOnline-4B,"---
base_model:
- microsoft/Phi-3-vision-128k-instruct
license: mit
pipeline_tag: video-text-to-text
---


# Model Card for VideoChat-Online

This modelcard aims to give the model info of 'Online Video Understanding: OVBench and VideoChat-Online'.

## Model Details

### 🛠Usage
Check the [Demo](https://github.com/MCG-NJU/VideoChat-Online#-demo).

### 📃Model Sources

- **Repository:** [VideoChat-Online](https://github.com/MCG-NJU/VideoChat-Online)
- **Paper:** [2501.00584](https://arxiv.org/abs/2501.00584v1)

## ✏️Citation

If you find this work useful for your research, please consider citing VideoChatOnline. Your acknowledgement would greatly help us in continuing to contribute resources to the research community.

```
@article{huang2024online,
  title={Online Video Understanding: A Comprehensive Benchmark and Memory-Augmented Method},
  author={Huang, Zhenpeng and Li, Xinhao and Li, Jiaqi and Wang, Jing and Zeng, Xiangyu and Liang, Cheng and Wu, Tao and Chen, Xi and Li, Liang and Wang, Limin},
  journal={arXiv preprint arXiv:2501.00584},
  year={2024}
}
            ```","{""id"": ""MCG-NJU/VideoChatOnline-4B"", ""author"": ""MCG-NJU"", ""sha"": ""7373f325b9265527b9363f231b168a14523ac875"", ""last_modified"": ""2025-03-27 11:43:49+00:00"", ""created_at"": ""2025-03-05 17:50:44+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 59, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""safetensors"", ""internvl_chat"", ""video-text-to-text"", ""custom_code"", ""arxiv:2501.00584"", ""base_model:microsoft/Phi-3-vision-128k-instruct"", ""base_model:finetune:microsoft/Phi-3-vision-128k-instruct"", ""license:mit"", ""region:us""], ""pipeline_tag"": ""video-text-to-text"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- microsoft/Phi-3-vision-128k-instruct\nlicense: mit\npipeline_tag: video-text-to-text"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""VideoChatOnline_IT""], ""auto_map"": {""AutoConfig"": ""configuration_internvl_chat.InternVLChatConfig"", ""AutoModel"": ""modeling_internvl_chat.VideoChatOnline_IT"", ""AutoModelForCausalLM"": ""modeling_internvl_chat.VideoChatOnline_IT""}, ""model_type"": ""internvl_chat"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') %}{{'<|user|>' + '\n' + message['content'] + '<|end|>' + '\n' + '<|assistant|>' + '\n'}}{% elif (message['role'] == 'assistant') %}{{message['content'] + '<|end|>' + '\n'}}{% endif %}{% endfor %}"", ""eos_token"": ""</s>"", ""pad_token"": ""</s>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='trainer_state.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 4146855936}, ""total"": 4146855936}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-27 11:43:49+00:00"", ""cardData"": ""base_model:\n- microsoft/Phi-3-vision-128k-instruct\nlicense: mit\npipeline_tag: video-text-to-text"", ""transformersInfo"": null, ""_id"": ""67c88ef4fc6a622fd21b9cb1"", ""modelId"": ""MCG-NJU/VideoChatOnline-4B"", ""usedStorage"": 
8294277763}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=MCG-NJU/VideoChatOnline-4B&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BMCG-NJU%2FVideoChatOnline-4B%5D(%2FMCG-NJU%2FVideoChatOnline-4B)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
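Each of the per-model CSVs added in this upload shares the same header row, `model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count`: `card` holds the raw README markdown shown above, `metadata` holds a JSON dump of the Hub model info, and the relationship columns hold comma-separated lists of Hub URLs. The snippet below is only a minimal sketch of how one of these files could be read back; the file path is just an example taken from this commit, and the use of Python's standard `csv`/`json` modules is an assumption, not part of the dataset itself.

```python
# Minimal sketch (assumption): read one of the uploaded finetune CSVs and unpack
# the embedded metadata JSON. csv.DictReader handles the multi-line quoted "card"
# fields, and json.loads parses the "metadata" column once the CSV quoting is undone.
import csv
import json

# Example path from this commit; substitute any of the other uploaded CSVs.
path = "QVQ-72B-Preview_finetunes_20250425_143346.csv_finetunes_20250425_143346.csv"

with open(path, newline="", encoding="utf-8") as f:
    for row in csv.DictReader(f):
        meta = json.loads(row["metadata"]) if row["metadata"] else {}
        # Relationship columns (children, adapters, quantized, ...) are
        # comma-separated Hub URLs; an empty cell means no entries.
        quantized = [u.strip() for u in row["quantized"].split(",") if u.strip()]
        print(row["model_id"], meta.get("likes"), len(quantized))
```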
         
QVQ-72B-Preview_finetunes_20250425_143346.csv_finetunes_20250425_143346.csv
ADDED
@@ -0,0 +1,704 @@
            model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
         
Qwen/QVQ-72B-Preview,"---
license: other
license_name: qwen
license_link: https://huggingface.co/Qwen/QVQ-72B-Preview/blob/main/LICENSE
language:
- en
pipeline_tag: image-text-to-text
base_model: Qwen/Qwen2-VL-72B
tags:
  - chat
library_name: transformers
---


# QVQ-72B-Preview
<a href=""https://chat.qwenlm.ai/"" target=""_blank"" style=""margin: 2px;"">
    <img alt=""Chat"" src=""https://img.shields.io/badge/%F0%9F%92%9C%EF%B8%8F%20Qwen%20Chat%20-536af5"" style=""display: inline-block; vertical-align: middle;""/>
</a>

## Introduction

**QVQ-72B-Preview** is an experimental research model developed by the Qwen team, focusing on enhancing visual reasoning capabilities.

## Performance

|                | **QVQ-72B-Preview** | o1-2024-12-17 | gpt-4o-2024-05-13 | Claude3.5 Sonnet-20241022 | Qwen2VL-72B |
|----------------|-----------------|---------------|-------------------|----------------------------|-------------|
| MMMU(val)      | 70.3            | 77.3          | 69.1              | 70.4                       | 64.5        |
| MathVista(mini) | 71.4            | 71.0          | 63.8              | 65.3                       | 70.5        |
| MathVision(full)   | 35.9            | –             | 30.4              | 35.6                       | 25.9        |
| OlympiadBench  | 20.4            | –             | 25.9              | –                          | 11.2        |


**QVQ-72B-Preview** has achieved remarkable performance on various benchmarks. It scored a remarkable 70.3% on the Multimodal Massive Multi-task Understanding (MMMU) benchmark, showcasing QVQ's powerful ability in multidisciplinary understanding and reasoning. Furthermore, the significant improvements on MathVision highlight the model's progress in mathematical reasoning tasks. OlympiadBench also demonstrates the model's enhanced ability to tackle challenging problems.

***But It's Not All Perfect:  Acknowledging the Limitations***

While **QVQ-72B-Preview** exhibits promising performance that surpasses expectations, it’s important to acknowledge several limitations:

1. **Language Mixing and Code-Switching:** The model might occasionally mix different languages or unexpectedly switch between them, potentially affecting the clarity of its responses.
2. **Recursive Reasoning Loops:**  There's a risk of the model getting caught in recursive reasoning loops, leading to lengthy responses that may not even arrive at a final answer.
3. **Safety and Ethical Considerations:** Robust safety measures are needed to ensure reliable and safe performance. Users should exercise caution when deploying this model.
4. **Performance and Benchmark Limitations:** Despite the improvements in visual reasoning, QVQ doesn’t entirely replace the capabilities of Qwen2-VL-72B. During multi-step visual reasoning, the model might gradually lose focus on the image content, leading to hallucinations. Moreover, QVQ doesn’t show significant improvement over Qwen2-VL-72B in basic recognition tasks like identifying people, animals, or plants.

Note: Currently, the model only supports single-round dialogues and image outputs. It does not support video inputs.
## Quickstart

We offer a toolkit to help you handle various types of visual input more conveniently. This includes base64, URLs, and interleaved images and videos. You can install it using the following command:

```bash
pip install qwen-vl-utils
```

Here we show a code snippet to show you how to use the chat model with `transformers` and `qwen_vl_utils`:

```python
from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor
from qwen_vl_utils import process_vision_info

# default: Load the model on the available device(s)
model = Qwen2VLForConditionalGeneration.from_pretrained(
    ""Qwen/QVQ-72B-Preview"", torch_dtype=""auto"", device_map=""auto""
)

# default processer
processor = AutoProcessor.from_pretrained(""Qwen/QVQ-72B-Preview"")

# The default range for the number of visual tokens per image in the model is 4-16384. You can set min_pixels and max_pixels according to your needs, such as a token count range of 256-1280, to balance speed and memory usage.
# min_pixels = 256*28*28
# max_pixels = 1280*28*28
# processor = AutoProcessor.from_pretrained(""Qwen/QVQ-72B-Preview"", min_pixels=min_pixels, max_pixels=max_pixels)

messages = [
    {
        ""role"": ""system"",
        ""content"": [
            {""type"": ""text"", ""text"": ""You are a helpful and harmless assistant. You are Qwen developed by Alibaba. You should think step-by-step.""}
        ],
    },
    {
        ""role"": ""user"",
        ""content"": [
            {
                ""type"": ""image"",
                ""image"": ""https://qianwen-res.oss-cn-beijing.aliyuncs.com/QVQ/demo.png"",
            },
            {""type"": ""text"", ""text"": ""What value should be filled in the blank space?""},
        ],
    }
]

# Preparation for inference
text = processor.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
image_inputs, video_inputs = process_vision_info(messages)
inputs = processor(
    text=[text],
    images=image_inputs,
    videos=video_inputs,
    padding=True,
    return_tensors=""pt"",
)
inputs = inputs.to(""cuda"")

# Inference: Generation of the output
generated_ids = model.generate(**inputs, max_new_tokens=8192)
generated_ids_trimmed = [
    out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
output_text = processor.batch_decode(
    generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
print(output_text)
```

## Citation

If you find our work helpful, feel free to give us a cite.

```
@misc{qvq-72b-preview,
    title = {QVQ: To See the World with Wisdom},
    url = {https://qwenlm.github.io/blog/qvq-72b-preview/},
    author = {Qwen Team},
    month = {December},
    year = {2024}
}

@article{Qwen2VL,
  title={Qwen2-VL: Enhancing Vision-Language Model's Perception of the World at Any Resolution},
  author={Wang, Peng and Bai, Shuai and Tan, Sinan and Wang, Shijie and Fan, Zhihao and Bai, Jinze and Chen, Keqin and Liu, Xuejing and Wang, Jialin and Ge, Wenbin and Fan, Yang and Dang, Kai and Du, Mengfei and Ren, Xuancheng and Men, Rui and Liu, Dayiheng and Zhou, Chang and Zhou, Jingren and Lin, Junyang},
  journal={arXiv preprint arXiv:2409.12191},
  year={2024}
}
            ```","{""id"": ""Qwen/QVQ-72B-Preview"", ""author"": ""Qwen"", ""sha"": ""24425f65e80be6804b75ae0a3dc1e5570e660a25"", ""last_modified"": ""2025-01-12 01:54:38+00:00"", ""created_at"": ""2024-12-24 10:13:22+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 50221, ""downloads_all_time"": null, ""likes"": 586, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": ""warm"", ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""qwen2_vl"", ""image-text-to-text"", ""chat"", ""conversational"", ""en"", ""arxiv:2409.12191"", ""base_model:Qwen/Qwen2-VL-72B"", ""base_model:finetune:Qwen/Qwen2-VL-72B"", ""license:other"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""image-text-to-text"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: Qwen/Qwen2-VL-72B\nlanguage:\n- en\nlibrary_name: transformers\nlicense: other\nlicense_name: qwen\nlicense_link: https://huggingface.co/Qwen/QVQ-72B-Preview/blob/main/LICENSE\npipeline_tag: image-text-to-text\ntags:\n- chat"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""Qwen2VLForConditionalGeneration""], ""model_type"": ""qwen2_vl"", ""processor_config"": {""chat_template"": ""{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful and harmless assistant. You are Qwen developed by Alibaba. You should think step-by-step.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}""}, ""tokenizer_config"": {""bos_token"": null, ""chat_template"": ""{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful and harmless assistant. You are Qwen developed by Alibaba. 
You should think step-by-step.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}"", ""eos_token"": ""<|im_end|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": null}}, ""transformers_info"": {""auto_model"": ""AutoModelForImageTextToText"", ""custom_class"": null, ""pipeline_tag"": ""image-text-to-text"", ""processor"": ""AutoProcessor""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='chat_template.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00038.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00038.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00038.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00038.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00038.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00038.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00038.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00038.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00038.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-00038.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-00038.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00012-of-00038.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00013-of-00038.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00014-of-00038.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00015-of-00038.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00016-of-00038.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00017-of-00038.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00018-of-00038.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00019-of-00038.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00020-of-00038.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00021-of-00038.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00022-of-00038.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00023-of-00038.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00024-of-00038.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00025-of-00038.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00026-of-00038.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00027-of-00038.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00028-of-00038.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00029-of-00038.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00030-of-00038.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00031-of-00038.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00032-of-00038.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00033-of-00038.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00034-of-00038.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00035-of-00038.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00036-of-00038.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00037-of-00038.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00038-of-00038.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""evbro/Qwen-QVQ-72B-Preview99"", ""hrsprojects/R"", ""OP7/SG_TestSpace"", ""ukaner/Qwen-QVQ-72B-Preview"", ""RomanRG008/Qwen-QVQ-72B-Preview"", ""majortom666123/Qwen-QVQ-72B-Preview"", ""ELUp/Qwen-QVQ-72B-Preview"", ""MartsoBodziu1994/Qwen-QVQ-72B-Preview"", ""wgdp5000/Qwen-QVQ-72B-Preview"", ""jackyin/Qwen-QVQ-72B-Preview"", ""lahuseynli/Qwen-QVQ-72B-Preview"", ""rienn/Qwen-QVQ-72B-Preview"", ""evbro/Qwen-QVQ-72B-Preview"", ""MimoInvers/Qwen-QVQ-72B-Preview"", ""hrsprojects/Qwen-QVQ-72B-Preview"", ""hrsprojects/Qwen-QVQ-72B-Preview1"", ""hrsprojects/J"", ""hrsprojects/Qwen-QVQ-72B-Preview2"", ""changewhen/Qwen-QVQ-72B-Preview"", ""Doubleupai/Flux-api"", ""Prakhar31/Qwen-QVQ-72B-Preview"", ""Nocigar/siliconflow"", ""TheresaQWQ/Qwen-QVQ-72B-Preview"", ""Aleksmorshen/Qwen-QVQ-72B-Preview"", ""jogoh/Image-Reasoning"", ""jogoh/image"", ""shashianand/LLm"", ""shashianand/LLms"", ""sanjog007/Test"", ""kazukikun/Qwen-QVQ-72B-Preview"", ""OP7/Test_nvidia_4xL40S"", ""Tao0805/Qwen-QVQ-72B-Preview"", ""3emibrahim/debugging-testing1""], ""safetensors"": {""parameters"": {""BF16"": 73405560320}, ""total"": 73405560320}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-01-12 01:54:38+00:00"", ""cardData"": 
""base_model: Qwen/Qwen2-VL-72B\nlanguage:\n- en\nlibrary_name: transformers\nlicense: other\nlicense_name: qwen\nlicense_link: https://huggingface.co/Qwen/QVQ-72B-Preview/blob/main/LICENSE\npipeline_tag: image-text-to-text\ntags:\n- chat"", ""transformersInfo"": {""auto_model"": ""AutoModelForImageTextToText"", ""custom_class"": null, ""pipeline_tag"": ""image-text-to-text"", ""processor"": ""AutoProcessor""}, ""_id"": ""676a8942ad042e047d99ff10"", ""modelId"": ""Qwen/QVQ-72B-Preview"", ""usedStorage"": 146811273776}",0,"https://huggingface.co/unsloth/QVQ-72B-Preview, https://huggingface.co/huihui-ai/QVQ-72B-Preview-abliterated, https://huggingface.co/sangeetha1993/Coder-Qwen, https://huggingface.co/whitedell/YourAi, https://huggingface.co/rrbhanderi05/FaceGoto-t1, https://huggingface.co/shad777725/risk_model",6,"https://huggingface.co/DDBMST3W/OLE_ENG, https://huggingface.co/rahmatov33/lotheus, https://huggingface.co/Jaythakur/clothify",3,"https://huggingface.co/lmstudio-community/QVQ-72B-Preview-GGUF, https://huggingface.co/unsloth/QVQ-72B-Preview-bnb-4bit, https://huggingface.co/matatonic/QVQ-72B-Preview-abliterated-6.5bpw-h8-exl2, https://huggingface.co/bartowski/QVQ-72B-Preview-GGUF, https://huggingface.co/kosbu/QVQ-72B-Preview-AWQ, https://huggingface.co/DevQuasar/Qwen.QVQ-72B-Preview-GGUF, https://huggingface.co/mradermacher/QVQ-72B-Preview-GGUF, https://huggingface.co/mmnga/QVQ-72B-Preview-gguf, https://huggingface.co/mradermacher/QVQ-72B-Preview-i1-GGUF, https://huggingface.co/tensorblock/QVQ-72B-Preview-GGUF, https://huggingface.co/Dracones/QVQ-72B-Preview_exl2_8.0bpw, https://huggingface.co/Dracones/QVQ-72B-Preview_exl2_7.0bpw, https://huggingface.co/Dracones/QVQ-72B-Preview_exl2_6.0bpw, https://huggingface.co/Dracones/QVQ-72B-Preview_exl2_5.0bpw, https://huggingface.co/Dracones/QVQ-72B-Preview_exl2_4.5bpw, https://huggingface.co/Dracones/QVQ-72B-Preview_exl2_4.0bpw, https://huggingface.co/Dracones/QVQ-72B-Preview_exl2_3.5bpw, https://huggingface.co/Dracones/QVQ-72B-Preview_exl2_3.0bpw, https://huggingface.co/Dracones/QVQ-72B-Preview_exl2_2.75bpw, https://huggingface.co/Dracones/QVQ-72B-Preview_exl2_2.5bpw, https://huggingface.co/Dracones/QVQ-72B-Preview_exl2_2.25bpw, https://huggingface.co/bullerwins/QVQ-72B-Preview-exl2_4.0bpw, https://huggingface.co/bullerwins/QVQ-72B-Preview-exl2_5.0bpw, https://huggingface.co/bullerwins/QVQ-72B-Preview-exl2_6.0bpw, https://huggingface.co/bullerwins/QVQ-72B-Preview-exl2_8.0bpw, https://huggingface.co/XelotX/QVQ-72B-Preview-GGUF, https://huggingface.co/bullerwins/QVQ-72B-Preview-exl2_4.5bpw, https://huggingface.co/bullerwins/QVQ-72B-Preview-exl2_5.5bpw, https://huggingface.co/bullerwins/QVQ-72B-Preview-exl2_3.0bpw, https://huggingface.co/bullerwins/QVQ-72B-Preview-exl2_3.5bpw, https://huggingface.co/OPEA/QVQ-72B-Preview-int4-sym-inc",31,,0,"3emibrahim/debugging-testing1, ELUp/Qwen-QVQ-72B-Preview, MartsoBodziu1994/Qwen-QVQ-72B-Preview, Nocigar/siliconflow, OP7/SG_TestSpace, RomanRG008/Qwen-QVQ-72B-Preview, evbro/Qwen-QVQ-72B-Preview99, hrsprojects/R, jackyin/Qwen-QVQ-72B-Preview, majortom666123/Qwen-QVQ-72B-Preview, ukaner/Qwen-QVQ-72B-Preview, wgdp5000/Qwen-QVQ-72B-Preview",12
         
     | 
| 138 | 
         
            +
            unsloth/QVQ-72B-Preview,"---
         
     | 
| 139 | 
         
            +
            license: other
         
     | 
| 140 | 
         
            +
            license_name: qwen
         
     | 
| 141 | 
         
            +
            license_link: https://huggingface.co/Qwen/QVQ-72B-Preview/blob/main/LICENSE
         
     | 
| 142 | 
         
            +
            language:
         
     | 
| 143 | 
         
            +
            - en
         
     | 
| 144 | 
         
            +
            pipeline_tag: image-text-to-text
         
     | 
| 145 | 
         
            +
            base_model: Qwen/QVQ-72B-Preview
         
     | 
| 146 | 
         
            +
            tags:
         
     | 
| 147 | 
         
            +
              - chat
         
     | 
| 148 | 
         
            +
              - qwen
         
     | 
| 149 | 
         
            +
            library_name: transformers
         
     | 
| 150 | 
         
            +
            ---
         
     | 
| 151 | 
         
            +
             
     | 
| 152 | 
         
            +
             
     | 
| 153 | 
         
            +
            # QVQ-72B-Preview
         
     | 
| 154 | 
         
            +
             
     | 
| 155 | 
         
            +
            ## Introduction
         
     | 
| 156 | 
         
            +
             
     | 
| 157 | 
         
            +
            **QVQ-72B-Preview** is an experimental research model developed by the Qwen team, focusing on enhancing visual reasoning capabilities.
         
     | 
| 158 | 
         
            +
             
     | 
| 159 | 
         
            +
            ## Performance
         
     | 
| 160 | 
         
            +
             
     | 
| 161 | 
         
            +
            |                | **QVQ-72B-Preview** | o1-2024-12-17 | gpt-4o-2024-05-13 | Claude3.5 Sonnet-20241022 | Qwen2VL-72B |
         
     | 
| 162 | 
         
            +
            |----------------|-----------------|---------------|-------------------|----------------------------|-------------|
         
     | 
| 163 | 
         
            +
            | MMMU(val)      | 70.3            | 77.3          | 69.1              | 70.4                       | 64.5        |
         
     | 
| 164 | 
         
            +
            | MathVista(mini) | 71.4            | 71.0          | 63.8              | 65.3                       | 70.5        |
         
     | 
| 165 | 
         
            +
            | MathVision(full)   | 35.9            | –             | 30.4              | 35.6                       | 25.9        |
         
     | 
| 166 | 
         
            +
            | OlympiadBench  | 20.4            | –             | 25.9              | –                          | 11.2        |
         
     | 
| 167 | 
         
            +
             
     | 
| 168 | 
         
            +
             
     | 
| 169 | 
         
            +
            **QVQ-72B-Preview** has achieved remarkable performance on various benchmarks. It scored a remarkable 70.3% on the Multimodal Massive Multi-task Understanding (MMMU) benchmark, showcasing QVQ's powerful ability in multidisciplinary understanding and reasoning. Furthermore, the significant improvements on MathVision highlight the model's progress in mathematical reasoning tasks. OlympiadBench also demonstrates the model's enhanced ability to tackle challenging problems.
         
     | 
| 170 | 
         
            +
             
     | 
| 171 | 
         
            +
            ***But It's Not All Perfect:  Acknowledging the Limitations***
         
     | 
| 172 | 
         
            +
             
     | 
| 173 | 
         
            +
            While **QVQ-72B-Preview** exhibits promising performance that surpasses expectations, it’s important to acknowledge several limitations:
         
     | 
| 174 | 
         
            +
             
     | 
| 175 | 
         
            +
            1. **Language Mixing and Code-Switching:** The model might occasionally mix different languages or unexpectedly switch between them, potentially affecting the clarity of its responses.
         
     | 
| 176 | 
         
            +
            2. **Recursive Reasoning Loops:**  There's a risk of the model getting caught in recursive reasoning loops, leading to lengthy responses that may not even arrive at a final answer.
         
     | 
| 177 | 
         
            +
            3. **Safety and Ethical Considerations:** Robust safety measures are needed to ensure reliable and safe performance. Users should exercise caution when deploying this model.
         
     | 
| 178 | 
         
            +
            4. **Performance and Benchmark Limitations:** Despite the improvements in visual reasoning, QVQ doesn’t entirely replace the capabilities of Qwen2-VL-72B. During multi-step visual reasoning, the model might gradually lose focus on the image content, leading to hallucinations. Moreover, QVQ doesn’t show significant improvement over Qwen2-VL-72B in basic recognition tasks like identifying people, animals, or plants.
         
     | 
| 179 | 
         
            +
             
     | 
| 180 | 
         
            +
            Note: Currently, the model only supports single-round dialogues and image outputs. It does not support video inputs.
         
     | 
| 181 | 
         
            +
            ## Quickstart
         
     | 
| 182 | 
         
            +
             
     | 
| 183 | 
         
            +
            We offer a toolkit to help you handle various types of visual input more conveniently. This includes base64, URLs, and interleaved images and videos. You can install it using the following command:
         
     | 
| 184 | 
         
            +
             
     | 
| 185 | 
         
            +
            ```bash
         
     | 
| 186 | 
         
            +
            pip install qwen-vl-utils
         
     | 
| 187 | 
         
            +
            ```
         
     | 
| 188 | 
         
            +
             
     | 
| 189 | 
         
            +
Here we show a code snippet to show you how to use the chat model with `transformers` and `qwen_vl_utils`:

```python
from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor
from qwen_vl_utils import process_vision_info

# default: Load the model on the available device(s)
model = Qwen2VLForConditionalGeneration.from_pretrained(
    "Qwen/QVQ-72B-Preview", torch_dtype="auto", device_map="auto"
)

# default processor
processor = AutoProcessor.from_pretrained("Qwen/QVQ-72B-Preview")

# The default range for the number of visual tokens per image in the model is 4-16384. You can set min_pixels and max_pixels according to your needs, such as a token count range of 256-1280, to balance speed and memory usage.
# min_pixels = 256*28*28
# max_pixels = 1280*28*28
# processor = AutoProcessor.from_pretrained("Qwen/QVQ-72B-Preview", min_pixels=min_pixels, max_pixels=max_pixels)

messages = [
    {
        "role": "system",
        "content": [
            {"type": "text", "text": "You are a helpful and harmless assistant. You are Qwen developed by Alibaba. You should think step-by-step."}
        ],
    },
    {
        "role": "user",
        "content": [
            {
                "type": "image",
                "image": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/QVQ/demo.png",
            },
            {"type": "text", "text": "What value should be filled in the blank space?"},
        ],
    }
]

# Preparation for inference
text = processor.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
image_inputs, video_inputs = process_vision_info(messages)
inputs = processor(
    text=[text],
    images=image_inputs,
    videos=video_inputs,
    padding=True,
    return_tensors="pt",
)
inputs = inputs.to("cuda")

# Inference: Generation of the output
generated_ids = model.generate(**inputs, max_new_tokens=8192)
generated_ids_trimmed = [
    out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
output_text = processor.batch_decode(
    generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
print(output_text)
```
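A quick note on the arithmetic behind the commented-out `min_pixels`/`max_pixels` lines (this explanation is ours, not part of the original card): each visual token corresponds to roughly a 28x28 pixel area, so a target token budget maps to a pixel budget of `tokens * 28 * 28`. A minimal sketch, assuming the example range of 256-1280 tokens from the comment:

```python
# Illustration only: convert a visual-token budget into the pixel bounds the processor expects.
from transformers import AutoProcessor

min_tokens, max_tokens = 256, 1280      # example range from the comment above
min_pixels = min_tokens * 28 * 28       # = 200704 pixels
max_pixels = max_tokens * 28 * 28       # = 1003520 pixels

processor = AutoProcessor.from_pretrained(
    "Qwen/QVQ-72B-Preview", min_pixels=min_pixels, max_pixels=max_pixels
)
```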
         
## Citation

If you find our work helpful, feel free to give us a cite.

```
@misc{qvq-72b-preview,
    title = {QVQ: To See the World with Wisdom},
    url = {https://qwenlm.github.io/blog/qvq-72b-preview/},
    author = {Qwen Team},
    month = {December},
    year = {2024}
}

@article{Qwen2VL,
  title={Qwen2-VL: Enhancing Vision-Language Model's Perception of the World at Any Resolution},
  author={Wang, Peng and Bai, Shuai and Tan, Sinan and Wang, Shijie and Fan, Zhihao and Bai, Jinze and Chen, Keqin and Liu, Xuejing and Wang, Jialin and Ge, Wenbin and Fan, Yang and Dang, Kai and Du, Mengfei and Ren, Xuancheng and Men, Rui and Liu, Dayiheng and Zhou, Chang and Zhou, Jingren and Lin, Junyang},
  journal={arXiv preprint arXiv:2409.12191},
  year={2024}
}
            ```","{""id"": ""unsloth/QVQ-72B-Preview"", ""author"": ""unsloth"", ""sha"": ""cc84fba1bd17b8a3498876c64ba483d1eb54c600"", ""last_modified"": ""2024-12-25 00:05:10+00:00"", ""created_at"": ""2024-12-24 23:45:07+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 5, ""downloads_all_time"": null, ""likes"": 1, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""qwen2_vl"", ""image-text-to-text"", ""chat"", ""qwen"", ""conversational"", ""en"", ""arxiv:2409.12191"", ""base_model:Qwen/QVQ-72B-Preview"", ""base_model:finetune:Qwen/QVQ-72B-Preview"", ""license:other"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""image-text-to-text"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: Qwen/QVQ-72B-Preview\nlanguage:\n- en\nlibrary_name: transformers\nlicense: other\nlicense_name: qwen\nlicense_link: https://huggingface.co/Qwen/QVQ-72B-Preview/blob/main/LICENSE\npipeline_tag: image-text-to-text\ntags:\n- chat\n- qwen"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""Qwen2VLForConditionalGeneration""], ""model_type"": ""qwen2_vl"", ""processor_config"": {""chat_template"": ""{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful and harmless assistant. You are Qwen developed by Alibaba. You should think step-by-step.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}""}, ""tokenizer_config"": {""bos_token"": null, ""chat_template"": ""{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful and harmless assistant. You are Qwen developed by Alibaba. 
You should think step-by-step.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}"", ""eos_token"": ""<|im_end|>"", ""pad_token"": ""<|vision_pad|>"", ""unk_token"": null}}, ""transformers_info"": {""auto_model"": ""AutoModelForImageTextToText"", ""custom_class"": null, ""pipeline_tag"": ""image-text-to-text"", ""processor"": ""AutoProcessor""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='chat_template.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00012-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00013-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00014-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00015-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00016-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00017-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00018-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00019-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00020-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00021-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00022-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00023-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00024-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00025-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00026-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00027-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00028-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00029-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00030-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00031-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 73405560320}, ""total"": 73405560320}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-12-25 00:05:10+00:00"", ""cardData"": ""base_model: Qwen/QVQ-72B-Preview\nlanguage:\n- en\nlibrary_name: transformers\nlicense: other\nlicense_name: qwen\nlicense_link: https://huggingface.co/Qwen/QVQ-72B-Preview/blob/main/LICENSE\npipeline_tag: image-text-to-text\ntags:\n- chat\n- qwen"", ""transformersInfo"": {""auto_model"": ""AutoModelForImageTextToText"", ""custom_class"": null, ""pipeline_tag"": ""image-text-to-text"", ""processor"": ""AutoProcessor""}, ""_id"": ""676b47836fb487638398a5cc"", ""modelId"": ""unsloth/QVQ-72B-Preview"", ""usedStorage"": 146822695168}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=unsloth/QVQ-72B-Preview&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bunsloth%2FQVQ-72B-Preview%5D(%2Funsloth%2FQVQ-72B-Preview)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
huihui-ai/QVQ-72B-Preview-abliterated,"---
license: other
license_name: qwen
license_link: https://huggingface.co/huihui-ai/QVQ-72B-Preview-abliterated/blob/main/LICENSE
language:
- en
pipeline_tag: image-text-to-text
base_model: Qwen/QVQ-72B-Preview
tags:
- abliterated
- uncensored
  - chat
library_name: transformers
---

# huihui-ai/QVQ-72B-Preview-abliterated

This is an uncensored version of [Qwen/QVQ-72B-Preview](https://huggingface.co/Qwen/QVQ-72B-Preview) created with abliteration (see [remove-refusals-with-transformers](https://github.com/Sumandora/remove-refusals-with-transformers) to learn more about it).

This is a crude, proof-of-concept implementation of removing refusals from an LLM without using TransformerLens.

Only the text part of the model was processed, not the image part.
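The card does not spell out the abliteration procedure itself, so here is a minimal sketch of the general idea (our illustration under stated assumptions, not the author's code; the stand-in model, prompt lists, and projection targets are placeholders, and the linked remove-refusals-with-transformers repo is the actual reference): estimate a "refusal direction" from the difference in mean hidden states between harmful and harmless prompts, then remove that direction from each block's output projections so the model can no longer write along it.

```python
# Sketch of refusal-direction removal ("abliteration"). Illustration only.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "Qwen/Qwen2.5-0.5B-Instruct"   # small stand-in model, not QVQ-72B
tok = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float32)
model.eval()

harmful = ["Explain how to pick a lock."]       # placeholder prompt sets
harmless = ["Explain how to bake bread."]

def mean_hidden(prompts, layer=-1):
    """Mean hidden state of the last token at `layer`, averaged over the prompts."""
    states = []
    for p in prompts:
        ids = tok(p, return_tensors="pt")
        with torch.no_grad():
            out = model(**ids, output_hidden_states=True)
        states.append(out.hidden_states[layer][0, -1])
    return torch.stack(states).mean(dim=0)

# Refusal direction = normalised difference of means.
direction = mean_hidden(harmful) - mean_hidden(harmless)
direction = direction / direction.norm()

# Remove the direction's component from each block's output projections:
# W <- (I - d d^T) W, so those layers can no longer write along `direction`.
with torch.no_grad():
    for block in model.model.layers:
        for proj in (block.self_attn.o_proj, block.mlp.down_proj):
            W = proj.weight
            proj.weight.copy_(W - torch.outer(direction, direction @ W))
```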
         
## Usage
We offer a toolkit to help you handle various types of visual input more conveniently. This includes base64, URLs, and interleaved images and videos. You can install it using the following command:

```bash
pip install qwen-vl-utils
```
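For reference, here is a minimal sketch (ours, not from the original card) of the input forms the toolkit paragraph mentions for the `"image"` field: an HTTP(S) URL, a local file, or base64-encoded data. The local path and base64 payload below are placeholders:

```python
# Illustration only: the same message structure with a URL, a local file, or base64 data.
messages_url = [{"role": "user", "content": [
    {"type": "image", "image": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/QVQ/demo.png"},
    {"type": "text", "text": "Describe this image."},
]}]

messages_local = [{"role": "user", "content": [
    {"type": "image", "image": "file:///path/to/your/image.png"},            # placeholder path
    {"type": "text", "text": "Describe this image."},
]}]

messages_base64 = [{"role": "user", "content": [
    {"type": "image", "image": "data:image;base64,<base64-encoded-bytes>"},  # placeholder payload
    {"type": "text", "text": "Describe this image."},
]}]
```

Any of these can then be passed through `process_vision_info` exactly as in the full example below.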
         
Here we show a code snippet to show you how to use the chat model with `transformers` and `qwen_vl_utils`:

```python
from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor
from qwen_vl_utils import process_vision_info

# default: Load the model on the available device(s)
model = Qwen2VLForConditionalGeneration.from_pretrained(
    "huihui-ai/QVQ-72B-Preview-abliterated", torch_dtype="auto", device_map="auto"
)

# default processor
processor = AutoProcessor.from_pretrained("huihui-ai/QVQ-72B-Preview-abliterated")

# The default range for the number of visual tokens per image in the model is 4-16384. You can set min_pixels and max_pixels according to your needs, such as a token count range of 256-1280, to balance speed and memory usage.
# min_pixels = 256*28*28
# max_pixels = 1280*28*28
# processor = AutoProcessor.from_pretrained("huihui-ai/QVQ-72B-Preview-abliterated", min_pixels=min_pixels, max_pixels=max_pixels)

messages = [
    {
        "role": "system",
        "content": [
            {"type": "text", "text": "You are a helpful and harmless assistant. You are Qwen developed by Alibaba. You should think step-by-step."}
        ],
    },
    {
        "role": "user",
        "content": [
            {
                "type": "image",
                "image": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/QVQ/demo.png",
            },
            {"type": "text", "text": "What value should be filled in the blank space?"},
        ],
    }
]

# Preparation for inference
text = processor.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
image_inputs, video_inputs = process_vision_info(messages)
inputs = processor(
    text=[text],
    images=image_inputs,
    videos=video_inputs,
    padding=True,
    return_tensors="pt",
)
inputs = inputs.to("cuda")

# Inference: Generation of the output
generated_ids = model.generate(**inputs, max_new_tokens=8192)
generated_ids_trimmed = [
    out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
output_text = processor.batch_decode(
    generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
print(output_text)
```
            ","{""id"": ""huihui-ai/QVQ-72B-Preview-abliterated"", ""author"": ""huihui-ai"", ""sha"": ""45f4d0743e9169ce8831d98a25bf923ebac64309"", ""last_modified"": ""2024-12-26 22:33:08+00:00"", ""created_at"": ""2024-12-26 15:14:14+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 14, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""qwen2_vl"", ""image-text-to-text"", ""abliterated"", ""uncensored - chat"", ""conversational"", ""en"", ""base_model:Qwen/QVQ-72B-Preview"", ""base_model:finetune:Qwen/QVQ-72B-Preview"", ""license:other"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""image-text-to-text"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: Qwen/QVQ-72B-Preview\nlanguage:\n- en\nlibrary_name: transformers\nlicense: other\nlicense_name: qwen\nlicense_link: https://huggingface.co/huihui-ai/QVQ-72B-Preview-abliterated/blob/main/LICENSE\npipeline_tag: image-text-to-text\ntags:\n- abliterated\n- uncensored - chat"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""Qwen2VLForConditionalGeneration""], ""model_type"": ""qwen2_vl"", ""processor_config"": {""chat_template"": ""{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful and harmless assistant. You are Qwen developed by Alibaba. You should think step-by-step.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}""}, ""tokenizer_config"": {""bos_token"": null, ""chat_template"": ""{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful and harmless assistant. You are Qwen developed by Alibaba. 
You should think step-by-step.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}"", ""eos_token"": ""<|im_end|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": null}}, ""transformers_info"": {""auto_model"": ""AutoModelForImageTextToText"", ""custom_class"": null, ""pipeline_tag"": ""image-text-to-text"", ""processor"": ""AutoProcessor""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='chat_template.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00012-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00013-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00014-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00015-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00016-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00017-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00018-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00019-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00020-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00021-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00022-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00023-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00024-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00025-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00026-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00027-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00028-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00029-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00030-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00031-of-00031.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 73405560320}, ""total"": 73405560320}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-12-26 22:33:08+00:00"", ""cardData"": ""base_model: Qwen/QVQ-72B-Preview\nlanguage:\n- en\nlibrary_name: transformers\nlicense: other\nlicense_name: qwen\nlicense_link: https://huggingface.co/huihui-ai/QVQ-72B-Preview-abliterated/blob/main/LICENSE\npipeline_tag: image-text-to-text\ntags:\n- abliterated\n- uncensored - chat"", ""transformersInfo"": {""auto_model"": ""AutoModelForImageTextToText"", ""custom_class"": null, ""pipeline_tag"": ""image-text-to-text"", ""processor"": ""AutoProcessor""}, ""_id"": ""676d72c6d59544c38f2f7d0b"", ""modelId"": ""huihui-ai/QVQ-72B-Preview-abliterated"", ""usedStorage"": 146822695168}",1,,0,,0,"https://huggingface.co/mradermacher/QVQ-72B-Preview-abliterated-i1-GGUF, https://huggingface.co/huihui-ai/QVQ-72B-Preview-abliterated-GPTQ-Int4, https://huggingface.co/mradermacher/QVQ-72B-Preview-abliterated-GGUF, https://huggingface.co/huihui-ai/QVQ-72B-Preview-abliterated-GPTQ-Int8",4,,0,huggingface/InferenceSupport/discussions/new?title=huihui-ai/QVQ-72B-Preview-abliterated&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bhuihui-ai%2FQVQ-72B-Preview-abliterated%5D(%2Fhuihui-ai%2FQVQ-72B-Preview-abliterated)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
sangeetha1993/Coder-Qwen,"---
license: mit
language:
- en
base_model:
- Qwen/QVQ-72B-Preview
pipeline_tag: text2text-generation
            ---","{""id"": ""sangeetha1993/Coder-Qwen"", ""author"": ""sangeetha1993"", ""sha"": ""1a1cb9b72c1c0990f23696473ee13dd11c0bd8b0"", ""last_modified"": ""2025-01-02 06:31:04+00:00"", ""created_at"": ""2025-01-02 06:29:13+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""text2text-generation"", ""en"", ""base_model:Qwen/QVQ-72B-Preview"", ""base_model:finetune:Qwen/QVQ-72B-Preview"", ""license:mit"", ""region:us""], ""pipeline_tag"": ""text2text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- Qwen/QVQ-72B-Preview\nlanguage:\n- en\nlicense: mit\npipeline_tag: text2text-generation"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-01-02 06:31:04+00:00"", ""cardData"": ""base_model:\n- Qwen/QVQ-72B-Preview\nlanguage:\n- en\nlicense: mit\npipeline_tag: text2text-generation"", ""transformersInfo"": null, ""_id"": ""67763239d3ceb4493fd99919"", ""modelId"": ""sangeetha1993/Coder-Qwen"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=sangeetha1993/Coder-Qwen&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bsangeetha1993%2FCoder-Qwen%5D(%2Fsangeetha1993%2FCoder-Qwen)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
     | 
| 378 | 
         
            +
            whitedell/YourAi,"---
         
     | 
| 379 | 
         
            +
            license: apache-2.0
         
     | 
| 380 | 
         
            +
            datasets:
         
     | 
| 381 | 
         
            +
            - HuggingFaceFW/fineweb-2
         
     | 
| 382 | 
         
            +
            language:
         
     | 
| 383 | 
         
            +
            - aa
         
     | 
| 384 | 
         
            +
            - ae
         
     | 
| 385 | 
         
            +
            - af
         
     | 
| 386 | 
         
            +
            - ak
         
     | 
| 387 | 
         
            +
            - am
         
     | 
| 388 | 
         
            +
            - ar
         
     | 
| 389 | 
         
            +
            - as
         
     | 
| 390 | 
         
            +
            - hi
         
     | 
| 391 | 
         
            +
            - en
         
     | 
| 392 | 
         
            +
            - ne
         
     | 
| 393 | 
         
            +
            - bh
         
     | 
| 394 | 
         
            +
            - sa
         
     | 
| 395 | 
         
            +
            metrics:
         
     | 
| 396 | 
         
            +
            - accuracy
         
     | 
| 397 | 
         
            +
            base_model:
         
     | 
| 398 | 
         
            +
            - Qwen/QVQ-72B-Preview
         
     | 
| 399 | 
         
            +
            new_version: Qwen/QwQ-32B-Preview
         
     | 
| 400 | 
         
            +
            pipeline_tag: question-answering
         
     | 
| 401 | 
         
            +
            ---
         
     | 
| 402 | 
         
            +
            # Model Card for Model ID
         
     | 
| 403 | 
         
            +
             
     | 
| 404 | 
         
            +
            <!-- Provide a quick summary of what the model is/does. -->
         
     | 
| 405 | 
         
            +
             
     | 
| 406 | 
         
            +
            This modelcard aims to be a base template for new models. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/modelcard_template.md?plain=1).
         
     | 
| 407 | 
         
            +
             
     | 
| 408 | 
         
            +
            ## Model Details
         
     | 
| 409 | 
         
            +
             
     | 
| 410 | 
         
            +
            ### Model Description
         
     | 
| 411 | 
         
            +
             
     | 
| 412 | 
         
            +
            <!-- Provide a longer summary of what this model is. -->
         
     | 
| 413 | 
         
            +
             
     | 
| 414 | 
         
            +
             
     | 
| 415 | 
         
            +
             
     | 
| 416 | 
         
            +
            - **Developed by:** [More Information Needed]
         
     | 
| 417 | 
         
            +
            - **Funded by [optional]:** [More Information Needed]
         
     | 
| 418 | 
         
            +
            - **Shared by [optional]:** [More Information Needed]
         
     | 
| 419 | 
         
            +
            - **Model type:** [More Information Needed]
         
     | 
| 420 | 
         
            +
            - **Language(s) (NLP):** [More Information Needed]
         
     | 
| 421 | 
         
            +
            - **License:** [More Information Needed]
         
     | 
| 422 | 
         
            +
            - **Finetuned from model [optional]:** [More Information Needed]
         
     | 
| 423 | 
         
            +
             
     | 
| 424 | 
         
            +
            ### Model Sources [optional]
         
     | 
| 425 | 
         
            +
             
     | 
| 426 | 
         
            +
            <!-- Provide the basic links for the model. -->
         
     | 
| 427 | 
         
            +
             
     | 
| 428 | 
         
            +
            - **Repository:** [More Information Needed]
         
     | 
| 429 | 
         
            +
            - **Paper [optional]:** [More Information Needed]
         
     | 
| 430 | 
         
            +
            - **Demo [optional]:** [More Information Needed]
         
     | 
| 431 | 
         
            +
             
     | 
| 432 | 
         
            +
            ## Uses
         
     | 
| 433 | 
         
            +
             
     | 
| 434 | 
         
            +
            <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
         
     | 
| 435 | 
         
            +
             
     | 
| 436 | 
         
            +
            ### Direct Use
         
     | 
| 437 | 
         
            +
             
     | 
| 438 | 
         
            +
            <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
         
     | 
| 439 | 
         
            +
             
     | 
| 440 | 
         
            +
            [More Information Needed]
         
     | 
| 441 | 
         
            +
             
     | 
| 442 | 
         
            +
            ### Downstream Use [optional]
         
     | 
| 443 | 
         
            +
             
     | 
| 444 | 
         
            +
            <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
         
     | 
| 445 | 
         
            +
             
     | 
| 446 | 
         
            +
            [More Information Needed]
         
     | 
| 447 | 
         
            +
             
     | 
| 448 | 
         
            +
            ### Out-of-Scope Use
         
     | 
| 449 | 
         
            +
             
     | 
| 450 | 
         
            +
            <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
         
     | 
| 451 | 
         
            +
             
     | 
| 452 | 
         
            +
            [More Information Needed]
         
     | 
| 453 | 
         
            +
             
     | 
| 454 | 
         
            +
            ## Bias, Risks, and Limitations
         
     | 
| 455 | 
         
            +
             
     | 
| 456 | 
         
            +
            <!-- This section is meant to convey both technical and sociotechnical limitations. -->
         
     | 
| 457 | 
         
            +
             
     | 
| 458 | 
         
            +
            [More Information Needed]
         
     | 
| 459 | 
         
            +
             
     | 
| 460 | 
         
            +
            ### Recommendations
         
     | 
| 461 | 
         
            +
             
     | 
| 462 | 
         
            +
            <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
         
     | 
| 463 | 
         
            +
             
     | 
| 464 | 
         
            +
            Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
         
     | 
| 465 | 
         
            +
             
     | 
| 466 | 
         
            +
            ## How to Get Started with the Model
         
     | 
| 467 | 
         
            +
             
     | 
| 468 | 
         
            +
            Use the code below to get started with the model.
         
     | 
| 469 | 
         
            +
             
     | 
| 470 | 
         
            +
            [More Information Needed]
         
     | 
| 471 | 
         
            +
             
     | 
| 472 | 
         
            +
            ## Training Details
         
     | 
| 473 | 
         
            +
             
     | 
| 474 | 
         
            +
            ### Training Data
         
     | 
| 475 | 
         
            +
             
     | 
| 476 | 
         
            +
            <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
         
     | 
| 477 | 
         
            +
             
     | 
| 478 | 
         
            +
            [More Information Needed]
         
     | 
| 479 | 
         
            +
             
     | 
| 480 | 
         
            +
            ### Training Procedure
         
     | 
| 481 | 
         
            +
             
     | 
| 482 | 
         
            +
            <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
         
     | 
| 483 | 
         
            +
             
     | 
| 484 | 
         
            +
            #### Preprocessing [optional]
         
     | 
| 485 | 
         
            +
             
     | 
| 486 | 
         
            +
            [More Information Needed]
         
     | 
| 487 | 
         
            +
             
     | 
| 488 | 
         
            +
             
     | 
| 489 | 
         
            +
            #### Training Hyperparameters
         
     | 
| 490 | 
         
            +
             
     | 
| 491 | 
         
            +
            - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
         
     | 
| 492 | 
         
            +
             
     | 
| 493 | 
         
            +
            #### Speeds, Sizes, Times [optional]
         
     | 
| 494 | 
         
            +
             
     | 
| 495 | 
         
            +
            <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
         
     | 
| 496 | 
         
            +
             
     | 
| 497 | 
         
            +
            [More Information Needed]
         
     | 
| 498 | 
         
            +
             
     | 
| 499 | 
         
            +
            ## Evaluation
         
     | 
| 500 | 
         
            +
             
     | 
| 501 | 
         
            +
            <!-- This section describes the evaluation protocols and provides the results. -->
         
     | 
| 502 | 
         
            +
             
     | 
| 503 | 
         
            +
            ### Testing Data, Factors & Metrics
         
     | 
| 504 | 
         
            +
             
     | 
| 505 | 
         
            +
            #### Testing Data
         
     | 
| 506 | 
         
            +
             
     | 
| 507 | 
         
            +
            <!-- This should link to a Dataset Card if possible. -->
         
     | 
| 508 | 
         
            +
             
     | 
| 509 | 
         
            +
            [More Information Needed]
         
     | 
| 510 | 
         
            +
             
     | 
| 511 | 
         
            +
            #### Factors
         
     | 
| 512 | 
         
            +
             
     | 
| 513 | 
         
            +
            <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
         
     | 
| 514 | 
         
            +
             
     | 
| 515 | 
         
            +
            [More Information Needed]
         
     | 
| 516 | 
         
            +
             
     | 
| 517 | 
         
            +
            #### Metrics
         
     | 
| 518 | 
         
            +
             
     | 
| 519 | 
         
            +
            <!-- These are the evaluation metrics being used, ideally with a description of why. -->
         
     | 
| 520 | 
         
            +
             
     | 
| 521 | 
         
            +
            [More Information Needed]
         
     | 
| 522 | 
         
            +
             
     | 
| 523 | 
         
            +
            ### Results
         
     | 
| 524 | 
         
            +
             
     | 
| 525 | 
         
            +
            [More Information Needed]
         
     | 
| 526 | 
         
            +
             
     | 
| 527 | 
         
            +
            #### Summary
         
     | 
| 528 | 
         
            +
             
     | 
| 529 | 
         
            +
             
     | 
| 530 | 
         
            +
             
     | 
| 531 | 
         
            +
            ## Model Examination [optional]
         
     | 
| 532 | 
         
            +
             
     | 
| 533 | 
         
            +
            <!-- Relevant interpretability work for the model goes here -->
         
     | 
| 534 | 
         
            +
             
     | 
| 535 | 
         
            +
            [More Information Needed]
         
     | 
| 536 | 
         
            +
             
     | 
| 537 | 
         
            +
            ## Environmental Impact
         
     | 
| 538 | 
         
            +
             
     | 
| 539 | 
         
            +
            <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
         
     | 
| 540 | 
         
            +
             
     | 
| 541 | 
         
            +
            Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
         
     | 
| 542 | 
         
            +
             
     | 
| 543 | 
         
            +
            - **Hardware Type:** [More Information Needed]
         
     | 
| 544 | 
         
            +
            - **Hours used:** [More Information Needed]
         
     | 
| 545 | 
         
            +
            - **Cloud Provider:** [More Information Needed]
         
     | 
| 546 | 
         
            +
            - **Compute Region:** [More Information Needed]
         
     | 
| 547 | 
         
            +
            - **Carbon Emitted:** [More Information Needed]
         
     | 
| 548 | 
         
            +
             
     | 
| 549 | 
         
            +
            ## Technical Specifications [optional]
         
     | 
| 550 | 
         
            +
             
     | 
| 551 | 
         
            +
            ### Model Architecture and Objective
         
     | 
| 552 | 
         
            +
             
     | 
| 553 | 
         
            +
            [More Information Needed]
         
     | 
| 554 | 
         
            +
             
     | 
| 555 | 
         
            +
            ### Compute Infrastructure
         
     | 
| 556 | 
         
            +
             
     | 
| 557 | 
         
            +
            [More Information Needed]
         
     | 
| 558 | 
         
            +
             
     | 
| 559 | 
         
            +
            #### Hardware
         
     | 
| 560 | 
         
            +
             
     | 
| 561 | 
         
            +
            [More Information Needed]
         
     | 
| 562 | 
         
            +
             
     | 
| 563 | 
         
            +
            #### Software
         
     | 
| 564 | 
         
            +
             
     | 
| 565 | 
         
            +
            [More Information Needed]
         
     | 
| 566 | 
         
            +
             
     | 
| 567 | 
         
            +
            ## Citation [optional]
         
     | 
| 568 | 
         
            +
             
     | 
| 569 | 
         
            +
            <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
         
     | 
| 570 | 
         
            +
             
     | 
| 571 | 
         
            +
            **BibTeX:**
         
     | 
| 572 | 
         
            +
             
     | 
| 573 | 
         
            +
            [More Information Needed]
         
     | 
| 574 | 
         
            +
             
     | 
| 575 | 
         
            +
            **APA:**
         
     | 
| 576 | 
         
            +
             
     | 
| 577 | 
         
            +
            [More Information Needed]
         
     | 
| 578 | 
         
            +
             
     | 
| 579 | 
         
            +
            ## Glossary [optional]
         
     | 
| 580 | 
         
            +
             
     | 
| 581 | 
         
            +
            <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
         
     | 
| 582 | 
         
            +
             
     | 
| 583 | 
         
            +
            [More Information Needed]
         
     | 
| 584 | 
         
            +
             
     | 
| 585 | 
         
            +
            ## More Information [optional]
         
     | 
| 586 | 
         
            +
             
     | 
| 587 | 
         
[More Information Needed]

## Model Card Authors [optional]

[More Information Needed]

## Model Card Contact
HuggingFaceFW/fineweb-2

[More Information Needed]
import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace

try:
    role = sagemaker.get_execution_role()
except ValueError:
    iam = boto3.client('iam')
    role = iam.get_role(RoleName='sagemaker_execution_role')['Role']['Arn']

hyperparameters = {
    'model_name_or_path':'Qwen/Qwen2-VL-72B',
    'output_dir':'/opt/ml/model'
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.37.0/path/to/script
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git','branch': 'v4.37.0'}

# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
    entry_point='train.py',
    source_dir='./path/to/script',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.37.0',
    pytorch_version='2.1.0',
    py_version='py310',
    hyperparameters = hyperparameters
)

# starting the train job
huggingface_estimator.fit()
git lfs install
# Load model directly
from transformers import AutoProcessor, AutoModelForImageTextToText
from openai import OpenAI

client = OpenAI(
    base_url=""https://api-inference.huggingface.co/v1/"",
    api_key=""hf_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx""
)

messages = [
    {
        ""role"": ""user"",
        ""content"": [
            {
                ""type"": ""text"",
                ""text"": ""Describe this image in one sentence.""
            },
            {
                ""type"": ""image_url"",
                ""image_url"": {
                    ""url"": ""https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg""
                }
            }
        ]
    }
]

stream = client.chat.completions.create(
    model=""Qwen/QVQ-72B-Preview"",
    messages=messages,
    max_tokens=500,
    stream=True
)

for chunk in stream:
    print(chunk.choices[0].delta.content, end="""")
processor = AutoProcessor.from_pretrained(""Qwen/Qwen2-VL-72B"")
            model = AutoModelForImageTextToText.from_pretrained(""Qwen/Qwen2-VL-72B"")","{""id"": ""whitedell/YourAi"", ""author"": ""whitedell"", ""sha"": ""2986cdf74da031fa68fd659ad283af0503977cda"", ""last_modified"": ""2025-01-07 10:42:13+00:00"", ""created_at"": ""2025-01-07 10:21:16+00:00"", ""private"": false, ""gated"": ""auto"", ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""question-answering"", ""aa"", ""ae"", ""af"", ""ak"", ""am"", ""ar"", ""as"", ""hi"", ""en"", ""ne"", ""bh"", ""sa"", ""dataset:HuggingFaceFW/fineweb-2"", ""arxiv:1910.09700"", ""base_model:Qwen/QVQ-72B-Preview"", ""base_model:finetune:Qwen/QVQ-72B-Preview"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": ""question-answering"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- Qwen/QVQ-72B-Preview\ndatasets:\n- HuggingFaceFW/fineweb-2\nlanguage:\n- aa\n- ae\n- af\n- ak\n- am\n- ar\n- as\n- hi\n- en\n- ne\n- bh\n- sa\nlicense: apache-2.0\nmetrics:\n- accuracy\npipeline_tag: question-answering\nnew_version: Qwen/QwQ-32B-Preview"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-01-07 10:42:13+00:00"", ""cardData"": ""base_model:\n- Qwen/QVQ-72B-Preview\ndatasets:\n- HuggingFaceFW/fineweb-2\nlanguage:\n- aa\n- ae\n- af\n- ak\n- am\n- ar\n- as\n- hi\n- en\n- ne\n- bh\n- sa\nlicense: apache-2.0\nmetrics:\n- accuracy\npipeline_tag: question-answering\nnew_version: Qwen/QwQ-32B-Preview"", ""transformersInfo"": null, ""_id"": ""677d001c19adab3653f05201"", ""modelId"": ""whitedell/YourAi"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=whitedell/YourAi&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bwhitedell%2FYourAi%5D(%2Fwhitedell%2FYourAi)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
rrbhanderi05/FaceGoto-t1,"---
license: afl-3.0
datasets:
- fka/awesome-chatgpt-prompts
- HuggingFaceTB/finemath
- HuggingFaceFW/fineweb-2
- O1-OPEN/OpenO1-SFT
metrics:
- accuracy
base_model:
- Qwen/QVQ-72B-Preview
- meta-llama/Llama-3.3-70B-Instruct
new_version: deepseek-ai/DeepSeek-V3
pipeline_tag: any-to-any
library_name: diffusers
tags:
- code
- not-for-all-audiences
            ---","{""id"": ""rrbhanderi05/FaceGoto-t1"", ""author"": ""rrbhanderi05"", ""sha"": ""82e897bd903f9bed89bdc37d4522aaa57b1ebdac"", ""last_modified"": ""2025-01-09 06:12:09+00:00"", ""created_at"": ""2025-01-09 06:09:38+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""code"", ""not-for-all-audiences"", ""any-to-any"", ""dataset:fka/awesome-chatgpt-prompts"", ""dataset:HuggingFaceTB/finemath"", ""dataset:HuggingFaceFW/fineweb-2"", ""dataset:O1-OPEN/OpenO1-SFT"", ""base_model:Qwen/QVQ-72B-Preview"", ""base_model:finetune:Qwen/QVQ-72B-Preview"", ""license:afl-3.0"", ""region:us""], ""pipeline_tag"": ""any-to-any"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- Qwen/QVQ-72B-Preview\n- meta-llama/Llama-3.3-70B-Instruct\ndatasets:\n- fka/awesome-chatgpt-prompts\n- HuggingFaceTB/finemath\n- HuggingFaceFW/fineweb-2\n- O1-OPEN/OpenO1-SFT\nlibrary_name: diffusers\nlicense: afl-3.0\nmetrics:\n- accuracy\npipeline_tag: any-to-any\ntags:\n- code\n- not-for-all-audiences\nnew_version: deepseek-ai/DeepSeek-V3"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-01-09 06:12:09+00:00"", ""cardData"": ""base_model:\n- Qwen/QVQ-72B-Preview\n- meta-llama/Llama-3.3-70B-Instruct\ndatasets:\n- fka/awesome-chatgpt-prompts\n- HuggingFaceTB/finemath\n- HuggingFaceFW/fineweb-2\n- O1-OPEN/OpenO1-SFT\nlibrary_name: diffusers\nlicense: afl-3.0\nmetrics:\n- accuracy\npipeline_tag: any-to-any\ntags:\n- code\n- not-for-all-audiences\nnew_version: deepseek-ai/DeepSeek-V3"", ""transformersInfo"": null, ""_id"": ""677f68222fcceb4c31520480"", ""modelId"": ""rrbhanderi05/FaceGoto-t1"", ""usedStorage"": 0}",1,,0,,0,,0,,0,,0
         
shad777725/risk_model,"---
license: apache-2.0
language:
- en
metrics:
- accuracy
base_model:
- Qwen/QVQ-72B-Preview
new_version: deepseek-ai/DeepSeek-V3
pipeline_tag: text-classification
library_name: fasttext
tags:
- medical
            ---","{""id"": ""shad777725/risk_model"", ""author"": ""shad777725"", ""sha"": ""2ed166567b4687b84be32f482bd5143837c8dfcb"", ""last_modified"": ""2025-01-16 00:51:18+00:00"", ""created_at"": ""2025-01-16 00:31:43+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""fasttext"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""fasttext"", ""medical"", ""text-classification"", ""en"", ""base_model:Qwen/QVQ-72B-Preview"", ""base_model:finetune:Qwen/QVQ-72B-Preview"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": ""text-classification"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- Qwen/QVQ-72B-Preview\nlanguage:\n- en\nlibrary_name: fasttext\nlicense: apache-2.0\nmetrics:\n- accuracy\npipeline_tag: text-classification\ntags:\n- medical\nnew_version: deepseek-ai/DeepSeek-V3"", ""widget_data"": [{""text"": ""I like you. I love you""}], ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model_fasttext.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-01-16 00:51:18+00:00"", ""cardData"": ""base_model:\n- Qwen/QVQ-72B-Preview\nlanguage:\n- en\nlibrary_name: fasttext\nlicense: apache-2.0\nmetrics:\n- accuracy\npipeline_tag: text-classification\ntags:\n- medical\nnew_version: deepseek-ai/DeepSeek-V3"", ""transformersInfo"": null, ""_id"": ""6788536f5d5f8ac10b9dafeb"", ""modelId"": ""shad777725/risk_model"", ""usedStorage"": 240645775}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=shad777725/risk_model&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bshad777725%2Frisk_model%5D(%2Fshad777725%2Frisk_model)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
QwQ-32B-GGUF_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv ADDED
@@ -0,0 +1,125 @@
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
Qwen/QwQ-32B-GGUF,"---
license: apache-2.0
license_link: https://huggingface.co/Qwen/QWQ-32B-GGUF/blob/main/LICENSE
language:
- en
pipeline_tag: text-generation
base_model: Qwen/QwQ-32B
tags:
- chat
---

# QwQ-32B-GGUF

<a href=""https://chat.qwenlm.ai/"" target=""_blank"" style=""margin: 2px;"">
    <img alt=""Chat"" src=""https://img.shields.io/badge/%F0%9F%92%9C%EF%B8%8F%20Qwen%20Chat%20-536af5"" style=""display: inline-block; vertical-align: middle;""/>
</a>

## Introduction

QwQ is the reasoning model of the Qwen series. Compared with conventional instruction-tuned models, QwQ, which is capable of thinking and reasoning, can achieve significantly enhanced performance in downstream tasks, especially hard problems. QwQ-32B is the medium-sized reasoning model, which is capable of achieving competitive performance against state-of-the-art reasoning models, e.g., DeepSeek-R1, o1-mini.

<p align=""center"">
  <img width=""100%"" src=""figures/benchmark.jpg"">
</p>

**This repo contains the QwQ 32B model in the GGUF Format**, which has the following features:
- Type: Causal Language Models
- Training Stage: Pretraining & Post-training (Supervised Finetuning and Reinforcement Learning)
- Architecture: transformers with RoPE, SwiGLU, RMSNorm, and Attention QKV bias
- Number of Parameters: 32.5B
- Number of Parameters (Non-Embedding): 31.0B
- Number of Layers: 64
- Number of Attention Heads (GQA): 40 for Q and 8 for KV
- Context Length: Full 131,072 tokens
- Quantization: q4_K_M, q5_0, q5_K_M, q6_K, q8_0

**Note:** For the best experience, please review the [usage guidelines](#usage-guidelines) before deploying QwQ models.

You can try our [demo](https://huggingface.co/spaces/Qwen/QwQ-32B-Demo) or access QwQ models via [QwenChat](https://chat.qwen.ai).

For more details, please refer to our [blog](https://qwenlm.github.io/blog/qwq-32b/), [GitHub](https://github.com/QwenLM/Qwen2.5), and [Documentation](https://qwen.readthedocs.io/en/latest/).

## Requirements

QwQ is based on Qwen2.5, whose code has been merged into the latest Hugging Face `transformers`. We advise you to use the latest version of `transformers`.

With `transformers<4.37.0`, you will encounter the following error:
```
KeyError: 'qwen2'
```
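
A quick way to catch this up front is to check the installed version before loading anything. This is only a minimal sketch; it assumes the `packaging` helper (commonly installed alongside pip) is available, which is not stated in this card.

```python
from packaging import version  # assumed to be available; not part of this card's requirements

import transformers

# transformers gained the 'qwen2' architecture in 4.37.0; older releases raise KeyError: 'qwen2'.
if version.parse(transformers.__version__) < version.parse('4.37.0'):
    raise RuntimeError(f'transformers {transformers.__version__} is too old; please upgrade to 4.37.0 or newer')
```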
         

Also check out our [AWQ documentation](https://qwen.readthedocs.io/en/latest/quantization/awq.html) for more usage guidance.

## Quickstart

Check out our [llama.cpp documentation](https://qwen.readthedocs.io/en/latest/run_locally/llama.cpp.html) for more usage guidance.

We advise you to clone [`llama.cpp`](https://github.com/ggerganov/llama.cpp) and install it following the official guide. We follow the latest version of llama.cpp.
In the following demonstration, we assume that you are running commands under the repository `llama.cpp`.

You can use the following commands for inference:

```shell
./llama-cli \
    --model QwQ-32B-GGUF/qwq-32b-q4_k_m.gguf \
    --threads 32 \
    --ctx-size 32768 \
    --seed 1234 \
    --temp 0.6 \
    --min-p 0.0 \
    --top-k 40 \
    --top-p 0.95 \
    -no-cnv \
    --samplers ""top_k;top_p;min_p;temperature;"" \
    --prompt ""<|im_start|>user\nHow many r's are in the word \""strawberry\""<|im_end|>\n<|im_start|>assistant\n<think>\n""
```
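
The same quantized file can also be driven from Python rather than the `llama-cli` binary. The sketch below is only illustrative: it assumes the third-party `llama-cpp-python` bindings, which this card does not mention, and a locally downloaded copy of the q4_K_M weights shipped in this repo; the sampling values mirror the guidelines below.

```python
from llama_cpp import Llama  # assumption: installed via `pip install llama-cpp-python`

# Load the local q4_K_M quantization; n_ctx mirrors --ctx-size in the llama-cli example above.
llm = Llama(model_path='QwQ-32B-GGUF/qwq-32b-q4_k_m.gguf', n_ctx=32768, n_threads=32)

out = llm.create_chat_completion(
    messages=[{'role': 'user', 'content': 'How many letters are in the word strawberry?'}],
    temperature=0.6,   # recommended instead of greedy decoding
    top_p=0.95,
    top_k=40,
    min_p=0.0,
    max_tokens=512,
)
print(out['choices'][0]['message']['content'])
```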
         

### Usage Guidelines

To achieve optimal performance, we recommend the following settings:

1. **Enforce Thoughtful Output**: Ensure the model starts with ""\<think\>\n"" to prevent generating empty thinking content, which can degrade output quality.

2. **Sampling Parameters**:
   - Use Temperature=0.6, TopP=0.95, MinP=0 instead of Greedy decoding to avoid endless repetitions.
   - Use TopK between 20 and 40 to filter out rare token occurrences while maintaining the diversity of the generated output.
   - For supported frameworks, you can adjust the `presence_penalty` parameter between 0 and 2 to reduce endless repetitions. However, using a higher value may result in occasional language mixing and a slight decrease in performance.

3. **No Thinking Content in History**: In multi-turn conversations, the historical model output should only include the final output part and does not need to include the thinking content. This feature is already implemented in `apply_chat_template` (see the sketch after this list).

4. **Standardize Output Format**: We recommend using prompts to standardize model outputs when benchmarking.
   - **Math Problems**: Include ""Please reason step by step, and put your final answer within \boxed{}."" in the prompt.
   - **Multiple-Choice Questions**: Add the following JSON structure to the prompt to standardize responses: ""Please show your choice in the `answer` field with only the choice letter, e.g.,`\""answer\"": \""C\""`."" in the prompt.

5. **Handle Long Inputs**: For inputs exceeding 32,768 tokens, enable [YaRN](https://arxiv.org/abs/2309.00071) to improve the model's ability to capture long-sequence information effectively. Currently, only vLLM supports YaRN for length extrapolation. If you want to process sequences up to 131,072 tokens, please refer to the non-GGUF models.

6. **Other References**: You can also consult [Unsloth's Guide](https://docs.unsloth.ai/basics/tutorial-how-to-run-qwq-32b-effectively) to see if their approach meets your needs. (Thanks to the Unsloth team!)
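
When a framework does not apply the chat template for you, guideline 3 can be reproduced by hand. The helper below is a minimal sketch that mirrors the `split('</think>')` logic used by this repo's chat template; the function name is purely illustrative.

```python
def strip_thinking(assistant_message: str) -> str:
    # Keep only the text after the final </think> tag, so multi-turn history
    # carries the answer but not the reasoning content, per guideline 3.
    return assistant_message.split('</think>')[-1].lstrip('\n')

# Example: only the final answer is fed back into the next turn.
print(strip_thinking('<think>\ncount the letters...\n</think>\n\nThere are 10 letters.'))
```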
         

## Evaluation & Performance

Detailed evaluation results are reported in this [📑 blog](https://qwenlm.github.io/blog/qwq-32b/).

For requirements on GPU memory and the respective throughput, see results [here](https://qwen.readthedocs.io/en/latest/benchmark/speed_benchmark.html).

## Citation

If you find our work helpful, feel free to give us a cite.

```
@misc{qwq32b,
    title = {QwQ-32B: Embracing the Power of Reinforcement Learning},
    url = {https://qwenlm.github.io/blog/qwq-32b/},
    author = {Qwen Team},
    month = {March},
    year = {2025}
}

@article{qwen2.5,
      title={Qwen2.5 Technical Report},
      author={An Yang and Baosong Yang and Beichen Zhang and Binyuan Hui and Bo Zheng and Bowen Yu and Chengyuan Li and Dayiheng Liu and Fei Huang and Haoran Wei and Huan Lin and Jian Yang and Jianhong Tu and Jianwei Zhang and Jianxin Yang and Jiaxi Yang and Jingren Zhou and Junyang Lin and Kai Dang and Keming Lu and Keqin Bao and Kexin Yang and Le Yu and Mei Li and Mingfeng Xue and Pei Zhang and Qin Zhu and Rui Men and Runji Lin and Tianhao Li and Tianyi Tang and Tingyu Xia and Xingzhang Ren and Xuancheng Ren and Yang Fan and Yang Su and Yichang Zhang and Yu Wan and Yuqiong Liu and Zeyu Cui and Zhenru Zhang and Zihan Qiu},
      journal={arXiv preprint arXiv:2412.15115},
      year={2024}
}
            ```","{""id"": ""Qwen/QwQ-32B-GGUF"", ""author"": ""Qwen"", ""sha"": ""8728e66249190b78dee8404869827328527f6b3b"", ""last_modified"": ""2025-03-13 10:04:54+00:00"", ""created_at"": ""2025-03-05 15:12:54+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 70084, ""downloads_all_time"": null, ""likes"": 194, ""library_name"": null, ""gguf"": {""total"": 32763876352, ""architecture"": ""qwen2"", ""context_length"": 131072, ""chat_template"": ""{%- if tools %}\n    {{- '<|im_start|>system\\n' }}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- messages[0]['content'] }}\n    {%- else %}\n        {{- '' }}\n    {%- endif %}\n    {{- \""\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\"" }}\n    {%- for tool in tools %}\n        {{- \""\\n\"" }}\n        {{- tool | tojson }}\n    {%- endfor %}\n    {{- \""\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\""name\\\"": <function-name>, \\\""arguments\\\"": <args-json-object>}\\n</tool_call><|im_end|>\\n\"" }}\n{%- else %}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n  {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n    {%- if (message.role == \""user\"") or (message.role == \""system\"" and not loop.first) %}\n        {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n    {%- elif message.role == \""assistant\"" and not message.tool_calls %}\n        {%- set content = message.content.split('</think>')[-1].lstrip('\\n') %}\n        {{- '<|im_start|>' + message.role + '\\n' + content + '<|im_end|>' + '\\n' }}\n    {%- elif message.role == \""assistant\"" %}\n        {%- set content = message.content.split('</think>')[-1].lstrip('\\n') %}\n        {{- '<|im_start|>' + message.role }}\n        {%- if message.content %}\n            {{- '\\n' + content }}\n        {%- endif %}\n        {%- for tool_call in message.tool_calls %}\n            {%- if tool_call.function is defined %}\n                {%- set tool_call = tool_call.function %}\n            {%- endif %}\n            {{- '\\n<tool_call>\\n{\""name\"": \""' }}\n            {{- tool_call.name }}\n            {{- '\"", \""arguments\"": ' }}\n            {{- tool_call.arguments | tojson }}\n            {{- '}\\n</tool_call>' }}\n        {%- endfor %}\n        {{- '<|im_end|>\\n' }}\n    {%- elif message.role == \""tool\"" %}\n        {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \""tool\"") %}\n            {{- '<|im_start|>user' }}\n        {%- endif %}\n        {{- '\\n<tool_response>\\n' }}\n        {{- message.content }}\n        {{- '\\n</tool_response>' }}\n        {%- if loop.last or (messages[loop.index0 + 1].role != \""tool\"") %}\n            {{- '<|im_end|>\\n' }}\n        {%- endif %}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n"", ""bos_token"": ""<|endoftext|>"", ""eos_token"": ""<|im_end|>""}, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""gguf"", ""chat"", ""text-generation"", ""en"", ""arxiv:2309.00071"", ""arxiv:2412.15115"", ""base_model:Qwen/QwQ-32B"", ""base_model:quantized:Qwen/QwQ-32B"", ""license:apache-2.0"", ""endpoints_compatible"", 
""region:us"", ""conversational""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: Qwen/QwQ-32B\nlanguage:\n- en\nlicense: apache-2.0\nlicense_link: https://huggingface.co/Qwen/QWQ-32B-GGUF/blob/main/LICENSE\npipeline_tag: text-generation\ntags:\n- chat"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='figures/benchmark.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='fp16/qwq-32b-fp16-00001-of-00017.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='fp16/qwq-32b-fp16-00002-of-00017.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='fp16/qwq-32b-fp16-00003-of-00017.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='fp16/qwq-32b-fp16-00004-of-00017.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='fp16/qwq-32b-fp16-00005-of-00017.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='fp16/qwq-32b-fp16-00006-of-00017.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='fp16/qwq-32b-fp16-00007-of-00017.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='fp16/qwq-32b-fp16-00008-of-00017.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='fp16/qwq-32b-fp16-00009-of-00017.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='fp16/qwq-32b-fp16-00010-of-00017.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='fp16/qwq-32b-fp16-00011-of-00017.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='fp16/qwq-32b-fp16-00012-of-00017.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='fp16/qwq-32b-fp16-00013-of-00017.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='fp16/qwq-32b-fp16-00014-of-00017.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='fp16/qwq-32b-fp16-00015-of-00017.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='fp16/qwq-32b-fp16-00016-of-00017.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='fp16/qwq-32b-fp16-00017-of-00017.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='params', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwq-32b-q4_k_m.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwq-32b-q5_0.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwq-32b-q5_k_m.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwq-32b-q6_k.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwq-32b-q8_0.gguf', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-13 10:04:54+00:00"", ""cardData"": ""base_model: Qwen/QwQ-32B\nlanguage:\n- en\nlicense: apache-2.0\nlicense_link: https://huggingface.co/Qwen/QWQ-32B-GGUF/blob/main/LICENSE\npipeline_tag: text-generation\ntags:\n- chat"", 
""transformersInfo"": null, ""_id"": ""67c869f6a3a4e28d00af552b"", ""modelId"": ""Qwen/QwQ-32B-GGUF"", ""usedStorage"": 414230301824}",0,,0,,0,,0,,0,"Qwen/QwQ-32B-Demo, huggingface/InferenceSupport/discussions/new?title=Qwen/QwQ-32B-GGUF&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BQwen%2FQwQ-32B-GGUF%5D(%2FQwen%2FQwQ-32B-GGUF)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A",2
         
Qwen-14B_finetunes_20250426_212347.csv_finetunes_20250426_212347.csv ADDED
@@ -0,0 +1,1109 @@
| 1 | 
         
            +
            model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
         
     | 
| 2 | 
         
            +
            Qwen/Qwen-14B,"---
         
     | 
| 3 | 
         
            +
            language:
         
     | 
| 4 | 
         
            +
            - zh
         
     | 
| 5 | 
         
            +
            - en
         
     | 
| 6 | 
         
            +
            tags:
         
     | 
| 7 | 
         
            +
            - qwen
         
     | 
| 8 | 
         
            +
            pipeline_tag: text-generation
         
     | 
| 9 | 
         
            +
            inference: false
         
     | 
| 10 | 
         
            +
            ---
         
     | 
| 11 | 
         
            +
             
     | 
| 12 | 
         
            +
            # Qwen-14B
         
     | 
| 13 | 
         
            +
             
     | 
| 14 | 
         
            +
            <p align=""center"">
         
     | 
| 15 | 
         
            +
                <img src=""https://qianwen-res.oss-cn-beijing.aliyuncs.com/logo_qwen.jpg"" width=""400""/>
         
     | 
| 16 | 
         
            +
            <p>
         
     | 
| 17 | 
         
            +
            <br>
         
     | 
| 18 | 
         
            +
             
     | 
| 19 | 
         
            +
            <p align=""center"">
         
     | 
| 20 | 
         
            +
                    🤗 <a href=""https://huggingface.co/Qwen"">Hugging Face</a>   |   🤖 <a href=""https://modelscope.cn/organization/qwen"">ModelScope</a>   |    📑 <a href=""https://arxiv.org/abs/2309.16609"">Paper</a>    |   🖥️ <a href=""https://modelscope.cn/studios/qwen/Qwen-14B-Chat-Demo/summary"">Demo</a>
         
     | 
| 21 | 
         
            +
            <br>
         
     | 
| 22 | 
         
            +
            <a href=""https://github.com/QwenLM/Qwen/blob/main/assets/wechat.png"">WeChat (微信)</a>   |   <a href=""https://discord.gg/z3GAxXZ9Ce"">Discord</a>   |    <a href=""https://dashscope.aliyun.com"">API</a> 
         
     | 
| 23 | 
         
            +
            </p>
         
     | 
| 24 | 
         
            +
            <br>
         
     | 
| 25 | 
         
            +
             
     | 
| 26 | 
         
            +
            ## 介绍 (Introduction)
         
     | 
| 27 | 
         
            +
             
     | 
| 28 | 
         
            +
            **通义千问-14B**(**Qwen-14B**)是阿里云研发的通义千问大模型系列的140亿参数规模的模型。Qwen-14B是基于Transformer的大语言模型, 在超大规模的预训练数据上进行训练得到。预训练数据类型多样,覆盖广泛,包括大量网络文本、专业书籍、代码等。同时,在Qwen-14B的基础上,我们使用对齐机制打造了基于大语言模型的AI助手Qwen-14B-Chat。本仓库为Qwen-14B的仓库。
         
     | 
| 29 | 
         
            +
             
     | 
| 30 | 
         
            +
            通义千问-14B(Qwen-14B)主要有以下特点:
         
     | 
| 31 | 
         
            +
             
     | 
| 32 | 
         
            +
            1. **大规模高质量训练语料**:使用超过3万亿tokens的数据进行预训练,包含高质量中、英、多语言、代码、数学等数据,涵盖通用及专业领域的训练语料。通过大量对比实验对预训练语料分布进行了优化。
         
     | 
| 33 | 
         
            +
            2. **强大的性能**:Qwen-14B在多个中英文下游评测任务上(涵盖常识推理、代码、数学、翻译等),效果显著超越现有的相近规模开源模型,甚至在部分指标上相比更大尺寸模型也有较强竞争力。具体评测结果请详见下文。
         
     | 
| 34 | 
         
            +
            3. **覆盖更全面的词表**:相比目前以中英词表为主的开源模型,Qwen-14B使用了约15万大小的词表。该词表对多语言更加友好,方便用户在不扩展词表的情况下对部分语种进行能力增强和扩展。
         
     | 
| 35 | 
         
            +
             
     | 
| 36 | 
         
            +
            如果您想了解更多关于通义千问14B开源模型的细节,我们建议您参阅[GitHub代码库](https://github.com/QwenLM/Qwen)。
         
     | 
| 37 | 
         
            +
             
     | 
| 38 | 
         
            +
            **Qwen-14B** is the 14B-parameter version of the large language model series, Qwen (abbr. Tongyi Qianwen), proposed by Alibaba Cloud. Qwen-14B is a Transformer-based large language model, which is pretrained on a large volume of data, including web texts, books, code, etc. Additionally, based on the pretrained Qwen-14B, we release Qwen-14B-Chat, a large-model-based AI assistant, which is trained with alignment techniques. This repository is the one for Qwen-14B.
         
     | 
| 39 | 
         
            +
             
     | 
| 40 | 
         
            +
            The features of Qwen-14B include:
         
     | 
| 41 | 
         
            +
             
     | 
| 42 | 
         
            +
            1. **Large-scale high-quality training corpora**: It is pretrained on over 3 trillion tokens, including Chinese, English, multilingual texts, code, and mathematics, covering general and professional fields. The distribution of the pre-training corpus has been optimized through a large number of ablation experiments.
         
     | 
| 43 | 
         
            +
            2. **Competitive performance**: It significantly surpasses existing open-source models of similar scale on multiple Chinese and English downstream evaluation tasks (including commonsense, reasoning, code, mathematics, etc.), and even surpasses some larger-scale models in several benchmarks. See below for specific evaluation results.
         
     | 
| 44 | 
         
            +
            3. **More comprehensive vocabulary coverage**: Compared with other open-source models based on Chinese and English vocabularies, Qwen-14B uses a vocabulary of over 150K tokens. This vocabulary is more friendly to multiple languages, enabling users to directly enhance the capability for certain languages without expanding the vocabulary.
         
     | 
| 45 | 
         
            +
             
     | 
| 46 | 
         
            +
            For more details about the open-source model of Qwen-14B, please refer to the [GitHub](https://github.com/QwenLM/Qwen) code repository.
         
     | 
| 47 | 
         
            +
            <br>
         
     | 
| 48 | 
         
            +
             
     | 
| 49 | 
         
            +
            ## 要求(Requirements)
         
     | 
| 50 | 
         
            +
             
     | 
| 51 | 
         
            +
            * python 3.8及以上版本
         
     | 
| 52 | 
         
            +
            * pytorch 1.12及以上版本,推荐2.0及以上版本
         
     | 
| 53 | 
         
            +
            * 建议使用CUDA 11.4及以上(GPU用户、flash-attention用户等需考虑此选项)
         
     | 
| 54 | 
         
            +
            * python 3.8 and above
         
     | 
| 55 | 
         
            +
            * pytorch 1.12 and above, 2.0 and above are recommended
         
     | 
| 56 | 
         
            +
            * CUDA 11.4 and above are recommended (this is for GPU users, flash-attention users, etc.)
         
     | 
| 57 | 
         
            +
            <br>
         
     | 
| 58 | 
         
            +
             
     | 
| 59 | 
         
            +
            ## 依赖项 (Dependency)
         
     | 
| 60 | 
         
            +
             
     | 
| 61 | 
         
            +
            运行Qwen-14B,请确保满足上述要求,再执行以下pip命令安装依赖库
         
     | 
| 62 | 
         
            +
             
     | 
| 63 | 
         
            +
            To run Qwen-14B, please make sure you meet the above requirements, and then execute the following pip commands to install the dependent libraries.
         
     | 
| 64 | 
         
            +
             
     | 
| 65 | 
         
            +
            ```bash
         
     | 
| 66 | 
         
            +
            pip install transformers==4.32.0 accelerate tiktoken einops scipy transformers_stream_generator==0.0.4 peft deepspeed
         
     | 
| 67 | 
         
            +
            ```
         
     | 
| 68 | 
         
            +
             
     | 
| 69 | 
         
            +
            另外,推荐安装`flash-attention`库(**当前已支持flash attention 2**),以实现更高的效率和更低的显存占用。
         
     | 
| 70 | 
         
            +
             
     | 
| 71 | 
         
            +
            In addition, it is recommended to install the `flash-attention` library (**we support flash attention 2 now.**) for higher efficiency and lower memory usage.
         
     | 
| 72 | 
         
            +
             
     | 
| 73 | 
         
            +
            ```bash
         
     | 
| 74 | 
         
            +
            git clone https://github.com/Dao-AILab/flash-attention
         
     | 
| 75 | 
         
            +
            cd flash-attention && pip install .
         
     | 
| 76 | 
         
            +
            # 下方安装可选,安装可能比较缓慢。
         
     | 
| 77 | 
         
            +
            # pip install csrc/layer_norm
         
     | 
| 78 | 
         
            +
            # pip install csrc/rotary
         
     | 
| 79 | 
         
            +
            ```
         
     | 
| 80 | 
         
            +
            <br>
         
     | 
| 81 | 
         
            +
             
     | 
| 82 | 
         
            +
            ## 快速使用(Quickstart)
         
     | 
| 83 | 
         
            +
             
     | 
| 84 | 
         
            +
            您可以通过以下代码轻松调用:
         
     | 
| 85 | 
         
            +
             
     | 
| 86 | 
         
            +
            You can easily call the model with the following code:
         
     | 
| 87 | 
         
            +
             
     | 
| 88 | 
         
            +
            ```python
         
     | 
| 89 | 
         
            +
            from transformers import AutoModelForCausalLM, AutoTokenizer
         
     | 
| 90 | 
         
            +
            from transformers.generation import GenerationConfig
         
     | 
| 91 | 
         
            +
             
     | 
| 92 | 
         
            +
            # Note: The default behavior now has injection attack prevention off.
         
     | 
| 93 | 
         
            +
            tokenizer = AutoTokenizer.from_pretrained(""Qwen/Qwen-14B"", trust_remote_code=True)
         
     | 
| 94 | 
         
            +
             
     | 
| 95 | 
         
            +
            # use bf16
         
     | 
| 96 | 
         
            +
            # model = AutoModelForCausalLM.from_pretrained(""Qwen/Qwen-14B"", device_map=""auto"", trust_remote_code=True, bf16=True).eval()
         
     | 
| 97 | 
         
            +
            # use fp16
         
     | 
| 98 | 
         
            +
            # model = AutoModelForCausalLM.from_pretrained(""Qwen/Qwen-14B"", device_map=""auto"", trust_remote_code=True, fp16=True).eval()
         
     | 
| 99 | 
         
            +
            # use cpu only
         
     | 
| 100 | 
         
            +
            # model = AutoModelForCausalLM.from_pretrained(""Qwen/Qwen-14B"", device_map=""cpu"", trust_remote_code=True).eval()
         
     | 
| 101 | 
         
            +
            # use auto mode, automatically select precision based on the device.
         
     | 
| 102 | 
         
            +
            model = AutoModelForCausalLM.from_pretrained(""Qwen/Qwen-14B"", device_map=""auto"", trust_remote_code=True).eval()
         
     | 
| 103 | 
         
            +
             
     | 
| 104 | 
         
            +
            # Specify hyperparameters for generation. But if you use transformers>=4.32.0, there is no need to do this.
         
     | 
| 105 | 
         
            +
            # model.generation_config = GenerationConfig.from_pretrained(""Qwen/Qwen-14B"", trust_remote_code=True)
         
     | 
| 106 | 
         
            +
             
     | 
| 107 | 
         
            +
            inputs = tokenizer('蒙古国的首都是乌兰巴托(Ulaanbaatar)\n冰岛的首都是雷克雅未克(Reykjavik)\n埃塞俄比亚的首都是', return_tensors='pt')
         
     | 
| 108 | 
         
            +
            inputs = inputs.to(model.device)
         
     | 
| 109 | 
         
            +
            pred = model.generate(**inputs)
         
     | 
| 110 | 
         
            +
            print(tokenizer.decode(pred.cpu()[0], skip_special_tokens=True))
         
     | 
| 111 | 
         
            +
            # 蒙古国的首都是乌兰巴托(Ulaanbaatar)\n冰岛的首都是雷克雅未克(Reykjavik)\n埃塞俄比亚的首都是亚的斯亚贝巴(Addis Ababa)...
         
     | 
| 112 | 
         
            +
            ```
         
     | 
| 113 | 
         
            +
             
     | 
| 114 | 
         
            +
            关于更多的使用说明,请参考我们的[GitHub repo](https://github.com/QwenLM/Qwen)获取更多信息。
         
     | 
| 115 | 
         
            +
             
     | 
| 116 | 
         
            +
            For more usage instructions, please refer to our [GitHub repo](https://github.com/QwenLM/Qwen).
         
     | 
| 117 | 
         
            +
            <br>
         
     | 
| 118 | 
         
            +
             
     | 
| 119 | 
         
            +
            ## Tokenizer
         
     | 
| 120 | 
         
            +
             
     | 
| 121 | 
         
            +
            > 注:作为术语的“tokenization”在中文中尚无共识的概念对应,本文档采用英文表达以利说明。
         
     | 
| 122 | 
         
            +
             
     | 
| 123 | 
         
            +
            基于tiktoken的分词器有别于其他分词器,比如sentencepiece分词器。尤其在微调阶段,需要特别注意特殊token的使用。关于tokenizer的更多信息,以及微调时涉及的相关使用,请参阅[文档](https://github.com/QwenLM/Qwen/blob/main/tokenization_note_zh.md)。
         
     | 
| 124 | 
         
            +
             
     | 
| 125 | 
         
            +
            Our tokenizer based on tiktoken is different from other tokenizers, e.g., sentencepiece tokenizer. You need to pay attention to special tokens, especially in finetuning. For more detailed information on the tokenizer and related use in fine-tuning, please refer to the [documentation](https://github.com/QwenLM/Qwen/blob/main/tokenization_note.md).
         
     | 
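A quick way to see what the tiktoken-based tokenizer exposes before preparing fine-tuning data is to load it and print its special-token configuration. This is a minimal sketch, not part of the original card; it only assumes the same `trust_remote_code` tokenizer shown in the Quickstart above.

```python
from transformers import AutoTokenizer

# Minimal sketch (not from the original card): load the remote-code tokenizer,
# as in the Quickstart above, and inspect what it exposes before fine-tuning.
tokenizer = AutoTokenizer.from_pretrained('Qwen/Qwen-14B', trust_remote_code=True)

# The tiktoken-based tokenizer does not follow sentencepiece conventions,
# so check which special tokens are configured before building training data.
print(tokenizer.special_tokens_map)   # configured special tokens, if any
print(len(tokenizer))                 # vocabulary size (~150K entries)

# Plain encode/decode round trip on mixed Chinese/English text.
ids = tokenizer.encode('通义千问 Qwen-14B')
print(ids)
print(tokenizer.decode(ids))
```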
| 126 | 
         
            +
            <br>
         
     | 
| 127 | 
         
            +
             
     | 
| 128 | 
         
            +
            ## 模型细节 (Model)
         
     | 
| 129 | 
         
            +
             
     | 
| 130 | 
         
            +
            Qwen-14B模型规模基本情况如下所示:
         
     | 
| 131 | 
         
            +
             
     | 
| 132 | 
         
            +
            The details of the model architecture of Qwen-14B are listed as follows:
         
     | 
| 133 | 
         
            +
             
     | 
| 134 | 
         
            +
            | Hyperparameter  |  Value |
         
     | 
| 135 | 
         
            +
            |:----------------|:-------|
         
     | 
| 136 | 
         
            +
            |    n_layers     |     40 |
         
     | 
| 137 | 
         
            +
            |     n_heads     |     40 |
         
     | 
| 138 | 
         
            +
            |     d_model     |   5120 |
         
     | 
| 139 | 
         
            +
            |   vocab size    | 151851 |
         
     | 
| 140 | 
         
            +
            | sequence length |   2048 |
         
     | 
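As a rough back-of-the-envelope check (not from the original card), these hyperparameters are consistent with a ~14B parameter count under a generic decoder-only estimate of about 12·d_model² parameters per layer plus input and output embeddings; the exact figure depends on the real FFN width and biases.

```python
# Rough, assumption-laden estimate: ~12 * d_model^2 parameters per decoder layer
# (attention + FFN) plus untied input/output embeddings. The exact count depends
# on the real FFN width and biases, so treat this as a sanity check only.
d_model, n_layers, vocab = 5120, 40, 151851

per_layer = 12 * d_model ** 2          # attention + feed-forward, approximate
embeddings = 2 * vocab * d_model       # input embedding + LM head
total = n_layers * per_layer + embeddings
print(round(total / 1e9, 1), 'B parameters')   # ~14.1B, consistent with a 14B model
```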
| 141 | 
         
            +
             
     | 
| 142 | 
         
            +
            在位置编码、FFN激活函数和normalization的实现方式上,我们也采用了目前最流行的做法,
         
     | 
| 143 | 
         
            +
            即RoPE相对位置编码、SwiGLU激活函数、RMSNorm(可选安装flash-attention加速)。
         
     | 
| 144 | 
         
            +
             
     | 
| 145 | 
         
            +
            在分词器方面,相比目前主流开源模型以中英词表为主,Qwen-14B使用了超过15万token大小的词表。 该词表在GPT-4使用的BPE词表`cl100k_base`基础上,对中文、多语言进行了优化,在对中、英、代码数据的高效编解码的基础上,对部分多语言更加友好,方便用户在不扩展词表的情况下对部分语种进行能力增强。
         
     | 
| 146 | 
         
            +
            词表对数字按单个数字位切分。调用较为高效的[tiktoken分词库](https://github.com/openai/tiktoken)进行分词。
         
     | 
| 147 | 
         
            +
             
     | 
| 148 | 
         
            +
            我们从部分语种各随机抽取100万个文档语料,以对比不同模型的编码压缩率(以支持100语种的XLM-R为基准值1,越低越好),具体性能见图。
         
     | 
| 149 | 
         
            +
             
     | 
| 150 | 
         
            +
            可以看到Qwen-14B在保持中英代码高效解码的前提下,对部分使用人群较多的语种(泰语th、希伯来语he、阿拉伯语ar、韩语ko、越南语vi、日语ja、土耳其语tr、印尼语id、波兰语pl、俄语ru、荷兰语nl、葡萄牙语pt、意大利语it、德语de、西班牙语es、法语fr等)上也实现了较高的压缩率,使得模型在这些语种上也具备较强的可扩展性和较高的训练和推理效率。
         
     | 
| 151 | 
         
            +
             
     | 
| 152 | 
         
            +
            在预训练数据方面,Qwen-14B模型一方面利用了部分开源通用语料,
         
     | 
| 153 | 
         
            +
            另一方面也积累了海量全网语料以及高质量文本内容,去重及过滤后的语料超过3T tokens。
         
     | 
| 154 | 
         
            +
            囊括全网文本、百科、书籍、代码、数学及各个领域垂类。
         
     | 
| 155 | 
         
            +
             
     | 
| 156 | 
         
            +
            <p align=""center"">
         
     | 
| 157 | 
         
            +
                <img src=""assets/tokenizer.png"" style=""width: 1200px""/>
         
     | 
| 158 | 
         
            +
            <p>
         
     | 
| 159 | 
         
            +
             
     | 
| 160 | 
         
            +
            For position encoding, FFN activation function, and normalization methods, we adopt the prevalent practices, i.e., RoPE relative position encoding, SwiGLU for activation function, and RMSNorm for normalization (optional installation of flash-attention for acceleration).
         
     | 
| 161 | 
         
            +
             
     | 
| 162 | 
         
            +
            For tokenization, compared to the current mainstream open-source models based on Chinese and English vocabularies, Qwen-14B uses a vocabulary of over 150K tokens. It first considers efficient encoding of Chinese, English, and code data, and is also more friendly to multiple languages, enabling users to directly enhance the capability of certain languages without expanding the vocabulary. It splits numbers into single digits and uses the [tiktoken](https://github.com/openai/tiktoken) tokenizer library for efficient tokenization.
         
     | 
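The digit-by-digit splitting mentioned above can be seen with a tiny sketch (again assuming the remote-code tokenizer from the Quickstart); each digit, and the decimal point, should come back as its own token.

```python
from transformers import AutoTokenizer

# Sketch only: show the digit-by-digit splitting of numbers described above.
tokenizer = AutoTokenizer.from_pretrained('Qwen/Qwen-14B', trust_remote_code=True)

ids = tokenizer.encode('3.1415926')
print([tokenizer.decode([i]) for i in ids])
# expected: each digit (and the dot) decodes as its own token
```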
| 163 | 
         
            +
             
     | 
| 164 | 
         
            +
            We randomly sampled 1 million documents from the corpus of each language to compare the encoding compression rates of different models (with XLM-R, which supports 100 languages, as the baseline value of 1; lower is better). The specific performance is shown in the figure above.
         
     | 
| 165 | 
         
            +
             
     | 
| 166 | 
         
            +
            As can be seen, while ensuring the efficient decoding of Chinese, English, and code, Qwen-14B also achieves a high compression rate for many other languages (such as th, he, ar, ko, vi, ja, tr, id, pl, ru, nl, pt, it, de, es, fr etc.), equipping the model with strong scalability as well as high training and inference efficiency in these languages.
         
     | 
| 167 | 
         
            +
             
     | 
| 168 | 
         
            +
            For pre-training data, on the one hand, Qwen-14B uses part of the open-source generic corpus. On the other hand, it uses a massive amount of accumulated web corpus and high-quality text content. The scale of the corpus reaches over 3T tokens after deduplication and filtering, encompassing web text, encyclopedias, books, code, mathematics, and various domains.
         
     | 
| 169 | 
         
            +
            <br>
         
     | 
| 170 | 
         
            +
             
     | 
| 171 | 
         
            +
            ## 评测效果(Evaluation)
         
     | 
| 172 | 
         
            +
            我们选取了MMLU,C-Eval,GSM8K, MATH, HumanEval, MBPP, BBH, CMMLU等目前较流行的benchmark,对模型的中英知识能力、翻译、数学推理、代码等能力进行综合评测。从下列结果可以看到Qwen模型在所有benchmark上均取得了同级别开源模型中的最优表现。
         
     | 
| 173 | 
         
            +
             
     | 
| 174 | 
         
            +
            We selected MMLU, C-Eval, GSM8K, MATH, HumanEval, MBPP, BBH, and CMMLU, which are currently popular benchmarks, to evaluate the model's Chinese and English knowledge, translation, mathematical reasoning, coding, and other capabilities. From the following comprehensive evaluation results, we can see that the Qwen models outperform similarly sized open-source models on all tasks.
         
     | 
| 175 | 
         
            +
             
     | 
| 176 | 
         
            +
            | Model              |   MMLU   |  C-Eval  |  GSM8K   |   MATH   | HumanEval |   MBPP   |   BBH    |  CMMLU   |
         
     | 
| 177 | 
         
            +
            |:-------------------|:--------:|:--------:|:--------:|:--------:|:---------:|:--------:|:--------:|:--------:|
         
     | 
| 178 | 
         
            +
            |                    |  5-shot  |  5-shot  |  8-shot  |  4-shot  |  0-shot   |  3-shot  |  3-shot  |  5-shot  |
         
     | 
| 179 | 
         
            +
            | LLaMA2-7B          |   46.8   |   32.5   |   16.7   |   3.3    |   12.8    |   20.8   |   38.2   |   31.8   |
         
     | 
| 180 | 
         
            +
            | LLaMA2-13B         |   55.0   |   41.4   |   29.6   |   5.0    |   18.9    |   30.3   |   45.6   |   38.4   |
         
     | 
| 181 | 
         
            +
            | LLaMA2-34B         |   62.6   |    -     |   42.2   |   6.2    |   22.6    |   33.0   |   44.1   |    -     |
         
     | 
| 182 | 
         
            +
            | ChatGLM2-6B        |   47.9   |   51.7   |   32.4   |   6.5    |     -     |    -     |   33.7   |    -     |
         
     | 
| 183 | 
         
            +
            | InternLM-7B        |   51.0   |   53.4   |   31.2   |   6.3    |   10.4    |   14.0   |   37.0   |   51.8   |
         
     | 
| 184 | 
         
            +
            | InternLM-20B       |   62.1   |   58.8   |   52.6   |   7.9    |   25.6    |   35.6   |   52.5   |   59.0   |
         
     | 
| 185 | 
         
            +
            | Baichuan2-7B       |   54.7   |   56.3   |   24.6   |   5.6    |   18.3    |   24.2   |   41.6   |   57.1   |
         
     | 
| 186 | 
         
            +
            | Baichuan2-13B      |   59.5   |   59.0   |   52.8   |   10.1   |   17.1    |   30.2   |   49.0   |   62.0   |
         
     | 
| 187 | 
         
            +
            | Qwen-7B (original) |   56.7   |   59.6   |   51.6   |    -     |   24.4    |   31.2   |   40.6   |   58.8   |
         
     | 
| 188 | 
         
            +
            | **Qwen-7B**        |   58.2   |   63.5   |   51.7   |   11.6   |   29.9    |   31.6   |   45.0   |   62.2   |
         
     | 
| 189 | 
         
            +
            | **Qwen-14B**       | **66.3** | **72.1** | **61.3** | **24.8** | **32.3**  | **40.8** | **53.4** | **71.0** |
         
     | 
| 190 | 
         
            +
             
     | 
| 191 | 
         
            +
             
     | 
| 192 | 
         
            +
            ### 长序列评测(Long-Context Evaluation)
         
     | 
| 193 | 
         
            +
             
     | 
| 194 | 
         
            +
            我们引入NTK插值,LogN注意力缩放,窗口注意力等技巧,将Qwen-7B (original)和14B模型的上下文长度从2K扩展到8K以上,将Qwen-7B从8K扩到32K。在arXiv数据上使用PPL指标测试Qwen-7B和Qwen-14B在不同长度下的表现,结果如下:
         
     | 
| 195 | 
         
            +
             
     | 
| 196 | 
         
            +
            **(若要启用NTK和LogN注意力缩放,请将config.json里的`use_dynamic_ntk`和`use_logn_attn`设置为true)**
         
     | 
| 197 | 
         
            +
             
     | 
| 198 | 
         
            +
            We introduce NTK-aware interpolation, LogN attention scaling, window attention, and other techniques to extend the context length of Qwen-7B (original) and Qwen-14B from 2K to over 8K tokens, and that of Qwen-7B from 8K to 32K tokens. We conduct language modeling experiments on the arXiv dataset with the PPL evaluation. Results are demonstrated below:
         
     | 
| 199 | 
         
            +
             
     | 
| 200 | 
         
            +
            **(To use NTK interpolation and LogN scaling, please set `use_dynamic_ntk` and `use_logn_attn` to true in config.json.)**
         
     | 
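One way to flip these flags without hand-editing config.json is to override them on the loaded config object. This is a sketch based on the field names stated above (`use_dynamic_ntk`, `use_logn_attn`), not an excerpt from the official repo.

```python
from transformers import AutoConfig, AutoModelForCausalLM

# Sketch: enable the long-context options named above by overriding the loaded
# config instead of hand-editing config.json. Field names follow the card.
config = AutoConfig.from_pretrained('Qwen/Qwen-14B', trust_remote_code=True)
config.use_dynamic_ntk = True   # NTK-aware interpolation
config.use_logn_attn = True     # LogN attention scaling

model = AutoModelForCausalLM.from_pretrained(
    'Qwen/Qwen-14B',
    config=config,
    device_map='auto',
    trust_remote_code=True,
).eval()
```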
| 201 | 
         
            +
            <table>
         
     | 
| 202 | 
         
            +
                <tr>
         
     | 
| 203 | 
         
            +
                    <th rowspan=""2"">Model</th><th colspan=""6"" align=""center"">Sequence Length</th>
         
     | 
| 204 | 
         
            +
                </tr>
         
     | 
| 205 | 
         
            +
                <tr>
         
     | 
| 206 | 
         
            +
                    <th align=""center"">1024</th><th align=""center"">2048</th><th align=""center"">4096</th><th align=""center"">8192</th><th align=""center"">16384</th><th align=""center"">32768</th>
         
     | 
| 207 | 
         
            +
                </tr>
         
     | 
| 208 | 
         
            +
                 <tr>
         
     | 
| 209 | 
         
            +
                    <td>Qwen-7B (original)</td><td align=""center"">4.23</td><td align=""center"">3.78</td><td align=""center"">39.35</td><td align=""center"">469.81</td><td align=""center"">2645.09</td><td align=""center"">-</td>
         
     | 
| 210 | 
         
            +
                </tr>
         
     | 
| 211 | 
         
            +
                <tr>
         
     | 
| 212 | 
         
            +
                    <td>+ dynamic_ntk</td><td align=""center"">4.23</td><td align=""center"">3.78</td><td align=""center"">3.59</td><td align=""center"">3.66</td><td align=""center"">5.71</td><td align=""center"">-</td>
         
     | 
| 213 | 
         
            +
                </tr>
         
     | 
| 214 | 
         
            +
                <tr>
         
     | 
| 215 | 
         
            +
                    <td>+ dynamic_ntk + logn</td><td align=""center"">4.23</td><td align=""center"">3.78</td><td align=""center"">3.58</td><td align=""center"">3.56</td><td align=""center"">4.62</td><td align=""center"">-</td>
         
     | 
| 216 | 
         
            +
                </tr>
         
     | 
| 217 | 
         
            +
                <tr>
         
     | 
| 218 | 
         
            +
                    <td>+ dynamic_ntk + logn + window_attn</td><td align=""center"">4.23</td><td align=""center"">3.78</td><td align=""center"">3.58</td><td align=""center"">3.49</td><td align=""center"">4.32</td><td align=""center"">-</td>
         
     | 
| 219 | 
         
            +
                </tr>
         
     | 
| 220 | 
         
            +
                <tr>
         
     | 
| 221 | 
         
            +
                <tr>
         
     | 
| 222 | 
         
            +
                    <td>Qwen-7B</td><td align=""center""><b>4.23</b></td><td align=""center""><b>3.81</b></td><td align=""center""><b>3.52</b></td><td align=""center""><b>3.31</b></td><td align=""center"">7.27</td><td align=""center"">181.49</td>
         
     | 
| 223 | 
         
            +
                </tr>
         
     | 
| 224 | 
         
            +
                <tr>
         
     | 
| 225 | 
         
            +
                    <td>+ dynamic_ntk + logn + window_attn</td><td align=""center""><b>4.23</b></td><td align=""center""><b>3.81</b></td><td align=""center""><b>3.52</b></td><td align=""center""><b>3.33</b></td><td align=""center""><b>3.22</b></td><td align=""center""><b>3.17</b></td>
         
     | 
| 226 | 
         
            +
                </tr>
         
     | 
| 227 | 
         
            +
                <tr>
         
     | 
| 228 | 
         
            +
                    <td>Qwen-14B</td><td align=""center""><b>-</b></td><td align=""center""><b>3.46</b></td><td align=""center"">22.79</td><td align=""center"">334.65</td><td align=""center"">3168.35</td><td align=""center"">-</td>
         
     | 
| 229 | 
         
            +
                </tr>
         
     | 
| 230 | 
         
            +
                <tr>
         
     | 
| 231 | 
         
            +
                    <td>+ dynamic_ntk + logn + window_attn</td><td align=""center""><b>-</b></td><td align=""center""><b>3.46</b></td><td align=""center""><b>3.29</b></td><td align=""center""><b>3.18</b></td><td align=""center"">3.42</td><td align=""center"">-</td>
         
     | 
| 232 | 
         
            +
                </tr>
         
     | 
| 233 | 
         
            +
            </table>
         
     | 
| 234 | 
         
            +
             
     | 
| 235 | 
         
            +
            ## 评测复现(Reproduction)
         
     | 
| 236 | 
         
            +
             
     | 
| 237 | 
         
            +
            我们提供了评测脚本,方便大家复现模型效果,详见[链接](https://github.com/QwenLM/Qwen/tree/main/eval)。提示:由于硬件和框架造成的舍入误差,复现结果如有小幅波动属于正常现象。
         
     | 
| 238 | 
         
            +
             
     | 
| 239 | 
         
            +
            We have provided evaluation scripts to reproduce the performance of our model, details as [link](https://github.com/QwenLM/Qwen/tree/main/eval).
         
     | 
| 240 | 
         
            +
            <br>
         
     | 
| 241 | 
         
            +
             
     | 
| 242 | 
         
            +
            ## FAQ
         
     | 
| 243 | 
         
            +
             
     | 
| 244 | 
         
            +
            如遇到问题,敬请查阅[FAQ](https://github.com/QwenLM/Qwen/blob/main/FAQ_zh.md)以及issue区,如仍无法解决再提交issue。
         
     | 
| 245 | 
         
            +
             
     | 
| 246 | 
         
            +
            If you meet problems, please refer to [FAQ](https://github.com/QwenLM/Qwen/blob/main/FAQ.md) and the issues first to search a solution before you launch a new issue.
         
     | 
| 247 | 
         
            +
            <br>
         
     | 
| 248 | 
         
            +
             
     | 
| 249 | 
         
            +
            ## 引用 (Citation)
         
     | 
| 250 | 
         
            +
             
     | 
| 251 | 
         
            +
            如果你觉得我们的工作对你有帮助,欢迎引用!
         
     | 
| 252 | 
         
            +
             
     | 
| 253 | 
         
            +
            If you find our work helpful, feel free to give us a cite.
         
     | 
| 254 | 
         
            +
             
     | 
| 255 | 
         
            +
            ```
         
     | 
| 256 | 
         
            +
            @article{qwen,
         
     | 
| 257 | 
         
            +
              title={Qwen Technical Report},
         
     | 
| 258 | 
         
            +
              author={Jinze Bai and Shuai Bai and Yunfei Chu and Zeyu Cui and Kai Dang and Xiaodong Deng and Yang Fan and Wenbin Ge and Yu Han and Fei Huang and Binyuan Hui and Luo Ji and Mei Li and Junyang Lin and Runji Lin and Dayiheng Liu and Gao Liu and Chengqiang Lu and Keming Lu and Jianxin Ma and Rui Men and Xingzhang Ren and Xuancheng Ren and Chuanqi Tan and Sinan Tan and Jianhong Tu and Peng Wang and Shijie Wang and Wei Wang and Shengguang Wu and Benfeng Xu and Jin Xu and An Yang and Hao Yang and Jian Yang and Shusheng Yang and Yang Yao and Bowen Yu and Hongyi Yuan and Zheng Yuan and Jianwei Zhang and Xingxuan Zhang and Yichang Zhang and Zhenru Zhang and Chang Zhou and Jingren Zhou and Xiaohuan Zhou and Tianhang Zhu},
         
     | 
| 259 | 
         
            +
              journal={arXiv preprint arXiv:2309.16609},
         
     | 
| 260 | 
         
            +
              year={2023}
         
     | 
| 261 | 
         
            +
            }
         
     | 
| 262 | 
         
            +
            ```
         
     | 
| 263 | 
         
            +
            <br>
         
     | 
| 264 | 
         
            +
             
     | 
| 265 | 
         
            +
            ## 使用协议(License Agreement)
         
     | 
| 266 | 
         
            +
             
     | 
| 267 | 
         
            +
            我们的代码和模型权重对学术研究完全开放,并支持商用。请查看[LICENSE](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT)了解具体的开源协议细节。如需商用,请填写[问卷](https://dashscope.console.aliyun.com/openModelApply/Qwen-14B-Chat)申请。
         
     | 
| 268 | 
         
            +
             
     | 
| 269 | 
         
            +
            Our code and checkpoints are fully open for academic research, and commercial use is also permitted. Check [LICENSE](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT) for more details about the license. If you have requirements for commercial use, please fill out the [form](https://dashscope.console.aliyun.com/openModelApply/Qwen-14B-Chat) to apply.
         
     | 
| 270 | 
         
            +
            <br>
         
     | 
| 271 | 
         
            +
             
     | 
| 272 | 
         
            +
            ## 联系我们(Contact Us)
         
     | 
| 273 | 
         
            +
             
     | 
| 274 | 
         
            +
            如果你想给我们的研发团队和产品团队留言,欢迎加入我们的微信群、钉钉群以及Discord!同时,也欢迎通过邮件(qianwen_opensource@alibabacloud.com)联系我们。
         
     | 
| 275 | 
         
            +
             
     | 
| 276 | 
         
            +
            If you are interested to leave a message to either our research team or product team, join our Discord or WeChat groups! Also, feel free to send an email to qianwen_opensource@alibabacloud.com.
         
     | 
| 277 | 
         
            +
             
     | 
| 278 | 
         
            +
            ","{""id"": ""Qwen/Qwen-14B"", ""author"": ""Qwen"", ""sha"": ""c4051215126d906ac22bb67fe5edb39a921cd831"", ""last_modified"": ""2023-12-13 15:44:30+00:00"", ""created_at"": ""2023-09-24 03:28:41+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 3224, ""downloads_all_time"": null, ""likes"": 208, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""qwen"", ""text-generation"", ""custom_code"", ""zh"", ""en"", ""arxiv:2309.16609"", ""autotrain_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- zh\n- en\npipeline_tag: text-generation\ntags:\n- qwen\ninference: false"", ""widget_data"": [{""text"": ""\u6211\u53eb\u6731\u5229\u5b89\uff0c\u6211\u559c\u6b22""}, {""text"": ""\u6211\u53eb\u6258\u9a6c\u65af\uff0c\u6211\u7684\u4e3b\u8981""}, {""text"": ""\u6211\u53eb\u739b\u4e3d\u4e9a\uff0c\u6211\u6700\u559c\u6b22\u7684""}, {""text"": ""\u6211\u53eb\u514b\u62c9\u62c9\uff0c\u6211\u662f""}, {""text"": ""\u4ece\u524d\uff0c""}], ""model_index"": null, ""config"": {""architectures"": [""QWenLMHeadModel""], ""auto_map"": {""AutoConfig"": ""configuration_qwen.QWenConfig"", ""AutoModelForCausalLM"": ""modeling_qwen.QWenLMHeadModel""}, ""model_type"": ""qwen"", ""tokenizer_config"": {}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_qwen.QWenLMHeadModel"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='NOTICE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/logo.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/qwen_tokenizer.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/tokenizer.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/wechat.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cache_autogptq_cuda_256.cpp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cache_autogptq_cuda_kernel_256.cu', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_qwen.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cpp_kernels.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00015.safetensors', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00012-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00013-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00014-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00015-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_qwen.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen.tiktoken', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen_generation_utils.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_qwen.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""eduagarcia/open_pt_llm_leaderboard"", ""Justinrune/LLaMA-Factory"", ""kenken999/fastapi_django_main_live"", ""officialhimanshu595/llama-factory"", ""44brabal/ai"", ""pennxp/qianwen"", ""msun415/Llamole""], ""safetensors"": {""parameters"": {""BF16"": 14167290880}, ""total"": 14167290880}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-12-13 15:44:30+00:00"", ""cardData"": ""language:\n- zh\n- en\npipeline_tag: text-generation\ntags:\n- qwen\ninference: false"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_qwen.QWenLMHeadModel"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""650face9be2dd282dcbd6b80"", ""modelId"": ""Qwen/Qwen-14B"", ""usedStorage"": 28334617016}",0,"https://huggingface.co/rinna/nekomata-14b, https://huggingface.co/imdatta0/qwen_OpenAssistant_oasst_top1_2023-08-25, https://huggingface.co/imdatta0/qwen_nampdn-ai_tiny-textbooks, https://huggingface.co/imdatta0/qwen_databricks_databricks-dolly-15k, https://huggingface.co/imdatta0/qwen_cnn_dailymail, https://huggingface.co/imdatta0/qwen_jeopardy",6,"https://huggingface.co/quyanh/qwen-14b-neurips-v1, https://huggingface.co/quyanh/qwen-14b-neurips-v2, https://huggingface.co/quyanh/qwen-14b-neurips-a100, https://huggingface.co/llm-efficiency-challenge/QuyAnh2005-A100-qwen-qwen-finetune, https://huggingface.co/chenhaodev/qwen-14b-onc-v1, https://huggingface.co/ytcheng/Qwen-14B-lora-pretrain, https://huggingface.co/raflisyawal5/qwen-14b-news-finetune, https://huggingface.co/raflisyawal5/Qwen-17B-V2-ArticleGEN",8,"https://huggingface.co/mradermacher/Qwen-14B-i1-GGUF, https://huggingface.co/mradermacher/Qwen-14B-GGUF",2,,0,"44brabal/ai, Justinrune/LLaMA-Factory, eduagarcia/open_pt_llm_leaderboard, huggingface/InferenceSupport/discussions/new?title=Qwen/Qwen-14B&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BQwen%2FQwen-14B%5D(%2FQwen%2FQwen-14B)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kenken999/fastapi_django_main_live, msun415/Llamole, officialhimanshu595/llama-factory, pennxp/qianwen",8
         
     | 
| 279 | 
         
            +
            rinna/nekomata-14b,"---
         
     | 
| 280 | 
         
            +
            thumbnail: https://github.com/rinnakk/japanese-pretrained-models/blob/master/rinna.png
         
     | 
| 281 | 
         
            +
            datasets:
         
     | 
| 282 | 
         
            +
            - mc4
         
     | 
| 283 | 
         
            +
            - wikipedia
         
     | 
| 284 | 
         
            +
            - EleutherAI/pile
         
     | 
| 285 | 
         
            +
            - oscar-corpus/colossal-oscar-1.0
         
     | 
| 286 | 
         
            +
            - cc100
         
     | 
| 287 | 
         
            +
            language:
         
     | 
| 288 | 
         
            +
            - ja
         
     | 
| 289 | 
         
            +
            - en
         
     | 
| 290 | 
         
            +
            tags:
         
     | 
| 291 | 
         
            +
            - qwen
         
     | 
| 292 | 
         
            +
            inference: false
         
     | 
| 293 | 
         
            +
            license: other
         
     | 
| 294 | 
         
            +
            license_name: tongyi-qianwen-license-agreement
         
     | 
| 295 | 
         
            +
            license_link: >-
         
     | 
| 296 | 
         
            +
              https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT
         
     | 
| 297 | 
         
            +
            base_model: Qwen/Qwen-14B
         
     | 
| 298 | 
         
            +
            ---
         
     | 
| 299 | 
         
            +
             
     | 
| 300 | 
         
            +
            # `rinna/nekomata-14b`
         
     | 
| 301 | 
         
            +
             
     | 
| 302 | 
         
            +
            
         
     | 
| 303 | 
         
            +
             
     | 
| 304 | 
         
            +
            # Overview
         
     | 
| 305 | 
         
            +
            We conduct continual pre-training of [qwen-14b](https://huggingface.co/Qwen/Qwen-14B) on **66B** tokens from a mixture of Japanese and English datasets. The continual pre-training significantly improves the model's performance on Japanese tasks. It also inherits the following great features from the original Qwen model.
         
     | 
| 306 | 
         
            +
            * The inclusive Qwen vocabulary (vocab size > 150k) enables the model to process Japanese texts much more efficiently than the previously released [youri series](https://huggingface.co/collections/rinna/youri-7b-654053610cb8e9d8e6289efc).
         
     | 
| 307 | 
         
            +
            * The model supports a maximum sequence length of 8192.
         
     | 
| 308 | 
         
            +
             
     | 
| 309 | 
         
            +
            The name `nekomata` comes from the Japanese word [`猫又/ねこまた/Nekomata`](https://ja.wikipedia.org/wiki/%E7%8C%AB%E5%8F%88), which is a kind of Japanese mythical creature ([`妖怪/ようかい/Youkai`](https://ja.wikipedia.org/wiki/%E5%A6%96%E6%80%AA)).
         
     | 
| 310 | 
         
            +
             
     | 
| 311 | 
         
            +
             
     | 
| 312 | 
         
            +
            * **Library**
         
     | 
| 313 | 
         
            +
             
     | 
| 314 | 
         
            +
                The model was trained using code based on [aws-neuron/neuronx-nemo-megatron](https://github.com/aws-neuron/neuronx-nemo-megatron/).
         
     | 
| 315 | 
         
            +
             
     | 
| 316 | 
         
            +
            * **Model architecture**
         
     | 
| 317 | 
         
            +
             
     | 
| 318 | 
         
            +
                A 40-layer, 5120-hidden-size transformer-based language model. Please refer to the [Qwen paper](https://arxiv.org/abs/2309.16609) for architecture details.
         
     | 
| 319 | 
         
            +
             
     | 
| 320 | 
         
            +
            * **Continual pre-training**
         
     | 
| 321 | 
         
            +
             
     | 
| 322 | 
         
            +
                The model was initialized with the [qwen-14b](https://huggingface.co/Qwen/Qwen-14B) model and continually trained on around **66B** tokens from a mixture of the following corpora
         
     | 
| 323 | 
         
            +
                - [Japanese CC-100](http://data.statmt.org/cc-100/ja.txt.xz)
         
     | 
| 324 | 
         
            +
                - [Japanese C4](https://huggingface.co/datasets/mc4)
         
     | 
| 325 | 
         
            +
                - [Japanese OSCAR](https://huggingface.co/datasets/oscar-corpus/colossal-oscar-1.0)
         
     | 
| 326 | 
         
            +
                - [The Pile](https://huggingface.co/datasets/EleutherAI/pile)
         
     | 
| 327 | 
         
            +
                - [Wikipedia](https://dumps.wikimedia.org/other/cirrussearch)
         
     | 
| 328 | 
         
            +
                - rinna curated Japanese dataset
         
     | 
| 329 | 
         
            +
             
     | 
| 330 | 
         
            +
            * **Training Infrastructure**
         
     | 
| 331 | 
         
            +
             
     | 
| 332 | 
         
            +
                `nekomata-14B` was trained on 16 nodes of Amazon EC2 trn1.32xlarge instances powered by AWS Trainium purpose-built ML accelerator chips. The pre-training job was completed in approximately 7 days.
         
     | 
| 333 | 
         
            +
             
     | 
| 334 | 
         
            +
            * **Contributors**
         
     | 
| 335 | 
         
            +
             
     | 
| 336 | 
         
            +
                - [Tianyu Zhao](https://huggingface.co/tianyuz)
         
     | 
| 337 | 
         
            +
                - [Akio Kaga](https://huggingface.co/rakaga)
         
     | 
| 338 | 
         
            +
                - [Kei Sawada](https://huggingface.co/keisawada)
         
     | 
| 339 | 
         
            +
                
         
     | 
| 340 | 
         
            +
            * **Release date**
         
     | 
| 341 | 
         
            +
             
     | 
| 342 | 
         
            +
                December 21, 2023
         
     | 
| 343 | 
         
            +
             
     | 
| 344 | 
         
            +
            ---
         
     | 
| 345 | 
         
            +
             
     | 
| 346 | 
         
            +
            # Benchmarking
         
     | 
| 347 | 
         
            +
            Please refer to [rinna's LM benchmark page (Sheet 20231221)](https://rinnakk.github.io/research/benchmarks/lm/index.html).
         
     | 
| 348 | 
         
            +
             
     | 
| 349 | 
         
            +
            ---
         
     | 
| 350 | 
         
            +
             
     | 
| 351 | 
         
            +
            # How to use the model
         
     | 
| 352 | 
         
            +
             
     | 
| 353 | 
         
            +
            ~~~~python
         
     | 
| 354 | 
         
            +
            import torch
         
     | 
| 355 | 
         
            +
            from transformers import AutoTokenizer, AutoModelForCausalLM
         
     | 
| 356 | 
         
            +
             
     | 
| 357 | 
         
            +
            tokenizer = AutoTokenizer.from_pretrained(""rinna/nekomata-14b"", trust_remote_code=True)
         
     | 
| 358 | 
         
            +
             
     | 
| 359 | 
         
            +
            # Use GPU with bf16
         
     | 
| 360 | 
         
            +
            # model = AutoModelForCausalLM.from_pretrained(""rinna/nekomata-14b"", device_map=""auto"", trust_remote_code=True, bf16=True)
         
     | 
| 361 | 
         
            +
             
     | 
| 362 | 
         
            +
            # Use GPU with fp16
         
     | 
| 363 | 
         
            +
            # model = AutoModelForCausalLM.from_pretrained(""rinna/nekomata-14b"", device_map=""auto"", trust_remote_code=True, fp16=True)
         
     | 
| 364 | 
         
            +
             
     | 
| 365 | 
         
            +
            # Use CPU
         
     | 
| 366 | 
         
            +
            # model = AutoModelForCausalLM.from_pretrained(""rinna/nekomata-14b"", device_map=""cpu"", trust_remote_code=True)
         
     | 
| 367 | 
         
            +
             
     | 
| 368 | 
         
            +
            # Automatically select device and precision
         
     | 
| 369 | 
         
            +
            model = AutoModelForCausalLM.from_pretrained(""rinna/nekomata-14b"", device_map=""auto"", trust_remote_code=True)
         
     | 
| 370 | 
         
            +
             
     | 
| 371 | 
         
            +
            text = ""西田幾多郎は、""
         
     | 
| 372 | 
         
            +
            token_ids = tokenizer.encode(text, add_special_tokens=False, return_tensors=""pt"")
         
     | 
| 373 | 
         
            +
             
     | 
| 374 | 
         
            +
            with torch.no_grad():
         
     | 
| 375 | 
         
            +
                output_ids = model.generate(
         
     | 
| 376 | 
         
            +
                    token_ids.to(model.device),
         
     | 
| 377 | 
         
            +
                    max_new_tokens=200,
         
     | 
| 378 | 
         
            +
                    min_new_tokens=200,
         
     | 
| 379 | 
         
            +
                    do_sample=True,
         
     | 
| 380 | 
         
            +
                    temperature=1.0,
         
     | 
| 381 | 
         
            +
                    top_p=0.95,
         
     | 
| 382 | 
         
            +
                    pad_token_id=tokenizer.pad_token_id,
         
     | 
| 383 | 
         
            +
                    bos_token_id=tokenizer.bos_token_id,
         
     | 
| 384 | 
         
            +
                    eos_token_id=tokenizer.eos_token_id
         
     | 
| 385 | 
         
            +
                )
         
     | 
| 386 | 
         
            +
             
     | 
| 387 | 
         
            +
            output = tokenizer.decode(output_ids.tolist()[0])
         
     | 
| 388 | 
         
            +
            print(output)
         
     | 
| 389 | 
         
            +
            ~~~~
         
     | 
| 390 | 
         
            +
             
     | 
| 391 | 
         
            +
            ---
         
     | 
| 392 | 
         
            +
             
     | 
| 393 | 
         
            +
            # Tokenization
         
     | 
| 394 | 
         
            +
            The model uses the original Qwen tokenizer. It augments the [`cl100k` tiktoken tokenizer](https://github.com/openai/tiktoken) and has a vocabulary size of 151,936. The inclusive vocabulary helps the model to reach a better tokenization efficiency, especially for Japanese texts.
         
     | 
| 395 | 
         
            +
             
     | 
| 396 | 
         
            +
            We compared the `Qwen` tokenizer (as used in `nekomata`) and the `llama-2` tokenizer (as used in `youri`) on different text collections and found that the Qwen tokenizer achieves a much better byte2token rate (i.e. the average number of tokens produced from one byte of text), as shown below. A lower byte2token rate indicates better tokenization efficiency.
         
     | 
| 397 | 
         
            +
             
     | 
| 398 | 
         
            +
             
     | 
| 399 | 
         
            +
            | Tokenizer | Japanese | English | Multilingual |
         
     | 
| 400 | 
         
            +
            | --- | --- | --- | --- |
         
     | 
| 401 | 
         
            +
            | Qwen | 0.24 | 0.27 | 0.27 |
         
     | 
| 402 | 
         
            +
            | llama-2 | 0.40 | 0.29 | 0.36 |
         
     | 
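The byte2token rate above can be approximated with a short script. This is a sketch, not rinna's exact evaluation setup; it only assumes the `add_special_tokens` keyword already used in the usage example earlier in this card.

```python
from transformers import AutoTokenizer

# Sketch (not rinna's exact evaluation): tokens produced per UTF-8 byte; lower is better.
def byte2token_rate(tokenizer, text):
    n_tokens = len(tokenizer.encode(text, add_special_tokens=False))
    n_bytes = len(text.encode('utf-8'))
    return n_tokens / n_bytes

tok = AutoTokenizer.from_pretrained('rinna/nekomata-14b', trust_remote_code=True)
print(round(byte2token_rate(tok, '西田幾多郎は、日本を代表する哲学者である。'), 2))
```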
| 403 | 
         
            +
             
     | 
| 404 | 
         
            +
            ---
         
     | 
| 405 | 
         
            +
             
     | 
| 406 | 
         
            +
            # How to cite
         
     | 
| 407 | 
         
            +
            ```bibtex
         
     | 
| 408 | 
         
            +
            @misc{rinna-nekomata-14b,
         
     | 
| 409 | 
         
            +
                title = {rinna/nekomata-14b},
         
     | 
| 410 | 
         
            +
                author = {Zhao, Tianyu and Kaga, Akio and Sawada, Kei},
         
     | 
| 411 | 
         
            +
                url = {https://huggingface.co/rinna/nekomata-14b}
         
     | 
| 412 | 
         
            +
            }
         
     | 
| 413 | 
         
            +
             
     | 
| 414 | 
         
            +
            @inproceedings{sawada2024release,
         
     | 
| 415 | 
         
            +
                title = {Release of Pre-Trained Models for the {J}apanese Language},
         
     | 
| 416 | 
         
            +
                author = {Sawada, Kei and Zhao, Tianyu and Shing, Makoto and Mitsui, Kentaro and Kaga, Akio and Hono, Yukiya and Wakatsuki, Toshiaki and Mitsuda, Koh},
         
     | 
| 417 | 
         
            +
                booktitle = {Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024)},
         
     | 
| 418 | 
         
            +
                month = {5},
         
     | 
| 419 | 
         
            +
                year = {2024},
         
     | 
| 420 | 
         
            +
                pages = {13898--13905},
         
     | 
| 421 | 
         
            +
                url = {https://aclanthology.org/2024.lrec-main.1213},
         
     | 
| 422 | 
         
            +
                note = {\url{https://arxiv.org/abs/2404.01657}}
         
     | 
| 423 | 
         
            +
            }
         
     | 
| 424 | 
         
            +
            ```
         
     | 
| 425 | 
         
            +
            ---
         
     | 
| 426 | 
         
            +
             
     | 
| 427 | 
         
            +
            # License
         
     | 
| 428 | 
         
            +
            [Tongyi Qianwen LICENSE AGREEMENT](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT)","{""id"": ""rinna/nekomata-14b"", ""author"": ""rinna"", ""sha"": ""c387cd32bff9f2ac6d3f20f4b96cc4dc2488e18d"", ""last_modified"": ""2025-03-23 11:30:23+00:00"", ""created_at"": ""2023-12-19 08:09:51+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 692, ""downloads_all_time"": null, ""likes"": 20, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""safetensors"", ""qwen"", ""text-generation"", ""custom_code"", ""ja"", ""en"", ""dataset:mc4"", ""dataset:wikipedia"", ""dataset:EleutherAI/pile"", ""dataset:oscar-corpus/colossal-oscar-1.0"", ""dataset:cc100"", ""arxiv:2309.16609"", ""arxiv:2404.01657"", ""base_model:Qwen/Qwen-14B"", ""base_model:finetune:Qwen/Qwen-14B"", ""license:other"", ""autotrain_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: Qwen/Qwen-14B\ndatasets:\n- mc4\n- wikipedia\n- EleutherAI/pile\n- oscar-corpus/colossal-oscar-1.0\n- cc100\nlanguage:\n- ja\n- en\nlicense: other\nlicense_name: tongyi-qianwen-license-agreement\nlicense_link: https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT\ntags:\n- qwen\nthumbnail: https://github.com/rinnakk/japanese-pretrained-models/blob/master/rinna.png\ninference: false"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""QWenLMHeadModel""], ""auto_map"": {""AutoConfig"": ""configuration_qwen.QWenConfig"", ""AutoModelForCausalLM"": ""modeling_qwen.QWenLMHeadModel""}, ""model_type"": ""qwen"", ""tokenizer_config"": {""bos_token"": ""<|endoftext|>"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|extra_204|>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_qwen.QWenLMHeadModel"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='NOTICE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cache_autogptq_cuda_256.cpp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cache_autogptq_cuda_kernel_256.cu', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_qwen.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cpp_kernels.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_qwen.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00003.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00003.bin', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='pytorch_model-00003-of-00003.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen.tiktoken', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen_generation_utils.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='rinna.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_qwen.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 14167290880}, ""total"": 14167290880}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-23 11:30:23+00:00"", ""cardData"": ""base_model: Qwen/Qwen-14B\ndatasets:\n- mc4\n- wikipedia\n- EleutherAI/pile\n- oscar-corpus/colossal-oscar-1.0\n- cc100\nlanguage:\n- ja\n- en\nlicense: other\nlicense_name: tongyi-qianwen-license-agreement\nlicense_link: https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT\ntags:\n- qwen\nthumbnail: https://github.com/rinnakk/japanese-pretrained-models/blob/master/rinna.png\ninference: false"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_qwen.QWenLMHeadModel"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""65814fcf4d5a928775653071"", ""modelId"": ""rinna/nekomata-14b"", ""usedStorage"": 56669308470}",1,https://huggingface.co/rinna/nekomata-14b-instruction,1,,0,"https://huggingface.co/rinna/nekomata-14b-gguf, https://huggingface.co/mradermacher/nekomata-14b-GGUF, https://huggingface.co/mradermacher/nekomata-14b-i1-GGUF",3,,0,huggingface/InferenceSupport/discussions/new?title=rinna/nekomata-14b&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Brinna%2Fnekomata-14b%5D(%2Frinna%2Fnekomata-14b)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
     | 
| 429 | 
         
            +
            rinna/nekomata-14b-instruction,"---
         
     | 
| 430 | 
         
            +
            thumbnail: https://github.com/rinnakk/japanese-pretrained-models/blob/master/rinna.png
         
     | 
| 431 | 
         
            +
            datasets:
         
     | 
| 432 | 
         
            +
            - databricks/databricks-dolly-15k
         
     | 
| 433 | 
         
            +
            - kunishou/databricks-dolly-15k-ja
         
     | 
| 434 | 
         
            +
            - izumi-lab/llm-japanese-dataset
         
     | 
| 435 | 
         
            +
            language:
         
     | 
| 436 | 
         
            +
            - ja
         
     | 
| 437 | 
         
            +
            - en
         
     | 
| 438 | 
         
            +
            tags:
         
     | 
| 439 | 
         
            +
            - qwen
         
     | 
| 440 | 
         
            +
            inference: false
         
     | 
| 441 | 
         
            +
            license: other
         
     | 
| 442 | 
         
            +
            license_name: tongyi-qianwen-license-agreement
         
     | 
| 443 | 
         
            +
            license_link: >-
         
     | 
| 444 | 
         
            +
              https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT
         
     | 
| 445 | 
         
            +
            base_model: rinna/nekomata-14b
         
     | 
| 446 | 
         
            +
            ---
         
     | 
| 447 | 
         
            +
             
     | 
| 448 | 
         
            +
            # `rinna/nekomata-14b-instruction`
         
     | 
| 449 | 
         
            +
             
     | 
| 450 | 
         
            +
            
         
     | 
| 451 | 
         
            +
             
     | 
| 452 | 
         
            +
            # Overview
         
     | 
| 453 | 
         
            +
            The model is the instruction-tuned version of [`rinna/nekomata-14b`](https://huggingface.co/rinna/nekomata-14b). It adopts the Alpaca input format.
         
     | 
| 454 | 
         
            +
             
     | 
| 455 | 
         
            +
            * **Model architecture**
         
     | 
| 456 | 
         
            +
             
     | 
| 457 | 
         
            +
                A 40-layer, 5120-hidden-size transformer-based language model. Please refer to the [Qwen paper](https://arxiv.org/abs/2309.16609) for architecture details.
         
     | 
| 458 | 
         
            +
             
     | 
| 459 | 
         
            +
            * **Fine-tuning**
         
     | 
| 460 | 
         
            +
                
         
     | 
| 461 | 
         
            +
                The fine-tuning data is a subset of the following datasets; a minimal sketch of converting one such record into the Alpaca format appears after this list.
         
     | 
| 462 | 
         
            +
                * [Databricks Dolly data](https://huggingface.co/datasets/databricks/databricks-dolly-15k)
         
     | 
| 463 | 
         
            +
                * [Japanese Databricks Dolly data](https://huggingface.co/datasets/kunishou/databricks-dolly-15k-ja)
         
     | 
| 464 | 
         
            +
                * [FLAN Instruction Tuning data](https://github.com/google-research/FLAN) and its Japanese translation
         
     | 
| 465 | 
         
            +
                * [Izumi lab LLM Japanese dataset](https://github.com/masanorihirano/llm-japanese-dataset/tree/main)
         
     | 
| 466 | 
         
            +
                  * The following sections are used:
         
     | 
| 467 | 
         
            +
                    * alt
         
     | 
| 468 | 
         
            +
                    * aozora-txt
         
     | 
| 469 | 
         
            +
                    * CourseraParallel
         
     | 
| 470 | 
         
            +
                    * ParaNatCom
         
     | 
| 471 | 
         
            +
                    * Tab-delimited_Bilingual_Sentence_Pairs
         
     | 
| 472 | 
         
            +
                    * tanaka-corpus
         
     | 
| 473 | 
         
            +
                    * wikinews
         
     | 
| 474 | 
         
            +
                    * wordnet
         
     | 
| 475 | 
         
            +
                    * yasashi-japanese
         
     | 
| 476 | 
         
            +
                  * The [remaining sections](https://github.com/masanorihirano/llm-japanese-dataset/tree/main/datasets-cc-by-sa) contain commonly used evaluation corpora, so they are skipped to prevent data leakage.
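                As a rough illustration of how a record from these sources could be rendered into the Alpaca format the model expects, the sketch below converts a single Databricks Dolly record. The exact preprocessing used for fine-tuning is not documented here, so the field mapping and template wiring are assumptions, not the authors' pipeline.

                ```python
                from datasets import load_dataset

                # Alpaca-style template, as shown in the "How to use the model" section below.
                PROMPT_WITH_INPUT = (
                    "以下は、タスクを説明する指示と、文脈のある入力の組み合わせです。"
                    "要求を適切に満たす応答を書きなさい。\n\n"
                    "### 指示:\n{instruction}\n\n### 入力:\n{input}\n\n### 応答:\n"
                )

                def dolly_to_alpaca(record):
                    # Field names follow databricks/databricks-dolly-15k; the mapping is an assumption.
                    prompt = PROMPT_WITH_INPUT.format(
                        instruction=record["instruction"],
                        input=record.get("context", ""),
                    )
                    return {"prompt": prompt, "completion": record["response"]}

                dolly = load_dataset("databricks/databricks-dolly-15k", split="train")
                print(dolly_to_alpaca(dolly[0])["prompt"])
                ```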
         
     | 
| 477 | 
         
            +
             
     | 
| 478 | 
         
            +
            * **Contributors**
         
     | 
| 479 | 
         
            +
             
     | 
| 480 | 
         
            +
                - [Tianyu Zhao](https://huggingface.co/tianyuz)
         
     | 
| 481 | 
         
            +
                - [Kei Sawada](https://huggingface.co/keisawada)
         
     | 
| 482 | 
         
            +
                
         
     | 
| 483 | 
         
            +
            * **Release date**
         
     | 
| 484 | 
         
            +
             
     | 
| 485 | 
         
            +
                December 21, 2023
         
     | 
| 486 | 
         
            +
             
     | 
| 487 | 
         
            +
            ---
         
     | 
| 488 | 
         
            +
             
     | 
| 489 | 
         
            +
            # Benchmarking
         
     | 
| 490 | 
         
            +
            Please refer to [rinna's LM benchmark page (Sheet 20231221)](https://rinnakk.github.io/research/benchmarks/lm/index.html).
         
     | 
| 491 | 
         
            +
             
     | 
| 492 | 
         
            +
            ---
         
     | 
| 493 | 
         
            +
             
     | 
| 494 | 
         
            +
            # How to use the model
         
     | 
| 495 | 
         
            +
             
     | 
| 496 | 
         
            +
            ~~~~python
         
     | 
| 497 | 
         
            +
            import torch
         
     | 
| 498 | 
         
            +
            from transformers import AutoTokenizer, AutoModelForCausalLM
         
     | 
| 499 | 
         
            +
             
     | 
| 500 | 
         
            +
            tokenizer = AutoTokenizer.from_pretrained(""rinna/nekomata-14b-instruction"", trust_remote_code=True)
         
     | 
| 501 | 
         
            +
             
     | 
| 502 | 
         
            +
            # Use GPU with bf16
         
     | 
| 503 | 
         
            +
            # model = AutoModelForCausalLM.from_pretrained(""rinna/nekomata-14b-instruction"", device_map=""auto"", trust_remote_code=True, bf16=True)
         
     | 
| 504 | 
         
            +
             
     | 
| 505 | 
         
            +
            # Use GPU with fp16
         
     | 
| 506 | 
         
            +
            # model = AutoModelForCausalLM.from_pretrained(""rinna/nekomata-14b-instruction"", device_map=""auto"", trust_remote_code=True, fp16=True)
         
     | 
| 507 | 
         
            +
             
     | 
| 508 | 
         
            +
            # Use CPU
         
     | 
| 509 | 
         
            +
            # model = AutoModelForCausalLM.from_pretrained(""rinna/nekomata-14b-instruction"", device_map=""cpu"", trust_remote_code=True)
         
     | 
| 510 | 
         
            +
             
     | 
| 511 | 
         
            +
            # Automatically select device and precision
         
     | 
| 512 | 
         
            +
            model = AutoModelForCausalLM.from_pretrained(""rinna/nekomata-14b-instruction"", device_map=""auto"", trust_remote_code=True)
         
     | 
| 513 | 
         
            +
             
     | 
| 514 | 
         
            +
            instruction = ""次の日本語を英語に翻訳してください。""
         
     | 
| 515 | 
         
            +
            input = ""大規模言語モデル(だいきぼげんごモデル、英: large language model、LLM)は、多数のパラメータ(数千万から数十億)を持つ人工ニューラルネットワークで構成されるコンピュータ言語モデルで、膨大なラベルなしテキストを使用して自己教師あり学習または半教師あり学習によって訓練が行われる。""
         
     | 
| 516 | 
         
            +
            prompt = f""""""
         
     | 
| 517 | 
         
            +
            以下は、タスクを説明する指示と、文脈のある入力の組み合わせです。要求を適切に満たす応答を書きなさい。
         
     | 
| 518 | 
         
            +
             
     | 
| 519 | 
         
            +
            ### 指示:
         
     | 
| 520 | 
         
            +
            {instruction}
         
     | 
| 521 | 
         
            +
             
     | 
| 522 | 
         
            +
            ### 入力:
         
     | 
| 523 | 
         
            +
            {input}
         
     | 
| 524 | 
         
            +
             
     | 
| 525 | 
         
            +
            ### 応答:
         
     | 
| 526 | 
         
            +
            """"""
         
     | 
| 527 | 
         
            +
            token_ids = tokenizer.encode(prompt, add_special_tokens=False, return_tensors=""pt"")
         
     | 
| 528 | 
         
            +
             
     | 
| 529 | 
         
            +
            with torch.no_grad():
         
     | 
| 530 | 
         
            +
                output_ids = model.generate(
         
     | 
| 531 | 
         
            +
                    token_ids.to(model.device),
         
     | 
| 532 | 
         
            +
                    max_new_tokens=200,
         
     | 
| 533 | 
         
            +
                    do_sample=True,
         
     | 
| 534 | 
         
            +
                    temperature=0.5,
         
     | 
| 535 | 
         
            +
                    pad_token_id=tokenizer.pad_token_id,
         
     | 
| 536 | 
         
            +
                    bos_token_id=tokenizer.bos_token_id,
         
     | 
| 537 | 
         
            +
                    eos_token_id=tokenizer.eos_token_id
         
     | 
| 538 | 
         
            +
                )
         
     | 
| 539 | 
         
            +
             
     | 
| 540 | 
         
            +
            output = tokenizer.decode(output_ids.tolist()[0])
         
     | 
| 541 | 
         
            +
            print(output)
         
     | 
| 542 | 
         
            +
            """"""
         
     | 
| 543 | 
         
            +
            以下は、タスクを説明する指示と、文脈のある入力の組み合わせです。要求を適切に満たす応答を書きなさい。
         
     | 
| 544 | 
         
            +
             
     | 
| 545 | 
         
            +
            ### 指示:
         
     | 
| 546 | 
         
            +
            次の日本語を英語に翻訳してください。
         
     | 
| 547 | 
         
            +
             
     | 
| 548 | 
         
            +
            ### 入力:
         
     | 
| 549 | 
         
            +
            大規模言語モデル(だいきぼげんごモデル、英: large language model、LLM)は、多数のパラメータ(数千万から数十億)を持つ人工ニューラルネットワークで構成されるコンピュータ言語モデルで、膨大なラベルなしテキストを使用して自己教師あり学習または半教師あり学習によって訓練が行われる。
         
     | 
| 550 | 
         
            +
             
     | 
| 551 | 
         
            +
            ### 応答:
         
     | 
| 552 | 
         
            +
             A large language model (LLM) is a computer language model composed of artificial neural networks with many parameters (from tens of millions to billions) trained by self-supervised learning or semi-supervised learning using a large amount of unlabeled text.<|endoftext|>
         
     | 
| 553 | 
         
            +
            """"""
         
     | 
| 554 | 
         
            +
            ~~~~
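            The decoded string above echoes the entire prompt and ends with `<|endoftext|>`. If only the generated answer is needed, one option (a small addition, not part of the original card) is to decode just the tokens produced after the prompt, continuing from the variables defined in the example above:

            ```python
            # Keep only the newly generated tokens (everything after the prompt), then decode.
            generated_ids = output_ids[0][token_ids.shape[1]:]
            answer = tokenizer.decode(generated_ids.tolist())
            # Trim the end-of-text marker that closes the generation.
            answer = answer.replace("<|endoftext|>", "").strip()
            print(answer)
            ```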
         
     | 
| 555 | 
         
            +
             
     | 
| 556 | 
         
            +
            ---
         
     | 
| 557 | 
         
            +
             
     | 
| 558 | 
         
            +
            # Tokenization
         
     | 
| 559 | 
         
            +
            Please refer to [`rinna/nekomata-14b`](https://huggingface.co/rinna/nekomata-14b) for tokenization details.
         
     | 
| 560 | 
         
            +
             
     | 
| 561 | 
         
            +
            ---
         
     | 
| 562 | 
         
            +
             
     | 
| 563 | 
         
            +
            # How to cite
         
     | 
| 564 | 
         
            +
            ```bibtex
         
     | 
| 565 | 
         
            +
            @misc{rinna-nekomata-14b-instruction,
         
     | 
| 566 | 
         
            +
                title = {rinna/nekomata-14b-instruction},
         
     | 
| 567 | 
         
            +
                author = {Zhao, Tianyu and Sawada, Kei},
         
     | 
| 568 | 
         
            +
                url = {https://huggingface.co/rinna/nekomata-14b-instruction}
         
     | 
| 569 | 
         
            +
            }
         
     | 
| 570 | 
         
            +
             
     | 
| 571 | 
         
            +
            @inproceedings{sawada2024release,
         
     | 
| 572 | 
         
            +
                title = {Release of Pre-Trained Models for the {J}apanese Language},
         
     | 
| 573 | 
         
            +
                author = {Sawada, Kei and Zhao, Tianyu and Shing, Makoto and Mitsui, Kentaro and Kaga, Akio and Hono, Yukiya and Wakatsuki, Toshiaki and Mitsuda, Koh},
         
     | 
| 574 | 
         
            +
                booktitle = {Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024)},
         
     | 
| 575 | 
         
            +
                month = {5},
         
     | 
| 576 | 
         
            +
                year = {2024},
         
     | 
| 577 | 
         
            +
                pages = {13898--13905},
         
     | 
| 578 | 
         
            +
                url = {https://aclanthology.org/2024.lrec-main.1213},
         
     | 
| 579 | 
         
            +
                note = {\url{https://arxiv.org/abs/2404.01657}}
         
     | 
| 580 | 
         
            +
            }
         
     | 
| 581 | 
         
            +
            ```
         
     | 
| 582 | 
         
            +
            ---
         
     | 
| 583 | 
         
            +
             
     | 
| 584 | 
         
            +
            # License
         
     | 
| 585 | 
         
            +
            [Tongyi Qianwen LICENSE AGREEMENT](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT)","{""id"": ""rinna/nekomata-14b-instruction"", ""author"": ""rinna"", ""sha"": ""2bba2dd2aa0cf07c3deafeb76854cf53524dece0"", ""last_modified"": ""2025-03-23 11:29:49+00:00"", ""created_at"": ""2023-12-19 08:10:24+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 638, ""downloads_all_time"": null, ""likes"": 24, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""safetensors"", ""qwen"", ""text-generation"", ""custom_code"", ""ja"", ""en"", ""dataset:databricks/databricks-dolly-15k"", ""dataset:kunishou/databricks-dolly-15k-ja"", ""dataset:izumi-lab/llm-japanese-dataset"", ""arxiv:2309.16609"", ""arxiv:2404.01657"", ""base_model:rinna/nekomata-14b"", ""base_model:finetune:rinna/nekomata-14b"", ""license:other"", ""autotrain_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: rinna/nekomata-14b\ndatasets:\n- databricks/databricks-dolly-15k\n- kunishou/databricks-dolly-15k-ja\n- izumi-lab/llm-japanese-dataset\nlanguage:\n- ja\n- en\nlicense: other\nlicense_name: tongyi-qianwen-license-agreement\nlicense_link: https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT\ntags:\n- qwen\nthumbnail: https://github.com/rinnakk/japanese-pretrained-models/blob/master/rinna.png\ninference: false"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""QWenLMHeadModel""], ""auto_map"": {""AutoConfig"": ""configuration_qwen.QWenConfig"", ""AutoModelForCausalLM"": ""modeling_qwen.QWenLMHeadModel""}, ""model_type"": ""qwen"", ""tokenizer_config"": {""bos_token"": ""<|endoftext|>"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|extra_204|>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_qwen.QWenLMHeadModel"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='NOTICE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cache_autogptq_cuda_256.cpp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cache_autogptq_cuda_kernel_256.cu', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_qwen.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cpp_kernels.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_qwen.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen.tiktoken', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen_generation_utils.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='rinna.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_qwen.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, 
lfs=None)""], ""spaces"": [""hayas/nekomata-14b-instruction""], ""safetensors"": {""parameters"": {""BF16"": 14167290880}, ""total"": 14167290880}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-23 11:29:49+00:00"", ""cardData"": ""base_model: rinna/nekomata-14b\ndatasets:\n- databricks/databricks-dolly-15k\n- kunishou/databricks-dolly-15k-ja\n- izumi-lab/llm-japanese-dataset\nlanguage:\n- ja\n- en\nlicense: other\nlicense_name: tongyi-qianwen-license-agreement\nlicense_link: https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT\ntags:\n- qwen\nthumbnail: https://github.com/rinnakk/japanese-pretrained-models/blob/master/rinna.png\ninference: false"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_qwen.QWenLMHeadModel"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""65814ff07c71acb646762be3"", ""modelId"": ""rinna/nekomata-14b-instruction"", ""usedStorage"": 56669241227}",2,,0,"https://huggingface.co/ayousanz/nekomata-14b-gozaru, https://huggingface.co/smorce/nekomata-14B-instruction-qlora-merge-gozaru",2,"https://huggingface.co/rinna/nekomata-14b-instruction-gguf, https://huggingface.co/mradermacher/nekomata-14b-instruction-GGUF, https://huggingface.co/mradermacher/nekomata-14b-instruction-i1-GGUF",3,,0,"hayas/nekomata-14b-instruction, huggingface/InferenceSupport/discussions/new?title=rinna/nekomata-14b-instruction&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Brinna%2Fnekomata-14b-instruction%5D(%2Frinna%2Fnekomata-14b-instruction)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A",2
         
     | 
| 586 | 
         
            +
            imdatta0/qwen_OpenAssistant_oasst_top1_2023-08-25,"---
         
     | 
| 587 | 
         
            +
            base_model: Qwen/Qwen-14B
         
     | 
| 588 | 
         
            +
            tags:
         
     | 
| 589 | 
         
            +
            - generated_from_trainer
         
     | 
| 590 | 
         
            +
            model-index:
         
     | 
| 591 | 
         
            +
            - name: OpenAssistant_oasst_top1_2023-08-25
         
     | 
| 592 | 
         
            +
              results: []
         
     | 
| 593 | 
         
            +
            ---
         
     | 
| 594 | 
         
            +
             
     | 
| 595 | 
         
            +
            <!-- This model card has been generated automatically according to the information the Trainer had access to. You
         
     | 
| 596 | 
         
            +
            should probably proofread and complete it, then remove this comment. -->
         
     | 
| 597 | 
         
            +
             
     | 
| 598 | 
         
            +
            # OpenAssistant_oasst_top1_2023-08-25
         
     | 
| 599 | 
         
            +
             
     | 
| 600 | 
         
            +
            This model is a fine-tuned version of [Qwen/Qwen-14B](https://huggingface.co/Qwen/Qwen-14B) on an unknown dataset.
         
     | 
| 601 | 
         
            +
            It achieves the following results on the evaluation set:
         
     | 
| 602 | 
         
            +
            - Loss: 1.6501
         
     | 
| 603 | 
         
            +
             
     | 
| 604 | 
         
            +
            ## Model description
         
     | 
| 605 | 
         
            +
             
     | 
| 606 | 
         
            +
            More information needed
         
     | 
| 607 | 
         
            +
             
     | 
| 608 | 
         
            +
            ## Intended uses & limitations
         
     | 
| 609 | 
         
            +
             
     | 
| 610 | 
         
            +
            More information needed
         
     | 
| 611 | 
         
            +
             
     | 
| 612 | 
         
            +
            ## Training and evaluation data
         
     | 
| 613 | 
         
            +
             
     | 
| 614 | 
         
            +
            More information needed
         
     | 
| 615 | 
         
            +
             
     | 
| 616 | 
         
            +
            ## Training procedure
         
     | 
| 617 | 
         
            +
             
     | 
| 618 | 
         
            +
            ### Training hyperparameters
         
     | 
| 619 | 
         
            +
             
     | 
| 620 | 
         
            +
            The following hyperparameters were used during training (a rough `TrainingArguments` reconstruction is sketched after the list):
         
     | 
| 621 | 
         
            +
            - learning_rate: 0.0001
         
     | 
| 622 | 
         
            +
            - train_batch_size: 1
         
     | 
| 623 | 
         
            +
            - eval_batch_size: 1
         
     | 
| 624 | 
         
            +
            - seed: 42
         
     | 
| 625 | 
         
            +
            - gradient_accumulation_steps: 16
         
     | 
| 626 | 
         
            +
            - total_train_batch_size: 16
         
     | 
| 627 | 
         
            +
            - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
         
     | 
| 628 | 
         
            +
            - lr_scheduler_type: cosine
         
     | 
| 629 | 
         
            +
            - lr_scheduler_warmup_steps: 0.01
         
     | 
| 630 | 
         
            +
            - num_epochs: 1
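            These values map fairly directly onto `transformers.TrainingArguments`. The sketch below is a reconstruction under that assumption (the fractional `lr_scheduler_warmup_steps: 0.01` is read as a warmup ratio, and the output directory is a placeholder); it is not the authors' training script.

            ```python
            from transformers import TrainingArguments

            # Hypothetical reconstruction of the hyperparameters listed above.
            args = TrainingArguments(
                output_dir="qwen_oasst_top1",     # placeholder path, not from the card
                learning_rate=1e-4,
                per_device_train_batch_size=1,
                per_device_eval_batch_size=1,
                seed=42,
                gradient_accumulation_steps=16,   # effective batch size 1 * 16 = 16
                lr_scheduler_type="cosine",
                warmup_ratio=0.01,                # assumed reading of "lr_scheduler_warmup_steps: 0.01"
                num_train_epochs=1,
                adam_beta1=0.9,
                adam_beta2=0.999,
                adam_epsilon=1e-8,
            )
            ```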
         
     | 
| 631 | 
         
            +
             
     | 
| 632 | 
         
            +
            ### Training results
         
     | 
| 633 | 
         
            +
             
     | 
| 634 | 
         
            +
            | Training Loss | Epoch | Step | Validation Loss |
         
     | 
| 635 | 
         
            +
            |:-------------:|:-----:|:----:|:---------------:|
         
     | 
| 636 | 
         
            +
            | 2.163         | 0.02  | 16   | 1.9459          |
         
     | 
| 637 | 
         
            +
            | 1.9498        | 0.04  | 32   | 1.8467          |
         
     | 
| 638 | 
         
            +
            | 1.9578        | 0.06  | 48   | 1.7864          |
         
     | 
| 639 | 
         
            +
            | 1.8398        | 0.08  | 64   | 1.7530          |
         
     | 
| 640 | 
         
            +
            | 1.7696        | 0.1   | 80   | 1.7076          |
         
     | 
| 641 | 
         
            +
            | 1.7744        | 0.12  | 96   | 1.7275          |
         
     | 
| 642 | 
         
            +
            | 1.8108        | 0.14  | 112  | 1.6887          |
         
     | 
| 643 | 
         
            +
            | 1.7707        | 0.17  | 128  | 1.6942          |
         
     | 
| 644 | 
         
            +
            | 1.787         | 0.19  | 144  | 1.6894          |
         
     | 
| 645 | 
         
            +
            | 1.7029        | 0.21  | 160  | 1.6760          |
         
     | 
| 646 | 
         
            +
            | 1.6732        | 0.23  | 176  | 1.6838          |
         
     | 
| 647 | 
         
            +
            | 1.6313        | 0.25  | 192  | 1.6754          |
         
     | 
| 648 | 
         
            +
            | 1.7071        | 0.27  | 208  | 1.6752          |
         
     | 
| 649 | 
         
            +
            | 1.6781        | 0.29  | 224  | 1.6741          |
         
     | 
| 650 | 
         
            +
            | 1.7782        | 0.31  | 240  | 1.6698          |
         
     | 
| 651 | 
         
            +
            | 1.6836        | 0.33  | 256  | 1.6592          |
         
     | 
| 652 | 
         
            +
            | 1.7229        | 0.35  | 272  | 1.6633          |
         
     | 
| 653 | 
         
            +
            | 1.7196        | 0.37  | 288  | 1.6638          |
         
     | 
| 654 | 
         
            +
            | 1.6892        | 0.39  | 304  | 1.6627          |
         
     | 
| 655 | 
         
            +
            | 1.6844        | 0.41  | 320  | 1.6557          |
         
     | 
| 656 | 
         
            +
            | 1.8027        | 0.43  | 336  | 1.6540          |
         
     | 
| 657 | 
         
            +
            | 1.692         | 0.45  | 352  | 1.6577          |
         
     | 
| 658 | 
         
            +
            | 1.7088        | 0.47  | 368  | 1.6611          |
         
     | 
| 659 | 
         
            +
            | 1.7987        | 0.5   | 384  | 1.6557          |
         
     | 
| 660 | 
         
            +
            | 1.709         | 0.52  | 400  | 1.6600          |
         
     | 
| 661 | 
         
            +
            | 1.701         | 0.54  | 416  | 1.6588          |
         
     | 
| 662 | 
         
            +
            | 1.6784        | 0.56  | 432  | 1.6594          |
         
     | 
| 663 | 
         
            +
            | 1.6997        | 0.58  | 448  | 1.6484          |
         
     | 
| 664 | 
         
            +
            | 1.7811        | 0.6   | 464  | 1.6583          |
         
     | 
| 665 | 
         
            +
            | 1.7628        | 0.62  | 480  | 1.6461          |
         
     | 
| 666 | 
         
            +
            | 1.6254        | 0.64  | 496  | 1.6527          |
         
     | 
| 667 | 
         
            +
            | 1.6684        | 0.66  | 512  | 1.6520          |
         
     | 
| 668 | 
         
            +
            | 1.6837        | 0.68  | 528  | 1.6570          |
         
     | 
| 669 | 
         
            +
            | 1.7209        | 0.7   | 544  | 1.6543          |
         
     | 
| 670 | 
         
            +
            | 1.677         | 0.72  | 560  | 1.6562          |
         
     | 
| 671 | 
         
            +
            | 1.6819        | 0.74  | 576  | 1.6517          |
         
     | 
| 672 | 
         
            +
            | 1.7072        | 0.76  | 592  | 1.6551          |
         
     | 
| 673 | 
         
            +
            | 1.6446        | 0.78  | 608  | 1.6562          |
         
     | 
| 674 | 
         
            +
            | 1.6908        | 0.8   | 624  | 1.6528          |
         
     | 
| 675 | 
         
            +
            | 1.7209        | 0.83  | 640  | 1.6518          |
         
     | 
| 676 | 
         
            +
            | 1.6818        | 0.85  | 656  | 1.6517          |
         
     | 
| 677 | 
         
            +
            | 1.7007        | 0.87  | 672  | 1.6525          |
         
     | 
| 678 | 
         
            +
            | 1.8077        | 0.89  | 688  | 1.6522          |
         
     | 
| 679 | 
         
            +
            | 1.6856        | 0.91  | 704  | 1.6516          |
         
     | 
| 680 | 
         
            +
            | 1.7247        | 0.93  | 720  | 1.6509          |
         
     | 
| 681 | 
         
            +
            | 1.6645        | 0.95  | 736  | 1.6500          |
         
     | 
| 682 | 
         
            +
            | 1.6841        | 0.97  | 752  | 1.6499          |
         
     | 
| 683 | 
         
            +
            | 1.7244        | 0.99  | 768  | 1.6501          |
         
     | 
| 684 | 
         
            +
             
     | 
| 685 | 
         
            +
             
     | 
| 686 | 
         
            +
            ### Framework versions
         
     | 
| 687 | 
         
            +
             
     | 
| 688 | 
         
            +
            - Transformers 4.32.0
         
     | 
| 689 | 
         
            +
            - Pytorch 2.1.0
         
     | 
| 690 | 
         
            +
            - Datasets 2.14.7
         
     | 
| 691 | 
         
            +
            - Tokenizers 0.13.3
         
     | 
| 692 | 
         
            +
            ","{""id"": ""imdatta0/qwen_OpenAssistant_oasst_top1_2023-08-25"", ""author"": ""imdatta0"", ""sha"": ""550fe9f187844887c98d7ac3eefeef73857f69e3"", ""last_modified"": ""2023-11-16 06:32:49+00:00"", ""created_at"": ""2023-10-27 21:03:56+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""generated_from_trainer"", ""base_model:Qwen/Qwen-14B"", ""base_model:finetune:Qwen/Qwen-14B"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: Qwen/Qwen-14B\ntags:\n- generated_from_trainer\nmodel-index:\n- name: OpenAssistant_oasst_top1_2023-08-25\n  results: []"", ""widget_data"": null, ""model_index"": [{""name"": ""OpenAssistant_oasst_top1_2023-08-25"", ""results"": []}], ""config"": {""tokenizer_config"": {}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen.tiktoken', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-11-16 06:32:49+00:00"", ""cardData"": ""base_model: Qwen/Qwen-14B\ntags:\n- generated_from_trainer\nmodel-index:\n- name: OpenAssistant_oasst_top1_2023-08-25\n  results: []"", ""transformersInfo"": null, ""_id"": ""653c25bc9c7bf38da272b65e"", ""modelId"": ""imdatta0/qwen_OpenAssistant_oasst_top1_2023-08-25"", ""usedStorage"": 699473314}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=imdatta0/qwen_OpenAssistant_oasst_top1_2023-08-25&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bimdatta0%2Fqwen_OpenAssistant_oasst_top1_2023-08-25%5D(%2Fimdatta0%2Fqwen_OpenAssistant_oasst_top1_2023-08-25)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
     | 
| 693 | 
         
            +
            imdatta0/qwen_nampdn-ai_tiny-textbooks,"---
         
     | 
| 694 | 
         
            +
            base_model: Qwen/Qwen-14B
         
     | 
| 695 | 
         
            +
            tags:
         
     | 
| 696 | 
         
            +
            - generated_from_trainer
         
     | 
| 697 | 
         
            +
            model-index:
         
     | 
| 698 | 
         
            +
            - name: nampdn-ai_tiny-textbooks
         
     | 
| 699 | 
         
            +
              results: []
         
     | 
| 700 | 
         
            +
            ---
         
     | 
| 701 | 
         
            +
             
     | 
| 702 | 
         
            +
            <!-- This model card has been generated automatically according to the information the Trainer had access to. You
         
     | 
| 703 | 
         
            +
            should probably proofread and complete it, then remove this comment. -->
         
     | 
| 704 | 
         
            +
             
     | 
| 705 | 
         
            +
            # nampdn-ai_tiny-textbooks
         
     | 
| 706 | 
         
            +
             
     | 
| 707 | 
         
            +
            This model is a fine-tuned version of [Qwen/Qwen-14B](https://huggingface.co/Qwen/Qwen-14B) on an unknown dataset.
         
     | 
| 708 | 
         
            +
            It achieves the following results on the evaluation set:
         
     | 
| 709 | 
         
            +
            - Loss: 2.3572
         
     | 
| 710 | 
         
            +
             
     | 
| 711 | 
         
            +
            ## Model description
         
     | 
| 712 | 
         
            +
             
     | 
| 713 | 
         
            +
            More information needed
         
     | 
| 714 | 
         
            +
             
     | 
| 715 | 
         
            +
            ## Intended uses & limitations
         
     | 
| 716 | 
         
            +
             
     | 
| 717 | 
         
            +
            More information needed
         
     | 
| 718 | 
         
            +
             
     | 
| 719 | 
         
            +
            ## Training and evaluation data
         
     | 
| 720 | 
         
            +
             
     | 
| 721 | 
         
            +
            More information needed
         
     | 
| 722 | 
         
            +
             
     | 
| 723 | 
         
            +
            ## Training procedure
         
     | 
| 724 | 
         
            +
             
     | 
| 725 | 
         
            +
            ### Training hyperparameters
         
     | 
| 726 | 
         
            +
             
     | 
| 727 | 
         
            +
            The following hyperparameters were used during training:
         
     | 
| 728 | 
         
            +
            - learning_rate: 1e-05
         
     | 
| 729 | 
         
            +
            - train_batch_size: 1
         
     | 
| 730 | 
         
            +
            - eval_batch_size: 1
         
     | 
| 731 | 
         
            +
            - seed: 42
         
     | 
| 732 | 
         
            +
            - gradient_accumulation_steps: 8
         
     | 
| 733 | 
         
            +
            - total_train_batch_size: 8
         
     | 
| 734 | 
         
            +
            - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
         
     | 
| 735 | 
         
            +
            - lr_scheduler_type: cosine
         
     | 
| 736 | 
         
            +
            - lr_scheduler_warmup_steps: 0.01
         
     | 
| 737 | 
         
            +
            - num_epochs: 1
         
     | 
| 738 | 
         
            +
             
     | 
| 739 | 
         
            +
            ### Training results
         
     | 
| 740 | 
         
            +
             
     | 
| 741 | 
         
            +
            | Training Loss | Epoch | Step  | Validation Loss |
         
     | 
| 742 | 
         
            +
            |:-------------:|:-----:|:-----:|:---------------:|
         
     | 
| 743 | 
         
            +
            | 2.4651        | 0.02  | 200   | 2.3996          |
         
     | 
| 744 | 
         
            +
            | 2.4335        | 0.04  | 400   | 2.3799          |
         
     | 
| 745 | 
         
            +
            | 2.3848        | 0.06  | 600   | 2.3746          |
         
     | 
| 746 | 
         
            +
            | 2.4037        | 0.08  | 800   | 2.3714          |
         
     | 
| 747 | 
         
            +
            | 2.3985        | 0.1   | 1000  | 2.3693          |
         
     | 
| 748 | 
         
            +
            | 2.4072        | 0.12  | 1200  | 2.3673          |
         
     | 
| 749 | 
         
            +
            | 2.4028        | 0.14  | 1400  | 2.3665          |
         
     | 
| 750 | 
         
            +
            | 2.3748        | 0.16  | 1600  | 2.3643          |
         
     | 
| 751 | 
         
            +
            | 2.4119        | 0.18  | 1800  | 2.3635          |
         
     | 
| 752 | 
         
            +
            | 2.4002        | 0.2   | 2000  | 2.3640          |
         
     | 
| 753 | 
         
            +
            | 2.3865        | 0.22  | 2200  | 2.3635          |
         
     | 
| 754 | 
         
            +
            | 2.4           | 0.24  | 2400  | 2.3628          |
         
     | 
| 755 | 
         
            +
            | 2.4096        | 0.26  | 2600  | 2.3625          |
         
     | 
| 756 | 
         
            +
            | 2.3976        | 0.28  | 2800  | 2.3614          |
         
     | 
| 757 | 
         
            +
            | 2.3767        | 0.3   | 3000  | 2.3618          |
         
     | 
| 758 | 
         
            +
            | 2.4151        | 0.32  | 3200  | 2.3616          |
         
     | 
| 759 | 
         
            +
            | 2.3835        | 0.34  | 3400  | 2.3605          |
         
     | 
| 760 | 
         
            +
            | 2.3995        | 0.36  | 3600  | 2.3608          |
         
     | 
| 761 | 
         
            +
            | 2.4121        | 0.38  | 3800  | 2.3602          |
         
     | 
| 762 | 
         
            +
            | 2.4262        | 0.4   | 4000  | 2.3591          |
         
     | 
| 763 | 
         
            +
            | 2.3604        | 0.42  | 4200  | 2.3594          |
         
     | 
| 764 | 
         
            +
            | 2.3954        | 0.44  | 4400  | 2.3594          |
         
     | 
| 765 | 
         
            +
            | 2.3743        | 0.46  | 4600  | 2.3587          |
         
     | 
| 766 | 
         
            +
            | 2.4069        | 0.48  | 4800  | 2.3591          |
         
     | 
| 767 | 
         
            +
            | 2.4103        | 0.5   | 5000  | 2.3585          |
         
     | 
| 768 | 
         
            +
            | 2.4133        | 0.52  | 5200  | 2.3585          |
         
     | 
| 769 | 
         
            +
            | 2.4229        | 0.54  | 5400  | 2.3578          |
         
     | 
| 770 | 
         
            +
            | 2.4397        | 0.56  | 5600  | 2.3581          |
         
     | 
| 771 | 
         
            +
            | 2.4237        | 0.58  | 5800  | 2.3581          |
         
     | 
| 772 | 
         
            +
            | 2.4109        | 0.6   | 6000  | 2.3577          |
         
     | 
| 773 | 
         
            +
            | 2.43          | 0.62  | 6200  | 2.3575          |
         
     | 
| 774 | 
         
            +
            | 2.3999        | 0.64  | 6400  | 2.3572          |
         
     | 
| 775 | 
         
            +
            | 2.3771        | 0.66  | 6600  | 2.3577          |
         
     | 
| 776 | 
         
            +
            | 2.4119        | 0.68  | 6800  | 2.3576          |
         
     | 
| 777 | 
         
            +
            | 2.3877        | 0.7   | 7000  | 2.3576          |
         
     | 
| 778 | 
         
            +
            | 2.411         | 0.72  | 7200  | 2.3569          |
         
     | 
| 779 | 
         
            +
            | 2.3808        | 0.74  | 7400  | 2.3570          |
         
     | 
| 780 | 
         
            +
            | 2.3989        | 0.76  | 7600  | 2.3571          |
         
     | 
| 781 | 
         
            +
            | 2.422         | 0.78  | 7800  | 2.3569          |
         
     | 
| 782 | 
         
            +
            | 2.3768        | 0.8   | 8000  | 2.3569          |
         
     | 
| 783 | 
         
            +
            | 2.3988        | 0.82  | 8200  | 2.3572          |
         
     | 
| 784 | 
         
            +
            | 2.3927        | 0.84  | 8400  | 2.3572          |
         
     | 
| 785 | 
         
            +
            | 2.3961        | 0.86  | 8600  | 2.3573          |
         
     | 
| 786 | 
         
            +
            | 2.4021        | 0.88  | 8800  | 2.3570          |
         
     | 
| 787 | 
         
            +
            | 2.3889        | 0.9   | 9000  | 2.3570          |
         
     | 
| 788 | 
         
            +
            | 2.404         | 0.92  | 9200  | 2.3570          |
         
     | 
| 789 | 
         
            +
            | 2.3982        | 0.94  | 9400  | 2.3572          |
         
     | 
| 790 | 
         
            +
            | 2.4018        | 0.96  | 9600  | 2.3573          |
         
     | 
| 791 | 
         
            +
            | 2.3717        | 0.98  | 9800  | 2.3572          |
         
     | 
| 792 | 
         
            +
            | 2.4076        | 1.0   | 10000 | 2.3572          |
         
     | 
| 793 | 
         
            +
             
     | 
| 794 | 
         
            +
             
     | 
| 795 | 
         
            +
            ### Framework versions
         
     | 
| 796 | 
         
            +
             
     | 
| 797 | 
         
            +
            - Transformers 4.34.1
         
     | 
| 798 | 
         
            +
            - Pytorch 2.1.0
         
     | 
| 799 | 
         
            +
            - Datasets 2.14.5
         
     | 
| 800 | 
         
            +
            - Tokenizers 0.14.1
         
     | 
| 801 | 
         
            +
            ","{""id"": ""imdatta0/qwen_nampdn-ai_tiny-textbooks"", ""author"": ""imdatta0"", ""sha"": ""f667fcf66dd1f6987e748622d594932feca4adc2"", ""last_modified"": ""2023-11-15 18:38:39+00:00"", ""created_at"": ""2023-10-28 05:43:47+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 1, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""generated_from_trainer"", ""base_model:Qwen/Qwen-14B"", ""base_model:finetune:Qwen/Qwen-14B"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: Qwen/Qwen-14B\ntags:\n- generated_from_trainer\nmodel-index:\n- name: nampdn-ai_tiny-textbooks\n  results: []"", ""widget_data"": null, ""model_index"": [{""name"": ""nampdn-ai_tiny-textbooks"", ""results"": []}], ""config"": {""tokenizer_config"": {""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|endoftext|>""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen.tiktoken', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-11-15 18:38:39+00:00"", ""cardData"": ""base_model: Qwen/Qwen-14B\ntags:\n- generated_from_trainer\nmodel-index:\n- name: nampdn-ai_tiny-textbooks\n  results: []"", ""transformersInfo"": null, ""_id"": ""653c9f936bcb9f518a3f86d8"", ""modelId"": ""imdatta0/qwen_nampdn-ai_tiny-textbooks"", ""usedStorage"": 11403823368}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=imdatta0/qwen_nampdn-ai_tiny-textbooks&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bimdatta0%2Fqwen_nampdn-ai_tiny-textbooks%5D(%2Fimdatta0%2Fqwen_nampdn-ai_tiny-textbooks)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
     | 
| 802 | 
         
            +
            imdatta0/qwen_databricks_databricks-dolly-15k,"---
         
     | 
| 803 | 
         
            +
            base_model: Qwen/Qwen-14B
         
     | 
| 804 | 
         
            +
            tags:
         
     | 
| 805 | 
         
            +
            - generated_from_trainer
         
     | 
| 806 | 
         
            +
            model-index:
         
     | 
| 807 | 
         
            +
            - name: final_databricks_databricks-dolly-15k
         
     | 
| 808 | 
         
            +
              results: []
         
     | 
| 809 | 
         
            +
            ---
         
     | 
| 810 | 
         
            +
             
     | 
| 811 | 
         
            +
            <!-- This model card has been generated automatically according to the information the Trainer had access to. You
         
     | 
| 812 | 
         
            +
            should probably proofread and complete it, then remove this comment. -->
         
     | 
| 813 | 
         
            +
             
     | 
| 814 | 
         
            +
            # final_databricks_databricks-dolly-15k
         
     | 
| 815 | 
         
            +
             
     | 
| 816 | 
         
            +
            This model is a fine-tuned version of [Qwen/Qwen-14B](https://huggingface.co/Qwen/Qwen-14B) on an unknown dataset.
         
     | 
| 817 | 
         
            +
            It achieves the following results on the evaluation set:
         
     | 
| 818 | 
         
            +
            - Loss: 1.6083
         
     | 
| 819 | 
         
            +
             
     | 
| 820 | 
         
            +
            ## Model description
         
     | 
| 821 | 
         
            +
             
     | 
| 822 | 
         
            +
            More information needed
         
     | 
| 823 | 
         
            +
             
     | 
| 824 | 
         
            +
            ## Intended uses & limitations
         
     | 
| 825 | 
         
            +
             
     | 
| 826 | 
         
            +
            More information needed
         
     | 
| 827 | 
         
            +
             
     | 
| 828 | 
         
            +
            ## Training and evaluation data
         
     | 
| 829 | 
         
            +
             
     | 
| 830 | 
         
            +
            More information needed
         
     | 
| 831 | 
         
            +
             
     | 
| 832 | 
         
            +
            ## Training procedure
         
     | 
| 833 | 
         
            +
             
     | 
| 834 | 
         
            +
            ### Training hyperparameters
         
     | 
| 835 | 
         
            +
             
     | 
| 836 | 
         
            +
            The following hyperparameters were used during training; a minimal gradient-accumulation sketch illustrating the effective batch size follows the list:
         
     | 
| 837 | 
         
            +
            - learning_rate: 0.0003
         
     | 
| 838 | 
         
            +
            - train_batch_size: 2
         
     | 
| 839 | 
         
            +
            - eval_batch_size: 2
         
     | 
| 840 | 
         
            +
            - seed: 42
         
     | 
| 841 | 
         
            +
            - gradient_accumulation_steps: 132
         
     | 
| 842 | 
         
            +
            - total_train_batch_size: 264
         
     | 
| 843 | 
         
            +
            - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
         
     | 
| 844 | 
         
            +
            - lr_scheduler_type: cosine
         
     | 
| 845 | 
         
            +
            - lr_scheduler_warmup_steps: 0.01
         
     | 
| 846 | 
         
            +
            - num_epochs: 1
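            Here the effective batch comes from gradient accumulation: a per-device batch of 2 accumulated over 132 steps gives the reported total batch size of 264. The following is a minimal, self-contained sketch of that mechanism (a toy model, not the authors' code):

            ```python
            import torch
            from torch import nn
            from torch.utils.data import DataLoader, TensorDataset

            # Toy setup: 264 samples, micro-batches of 2, 132 accumulation steps
            # -> a single optimizer update over an effective batch of 264.
            model = nn.Linear(8, 1)
            optimizer = torch.optim.Adam(model.parameters(), lr=3e-4)
            data = TensorDataset(torch.randn(264, 8), torch.randn(264, 1))
            loader = DataLoader(data, batch_size=2)

            accumulation_steps = 132
            optimizer.zero_grad()
            for step, (x, y) in enumerate(loader):
                loss = nn.functional.mse_loss(model(x), y) / accumulation_steps  # average over the effective batch
                loss.backward()                      # gradients accumulate across micro-batches
                if (step + 1) % accumulation_steps == 0:
                    optimizer.step()                 # one update per 132 micro-batches
                    optimizer.zero_grad()
            ```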
         
     | 
| 847 | 
         
            +
             
     | 
| 848 | 
         
            +
            ### Training results
         
     | 
| 849 | 
         
            +
             
     | 
| 850 | 
         
            +
            | Training Loss | Epoch | Step | Validation Loss |
         
     | 
| 851 | 
         
            +
            |:-------------:|:-----:|:----:|:---------------:|
         
     | 
| 852 | 
         
            +
            | 1.694         | 0.04  | 2    | 1.8016          |
         
     | 
| 853 | 
         
            +
            | 1.6398        | 0.07  | 4    | 1.7369          |
         
     | 
| 854 | 
         
            +
            | 1.6421        | 0.11  | 6    | 1.6886          |
         
     | 
| 855 | 
         
            +
            | 1.579         | 0.15  | 8    | 1.6596          |
         
     | 
| 856 | 
         
            +
            | 1.5589        | 0.18  | 10   | 1.6420          |
         
     | 
| 857 | 
         
            +
            | 1.5944        | 0.22  | 12   | 1.6305          |
         
     | 
| 858 | 
         
            +
            | 1.5314        | 0.26  | 14   | 1.6274          |
         
     | 
| 859 | 
         
            +
            | 1.5841        | 0.29  | 16   | 1.6238          |
         
     | 
| 860 | 
         
            +
            | 1.5945        | 0.33  | 18   | 1.6229          |
         
     | 
| 861 | 
         
            +
            | 1.5755        | 0.37  | 20   | 1.6234          |
         
     | 
| 862 | 
         
            +
            | 1.5527        | 0.4   | 22   | 1.6231          |
         
     | 
| 863 | 
         
            +
            | 1.6121        | 0.44  | 24   | 1.6224          |
         
     | 
| 864 | 
         
            +
            | 1.586         | 0.48  | 26   | 1.6219          |
         
     | 
| 865 | 
         
            +
            | 1.5995        | 0.52  | 28   | 1.6213          |
         
     | 
| 866 | 
         
            +
            | 1.5942        | 0.55  | 30   | 1.6200          |
         
     | 
| 867 | 
         
            +
            | 1.5738        | 0.59  | 32   | 1.6180          |
         
     | 
| 868 | 
         
            +
            | 1.5825        | 0.63  | 34   | 1.6161          |
         
     | 
| 869 | 
         
            +
            | 1.5183        | 0.66  | 36   | 1.6137          |
         
     | 
| 870 | 
         
            +
            | 1.5964        | 0.7   | 38   | 1.6120          |
         
     | 
| 871 | 
         
            +
            | 1.623         | 0.74  | 40   | 1.6105          |
         
     | 
| 872 | 
         
            +
            | 1.5783        | 0.77  | 42   | 1.6098          |
         
     | 
| 873 | 
         
            +
            | 1.6046        | 0.81  | 44   | 1.6093          |
         
     | 
| 874 | 
         
            +
            | 1.5157        | 0.85  | 46   | 1.6088          |
         
     | 
| 875 | 
         
            +
            | 1.5317        | 0.88  | 48   | 1.6086          |
         
     | 
| 876 | 
         
            +
            | 1.5578        | 0.92  | 50   | 1.6086          |
         
     | 
| 877 | 
         
            +
            | 1.5402        | 0.96  | 52   | 1.6084          |
         
     | 
| 878 | 
         
            +
            | 1.5616        | 0.99  | 54   | 1.6083          |
         
     | 
| 879 | 
         
            +
             
     | 
| 880 | 
         
            +
             
     | 
| 881 | 
         
            +
            ### Framework versions
         
     | 
| 882 | 
         
            +
             
     | 
| 883 | 
         
            +
            - Transformers 4.32.0
         
     | 
| 884 | 
         
            +
            - Pytorch 2.1.0
         
     | 
| 885 | 
         
            +
            - Datasets 2.14.7
         
     | 
| 886 | 
         
            +
            - Tokenizers 0.13.3
         
     | 
| 887 | 
         
            +
            ","{""id"": ""imdatta0/qwen_databricks_databricks-dolly-15k"", ""author"": ""imdatta0"", ""sha"": ""6b13b031ea0f7fb196309d37edac63d0cf889d92"", ""last_modified"": ""2023-11-17 18:10:52+00:00"", ""created_at"": ""2023-11-16 21:30:20+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""generated_from_trainer"", ""base_model:Qwen/Qwen-14B"", ""base_model:finetune:Qwen/Qwen-14B"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: Qwen/Qwen-14B\ntags:\n- generated_from_trainer\nmodel-index:\n- name: final_databricks_databricks-dolly-15k\n  results: []"", ""widget_data"": null, ""model_index"": [{""name"": ""final_databricks_databricks-dolly-15k"", ""results"": []}], ""config"": {""tokenizer_config"": {}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen.tiktoken', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-11-17 18:10:52+00:00"", ""cardData"": ""base_model: Qwen/Qwen-14B\ntags:\n- generated_from_trainer\nmodel-index:\n- name: final_databricks_databricks-dolly-15k\n  results: []"", ""transformersInfo"": null, ""_id"": ""655689ec0e7a7067a92767c6"", ""modelId"": ""imdatta0/qwen_databricks_databricks-dolly-15k"", ""usedStorage"": 920024074}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=imdatta0/qwen_databricks_databricks-dolly-15k&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bimdatta0%2Fqwen_databricks_databricks-dolly-15k%5D(%2Fimdatta0%2Fqwen_databricks_databricks-dolly-15k)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
     | 
| 888 | 
         
            +
            imdatta0/qwen_cnn_dailymail,"---
         
     | 
| 889 | 
         
            +
            base_model: Qwen/Qwen-14B
         
     | 
| 890 | 
         
            +
            tags:
         
     | 
| 891 | 
         
            +
            - generated_from_trainer
         
     | 
| 892 | 
         
            +
            datasets:
         
     | 
| 893 | 
         
            +
            - cnn_dailymail
         
     | 
| 894 | 
         
            +
            model-index:
         
     | 
| 895 | 
         
            +
            - name: final_cnn_dailymail
         
     | 
| 896 | 
         
            +
              results: []
         
     | 
| 897 | 
         
            +
            ---
         
     | 
| 898 | 
         
            +
             
     | 
| 899 | 
         
            +
            <!-- This model card has been generated automatically according to the information the Trainer had access to. You
         
     | 
| 900 | 
         
            +
            should probably proofread and complete it, then remove this comment. -->
         
     | 
| 901 | 
         
            +
             
     | 
| 902 | 
         
            +
            # final_cnn_dailymail
         
     | 
| 903 | 
         
            +
             
     | 
| 904 | 
         
            +
            This model is a fine-tuned version of [Qwen/Qwen-14B](https://huggingface.co/Qwen/Qwen-14B) on the cnn_dailymail dataset.
         
     | 
| 905 | 
         
            +
            It achieves the following results on the evaluation set:
         
     | 
| 906 | 
         
            +
            - Loss: 2.2127
         
     | 
| 907 | 
         
            +
             
     | 
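Since this repository's file list includes `adapter_config.json` and `adapter_model.bin`, the fine-tune appears to ship as a PEFT/LoRA adapter rather than full weights. A minimal, hypothetical loading sketch follows; the PEFT entry point and the `trust_remote_code` requirement of the Qwen base model are assumptions, not part of the original card:

```python
# Hypothetical sketch: load the adapter on top of the Qwen/Qwen-14B base model.
# Assumes the repo is a standard PEFT adapter; this is not the authors' script.
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('Qwen/Qwen-14B', trust_remote_code=True)
model = AutoPeftModelForCausalLM.from_pretrained(
    'imdatta0/qwen_cnn_dailymail', trust_remote_code=True
)
```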
| 908 | 
         
            +
            ## Model description
         
     | 
| 909 | 
         
            +
             
     | 
| 910 | 
         
            +
            More information needed
         
     | 
| 911 | 
         
            +
             
     | 
| 912 | 
         
            +
            ## Intended uses & limitations
         
     | 
| 913 | 
         
            +
             
     | 
| 914 | 
         
            +
            More information needed
         
     | 
| 915 | 
         
            +
             
     | 
| 916 | 
         
            +
            ## Training and evaluation data
         
     | 
| 917 | 
         
            +
             
     | 
| 918 | 
         
            +
            More information needed
         
     | 
| 919 | 
         
            +
             
     | 
| 920 | 
         
            +
            ## Training procedure
         
     | 
| 921 | 
         
            +
             
     | 
| 922 | 
         
            +
            ### Training hyperparameters
         
     | 
| 923 | 
         
            +
             
     | 
| 924 | 
         
            +
             The following hyperparameters were used during training (see the mapping sketch after the list):
         
     | 
| 925 | 
         
            +
            - learning_rate: 1e-05
         
     | 
| 926 | 
         
            +
            - train_batch_size: 1
         
     | 
| 927 | 
         
            +
            - eval_batch_size: 1
         
     | 
| 928 | 
         
            +
            - seed: 42
         
     | 
| 929 | 
         
            +
            - gradient_accumulation_steps: 16
         
     | 
| 930 | 
         
            +
            - total_train_batch_size: 16
         
     | 
| 931 | 
         
            +
            - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
         
     | 
| 932 | 
         
            +
            - lr_scheduler_type: cosine
         
     | 
| 933 | 
         
            +
            - lr_scheduler_warmup_steps: 0.01
         
     | 
| 934 | 
         
            +
            - num_epochs: 1
         
     | 
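As a minimal sketch of how these settings map onto `transformers.TrainingArguments`: this is an illustration, not the original training script, and the `output_dir` value plus the reading of the warmup value as a ratio are assumptions.

```python
# Sketch only: maps the listed hyperparameters to standard TrainingArguments fields.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir='final_cnn_dailymail',   # hypothetical output directory
    learning_rate=1e-5,
    per_device_train_batch_size=1,
    per_device_eval_batch_size=1,
    gradient_accumulation_steps=16,     # effective batch size 1 x 16 = 16
    seed=42,
    lr_scheduler_type='cosine',
    warmup_ratio=0.01,                  # card lists warmup_steps: 0.01, read here as a ratio
    num_train_epochs=1,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
)
```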
| 935 | 
         
            +
             
     | 
| 936 | 
         
            +
            ### Training results
         
     | 
| 937 | 
         
            +
             
     | 
| 938 | 
         
            +
            | Training Loss | Epoch | Step | Validation Loss |
         
     | 
| 939 | 
         
            +
            |:-------------:|:-----:|:----:|:---------------:|
         
     | 
| 940 | 
         
            +
            | 1.9757        | 0.02  | 100  | 1.9261          |
         
     | 
| 941 | 
         
            +
            | 1.9258        | 0.04  | 200  | 1.8833          |
         
     | 
| 942 | 
         
            +
            | 1.8977        | 0.06  | 300  | 1.8657          |
         
     | 
| 943 | 
         
            +
            | 1.8903        | 0.08  | 400  | 1.8630          |
         
     | 
| 944 | 
         
            +
            | 1.8858        | 0.1   | 500  | 1.8638          |
         
     | 
| 945 | 
         
            +
            | 1.89          | 0.12  | 600  | 1.8636          |
         
     | 
| 946 | 
         
            +
            | 1.873         | 0.14  | 700  | 1.8637          |
         
     | 
| 947 | 
         
            +
            | 1.8908        | 0.16  | 800  | 1.8637          |
         
     | 
| 948 | 
         
            +
            | 1.8791        | 0.18  | 900  | 1.8626          |
         
     | 
| 949 | 
         
            +
            | 1.8851        | 0.2   | 1000 | 1.8634          |
         
     | 
| 950 | 
         
            +
            | 1.89          | 0.22  | 1100 | 1.8651          |
         
     | 
| 951 | 
         
            +
            | 1.8889        | 0.24  | 1200 | 1.8681          |
         
     | 
| 952 | 
         
            +
            | 1.8896        | 0.26  | 1300 | 1.8708          |
         
     | 
| 953 | 
         
            +
            | 1.8817        | 0.28  | 1400 | 1.8739          |
         
     | 
| 954 | 
         
            +
            | 1.9003        | 0.3   | 1500 | 1.8791          |
         
     | 
| 955 | 
         
            +
            | 1.9005        | 0.32  | 1600 | 1.8825          |
         
     | 
| 956 | 
         
            +
            | 1.9024        | 0.34  | 1700 | 1.8864          |
         
     | 
| 957 | 
         
            +
            | 1.9204        | 0.36  | 1800 | 1.8929          |
         
     | 
| 958 | 
         
            +
            | 1.9182        | 0.38  | 1900 | 1.8955          |
         
     | 
| 959 | 
         
            +
            | 1.9289        | 0.4   | 2000 | 1.9035          |
         
     | 
| 960 | 
         
            +
            | 1.9348        | 0.42  | 2100 | 1.9157          |
         
     | 
| 961 | 
         
            +
            | 1.9453        | 0.44  | 2200 | 1.9277          |
         
     | 
| 962 | 
         
            +
            | 1.9689        | 0.46  | 2300 | 1.9457          |
         
     | 
| 963 | 
         
            +
            | 1.9829        | 0.48  | 2400 | 1.9596          |
         
     | 
| 964 | 
         
            +
            | 1.9874        | 0.5   | 2500 | 1.9803          |
         
     | 
| 965 | 
         
            +
            | 2.0148        | 0.52  | 2600 | 1.9991          |
         
     | 
| 966 | 
         
            +
            | 2.0391        | 0.54  | 2700 | 2.0249          |
         
     | 
| 967 | 
         
            +
            | 2.0619        | 0.56  | 2800 | 2.0477          |
         
     | 
| 968 | 
         
            +
            | 2.0736        | 0.58  | 2900 | 2.0678          |
         
     | 
| 969 | 
         
            +
            | 2.0957        | 0.6   | 3000 | 2.0825          |
         
     | 
| 970 | 
         
            +
            | 2.1223        | 0.62  | 3100 | 2.1097          |
         
     | 
| 971 | 
         
            +
            | 2.1357        | 0.64  | 3200 | 2.1164          |
         
     | 
| 972 | 
         
            +
            | 2.1759        | 0.66  | 3300 | 2.1524          |
         
     | 
| 973 | 
         
            +
            | 2.168         | 0.68  | 3400 | 2.1650          |
         
     | 
| 974 | 
         
            +
            | 2.1842        | 0.7   | 3500 | 2.1637          |
         
     | 
| 975 | 
         
            +
            | 2.1956        | 0.72  | 3600 | 2.1775          |
         
     | 
| 976 | 
         
            +
            | 2.2131        | 0.74  | 3700 | 2.1888          |
         
     | 
| 977 | 
         
            +
            | 2.198         | 0.76  | 3800 | 2.1953          |
         
     | 
| 978 | 
         
            +
            | 2.2231        | 0.78  | 3900 | 2.1994          |
         
     | 
| 979 | 
         
            +
            | 2.2292        | 0.8   | 4000 | 2.2080          |
         
     | 
| 980 | 
         
            +
            | 2.2343        | 0.82  | 4100 | 2.2093          |
         
     | 
| 981 | 
         
            +
            | 2.2261        | 0.84  | 4200 | 2.2009          |
         
     | 
| 982 | 
         
            +
            | 2.2104        | 0.86  | 4300 | 2.2015          |
         
     | 
| 983 | 
         
            +
            | 2.2255        | 0.88  | 4400 | 2.2077          |
         
     | 
| 984 | 
         
            +
            | 2.2299        | 0.9   | 4500 | 2.2099          |
         
     | 
| 985 | 
         
            +
            | 2.2253        | 0.92  | 4600 | 2.2100          |
         
     | 
| 986 | 
         
            +
            | 2.2239        | 0.94  | 4700 | 2.2116          |
         
     | 
| 987 | 
         
            +
            | 2.2322        | 0.96  | 4800 | 2.2122          |
         
     | 
| 988 | 
         
            +
            | 2.2457        | 0.98  | 4900 | 2.2127          |
         
     | 
| 989 | 
         
            +
            | 2.2325        | 1.0   | 5000 | 2.2127          |
         
     | 
| 990 | 
         
            +
             
     | 
| 991 | 
         
            +
             
     | 
| 992 | 
         
            +
            ### Framework versions
         
     | 
| 993 | 
         
            +
             
     | 
| 994 | 
         
            +
            - Transformers 4.32.0
         
     | 
| 995 | 
         
            +
            - Pytorch 2.1.0
         
     | 
| 996 | 
         
            +
            - Datasets 2.14.7
         
     | 
| 997 | 
         
            +
            - Tokenizers 0.13.3
         
     | 
| 998 | 
         
            +
            ","{""id"": ""imdatta0/qwen_cnn_dailymail"", ""author"": ""imdatta0"", ""sha"": ""ca172638b3f5cc92594b450b2713ff619471c81f"", ""last_modified"": ""2023-11-17 14:26:15+00:00"", ""created_at"": ""2023-11-16 22:35:58+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""generated_from_trainer"", ""dataset:cnn_dailymail"", ""base_model:Qwen/Qwen-14B"", ""base_model:finetune:Qwen/Qwen-14B"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: Qwen/Qwen-14B\ndatasets:\n- cnn_dailymail\ntags:\n- generated_from_trainer\nmodel-index:\n- name: final_cnn_dailymail\n  results: []"", ""widget_data"": null, ""model_index"": [{""name"": ""final_cnn_dailymail"", ""results"": []}], ""config"": {""tokenizer_config"": {}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen.tiktoken', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-11-17 14:26:15+00:00"", ""cardData"": ""base_model: Qwen/Qwen-14B\ndatasets:\n- cnn_dailymail\ntags:\n- generated_from_trainer\nmodel-index:\n- name: final_cnn_dailymail\n  results: []"", ""transformersInfo"": null, ""_id"": ""6556994e38754a6b38f143cb"", ""modelId"": ""imdatta0/qwen_cnn_dailymail"", ""usedStorage"": 1396473164}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=imdatta0/qwen_cnn_dailymail&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bimdatta0%2Fqwen_cnn_dailymail%5D(%2Fimdatta0%2Fqwen_cnn_dailymail)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
     | 
| 999 | 
         
            +
            imdatta0/qwen_jeopardy,"---
         
     | 
| 1000 | 
         
            +
            base_model: Qwen/Qwen-14B
         
     | 
| 1001 | 
         
            +
            tags:
         
     | 
| 1002 | 
         
            +
            - generated_from_trainer
         
     | 
| 1003 | 
         
            +
            datasets:
         
     | 
| 1004 | 
         
            +
            - jeopardy
         
     | 
| 1005 | 
         
            +
            model-index:
         
     | 
| 1006 | 
         
            +
            - name: final_jeopardy
         
     | 
| 1007 | 
         
            +
              results: []
         
     | 
| 1008 | 
         
            +
            ---
         
     | 
| 1009 | 
         
            +
             
     | 
| 1010 | 
         
            +
            <!-- This model card has been generated automatically according to the information the Trainer had access to. You
         
     | 
| 1011 | 
         
            +
            should probably proofread and complete it, then remove this comment. -->
         
     | 
| 1012 | 
         
            +
             
     | 
| 1013 | 
         
            +
            # final_jeopardy
         
     | 
| 1014 | 
         
            +
             
     | 
| 1015 | 
         
            +
            This model is a fine-tuned version of [Qwen/Qwen-14B](https://huggingface.co/Qwen/Qwen-14B) on the jeopardy dataset.
         
     | 
| 1016 | 
         
            +
            It achieves the following results on the evaluation set:
         
     | 
| 1017 | 
         
            +
            - Loss: 2.3619
         
     | 
| 1018 | 
         
            +
             
     | 
| 1019 | 
         
            +
            ## Model description
         
     | 
| 1020 | 
         
            +
             
     | 
| 1021 | 
         
            +
            More information needed
         
     | 
| 1022 | 
         
            +
             
     | 
| 1023 | 
         
            +
            ## Intended uses & limitations
         
     | 
| 1024 | 
         
            +
             
     | 
| 1025 | 
         
            +
            More information needed
         
     | 
| 1026 | 
         
            +
             
     | 
| 1027 | 
         
            +
            ## Training and evaluation data
         
     | 
| 1028 | 
         
            +
             
     | 
| 1029 | 
         
            +
            More information needed
         
     | 
| 1030 | 
         
            +
             
     | 
| 1031 | 
         
            +
            ## Training procedure
         
     | 
| 1032 | 
         
            +
             
     | 
| 1033 | 
         
            +
            ### Training hyperparameters
         
     | 
| 1034 | 
         
            +
             
     | 
| 1035 | 
         
            +
             The following hyperparameters were used during training (see the scheduler sketch after the list):
         
     | 
| 1036 | 
         
            +
            - learning_rate: 0.0001
         
     | 
| 1037 | 
         
            +
            - train_batch_size: 2
         
     | 
| 1038 | 
         
            +
            - eval_batch_size: 2
         
     | 
| 1039 | 
         
            +
            - seed: 42
         
     | 
| 1040 | 
         
            +
            - gradient_accumulation_steps: 8
         
     | 
| 1041 | 
         
            +
            - total_train_batch_size: 16
         
     | 
| 1042 | 
         
            +
            - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
         
     | 
| 1043 | 
         
            +
            - lr_scheduler_type: cosine
         
     | 
| 1044 | 
         
            +
            - lr_scheduler_warmup_steps: 0.01
         
     | 
| 1045 | 
         
            +
            - num_epochs: 1
         
     | 
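The cosine schedule implied by these settings can be sketched with `get_cosine_schedule_with_warmup`; treating the listed warmup value of 0.01 as a fraction of the roughly 5000 optimizer steps shown in the results table below is an assumption, as is the stand-in optimizer setup.

```python
# Illustrative sketch of the implied learning-rate schedule, not the original code.
import torch
from transformers import get_cosine_schedule_with_warmup

params = [torch.nn.Parameter(torch.zeros(1))]        # stand-in parameters
optimizer = torch.optim.Adam(params, lr=1e-4, betas=(0.9, 0.999), eps=1e-8)

total_steps = 5000                                   # ~1 epoch per the results table below
warmup_steps = int(0.01 * total_steps)               # assumed to be a ratio of total steps
scheduler = get_cosine_schedule_with_warmup(optimizer, warmup_steps, total_steps)
```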
| 1046 | 
         
            +
             
     | 
| 1047 | 
         
            +
            ### Training results
         
     | 
| 1048 | 
         
            +
             
     | 
| 1049 | 
         
            +
            | Training Loss | Epoch | Step | Validation Loss |
         
     | 
| 1050 | 
         
            +
            |:-------------:|:-----:|:----:|:---------------:|
         
     | 
| 1051 | 
         
            +
            | 3.0584        | 0.02  | 100  | 2.6536          |
         
     | 
| 1052 | 
         
            +
            | 2.6474        | 0.04  | 200  | 2.5669          |
         
     | 
| 1053 | 
         
            +
            | 2.5729        | 0.06  | 300  | 2.5225          |
         
     | 
| 1054 | 
         
            +
            | 2.5364        | 0.08  | 400  | 2.5054          |
         
     | 
| 1055 | 
         
            +
            | 2.4918        | 0.1   | 500  | 2.4876          |
         
     | 
| 1056 | 
         
            +
            | 2.502         | 0.12  | 600  | 2.4734          |
         
     | 
| 1057 | 
         
            +
            | 2.4993        | 0.14  | 700  | 2.4651          |
         
     | 
| 1058 | 
         
            +
            | 2.4982        | 0.16  | 800  | 2.4514          |
         
     | 
| 1059 | 
         
            +
            | 2.4676        | 0.18  | 900  | 2.4419          |
         
     | 
| 1060 | 
         
            +
            | 2.4414        | 0.2   | 1000 | 2.4396          |
         
     | 
| 1061 | 
         
            +
            | 2.4656        | 0.22  | 1100 | 2.4292          |
         
     | 
| 1062 | 
         
            +
            | 2.4795        | 0.24  | 1200 | 2.4250          |
         
     | 
| 1063 | 
         
            +
            | 2.4341        | 0.26  | 1300 | 2.4228          |
         
     | 
| 1064 | 
         
            +
            | 2.4276        | 0.28  | 1400 | 2.4157          |
         
     | 
| 1065 | 
         
            +
            | 2.4297        | 0.3   | 1500 | 2.4105          |
         
     | 
| 1066 | 
         
            +
            | 2.4617        | 0.32  | 1600 | 2.4084          |
         
     | 
| 1067 | 
         
            +
            | 2.4431        | 0.34  | 1700 | 2.4016          |
         
     | 
| 1068 | 
         
            +
            | 2.4037        | 0.36  | 1800 | 2.4002          |
         
     | 
| 1069 | 
         
            +
            | 2.4289        | 0.38  | 1900 | 2.3984          |
         
     | 
| 1070 | 
         
            +
            | 2.4351        | 0.4   | 2000 | 2.3922          |
         
     | 
| 1071 | 
         
            +
            | 2.3931        | 0.42  | 2100 | 2.3920          |
         
     | 
| 1072 | 
         
            +
            | 2.4253        | 0.44  | 2200 | 2.3892          |
         
     | 
| 1073 | 
         
            +
            | 2.4507        | 0.46  | 2300 | 2.3856          |
         
     | 
| 1074 | 
         
            +
            | 2.4063        | 0.48  | 2400 | 2.3846          |
         
     | 
| 1075 | 
         
            +
            | 2.4253        | 0.5   | 2500 | 2.3825          |
         
     | 
| 1076 | 
         
            +
            | 2.3948        | 0.52  | 2600 | 2.3778          |
         
     | 
| 1077 | 
         
            +
            | 2.3839        | 0.54  | 2700 | 2.3781          |
         
     | 
| 1078 | 
         
            +
            | 2.4304        | 0.56  | 2800 | 2.3799          |
         
     | 
| 1079 | 
         
            +
            | 2.4458        | 0.58  | 2900 | 2.3723          |
         
     | 
| 1080 | 
         
            +
            | 2.4051        | 0.6   | 3000 | 2.3733          |
         
     | 
| 1081 | 
         
            +
            | 2.3984        | 0.62  | 3100 | 2.3713          |
         
     | 
| 1082 | 
         
            +
            | 2.3886        | 0.64  | 3200 | 2.3702          |
         
     | 
| 1083 | 
         
            +
            | 2.3625        | 0.66  | 3300 | 2.3717          |
         
     | 
| 1084 | 
         
            +
            | 2.3745        | 0.68  | 3400 | 2.3676          |
         
     | 
| 1085 | 
         
            +
            | 2.4168        | 0.7   | 3500 | 2.3665          |
         
     | 
| 1086 | 
         
            +
            | 2.3761        | 0.72  | 3600 | 2.3669          |
         
     | 
| 1087 | 
         
            +
            | 2.379         | 0.74  | 3700 | 2.3662          |
         
     | 
| 1088 | 
         
            +
            | 2.3801        | 0.76  | 3800 | 2.3642          |
         
     | 
| 1089 | 
         
            +
            | 2.3817        | 0.78  | 3900 | 2.3640          |
         
     | 
| 1090 | 
         
            +
            | 2.4002        | 0.8   | 4000 | 2.3645          |
         
     | 
| 1091 | 
         
            +
            | 2.3989        | 0.82  | 4100 | 2.3635          |
         
     | 
| 1092 | 
         
            +
            | 2.3916        | 0.84  | 4200 | 2.3629          |
         
     | 
| 1093 | 
         
            +
            | 2.4045        | 0.86  | 4300 | 2.3624          |
         
     | 
| 1094 | 
         
            +
            | 2.3919        | 0.88  | 4400 | 2.3626          |
         
     | 
| 1095 | 
         
            +
            | 2.3943        | 0.9   | 4500 | 2.3626          |
         
     | 
| 1096 | 
         
            +
            | 2.3896        | 0.92  | 4600 | 2.3616          |
         
     | 
| 1097 | 
         
            +
            | 2.3518        | 0.94  | 4700 | 2.3621          |
         
     | 
| 1098 | 
         
            +
            | 2.41          | 0.96  | 4800 | 2.3616          |
         
     | 
| 1099 | 
         
            +
            | 2.3782        | 0.98  | 4900 | 2.3621          |
         
     | 
| 1100 | 
         
            +
            | 2.3589        | 1.0   | 5000 | 2.3619          |
         
     | 
| 1101 | 
         
            +
             
     | 
| 1102 | 
         
            +
             
     | 
| 1103 | 
         
            +
            ### Framework versions
         
     | 
| 1104 | 
         
            +
             
     | 
| 1105 | 
         
            +
            - Transformers 4.32.0
         
     | 
| 1106 | 
         
            +
            - Pytorch 2.1.0
         
     | 
| 1107 | 
         
            +
            - Datasets 2.14.7
         
     | 
| 1108 | 
         
            +
            - Tokenizers 0.13.3
         
     | 
| 1109 | 
         
            +
            ","{""id"": ""imdatta0/qwen_jeopardy"", ""author"": ""imdatta0"", ""sha"": ""c5103d6520a72bcfee28e99ac7dbf618dcdf3e6b"", ""last_modified"": ""2023-11-19 19:02:54+00:00"", ""created_at"": ""2023-11-17 20:00:35+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""generated_from_trainer"", ""dataset:jeopardy"", ""base_model:Qwen/Qwen-14B"", ""base_model:finetune:Qwen/Qwen-14B"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: Qwen/Qwen-14B\ndatasets:\n- jeopardy\ntags:\n- generated_from_trainer\nmodel-index:\n- name: final_jeopardy\n  results: []"", ""widget_data"": null, ""model_index"": [{""name"": ""final_jeopardy"", ""results"": []}], ""config"": {""tokenizer_config"": {}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen.tiktoken', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-11-19 19:02:54+00:00"", ""cardData"": ""base_model: Qwen/Qwen-14B\ndatasets:\n- jeopardy\ntags:\n- generated_from_trainer\nmodel-index:\n- name: final_jeopardy\n  results: []"", ""transformersInfo"": null, ""_id"": ""6557c663ab0644b5313bc990"", ""modelId"": ""imdatta0/qwen_jeopardy"", ""usedStorage"": 809530622}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=imdatta0/qwen_jeopardy&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bimdatta0%2Fqwen_jeopardy%5D(%2Fimdatta0%2Fqwen_jeopardy)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
     | 
    	
        ReaderLM-v2_finetunes_20250425_143346.csv_finetunes_20250425_143346.csv
    ADDED
    
    | 
         @@ -0,0 +1,305 @@ 
     | 
| 1 | 
         
            +
            model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
         
     | 
| 2 | 
         
            +
            jinaai/ReaderLM-v2,"---
         
     | 
| 3 | 
         
            +
            pipeline_tag: text-generation
         
     | 
| 4 | 
         
            +
            language:
         
     | 
| 5 | 
         
            +
            - multilingual
         
     | 
| 6 | 
         
            +
            inference: false
         
     | 
| 7 | 
         
            +
            license: cc-by-nc-4.0
         
     | 
| 8 | 
         
            +
            library_name: transformers
         
     | 
| 9 | 
         
            +
            ---
         
     | 
| 10 | 
         
            +
             
     | 
| 11 | 
         
            +
            <br><br>
         
     | 
| 12 | 
         
            +
             
     | 
| 13 | 
         
            +
            <p align=""center"">
         
     | 
| 14 | 
         
            +
            <img src=""https://huggingface.co/datasets/jinaai/documentation-images/resolve/main/logo.webp"" alt=""Jina AI: Your Search Foundation, Supercharged!"" width=""150px"">
         
     | 
| 15 | 
         
            +
            </p>
         
     | 
| 16 | 
         
            +
             
     | 
| 17 | 
         
            +
            <p align=""center"">
         
     | 
| 18 | 
         
            +
            <b>Trained by <a href=""https://jina.ai/""><b>Jina AI</b></a>.</b>
         
     | 
| 19 | 
         
            +
            </p>
         
     | 
| 20 | 
         
            +
             
     | 
| 21 | 
         
            +
            [Blog](https://jina.ai/news/readerlm-v2-frontier-small-language-model-for-html-to-markdown-and-json) | [API](https://jina.ai/reader) | [Colab](https://colab.research.google.com/drive/1FfPjZwkMSocOLsEYH45B3B4NxDryKLGI?usp=sharing) | [AWS](https://aws.amazon.com/marketplace/pp/prodview-jwfct4j4rvxk2?sr=0-21&ref_=beagle&applicationId=AWSMPContessa) | [Azure](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/jinaai.reader-lm-v2-vm)| [Arxiv](https://arxiv.org/abs/2503.01151)
         
     | 
| 22 | 
         
            +
             
     | 
| 23 | 
         
            +
            # ReaderLM-v2
         
     | 
| 24 | 
         
            +
             
     | 
| 25 | 
         
            +
            `ReaderLM-v2` is a 1.5B parameter language model that converts raw HTML into beautifully formatted markdown or JSON with superior accuracy and improved longer context handling. Supporting multiple languages (29 in total), `ReaderLM-v2` is specialized for tasks involving HTML parsing, transformation, and text extraction.
         
     | 
| 26 | 
         
            +
             
     | 
| 27 | 
         
            +
            ## What's New in `ReaderLM-v2`
         
     | 
| 28 | 
         
            +
             
     | 
| 29 | 
         
            +
            `ReaderLM-v2` represents a significant leap forward from its predecessor, with several key improvements:
         
     | 
| 30 | 
         
            +
             
     | 
| 31 | 
         
            +
            - **Better Markdown Generation**: Thanks to its new training paradigm and higher-quality training data, the model excels at generating complex elements like code fences, nested lists, tables, and LaTeX equations.
         
     | 
| 32 | 
         
            +
            - **JSON Output**: Introduces direct HTML-to-JSON generation using predefined schemas, eliminating the need for intermediate markdown conversion.
         
     | 
| 33 | 
         
            +
             - **Longer Context Handling**: Handles up to 512K tokens of combined input and output length, with improved performance on long-form content.
         
     | 
| 34 | 
         
            +
            - **Multilingual Support**: Comprehensive support across 29 languages for broader applications.
         
     | 
| 35 | 
         
            +
            - **Enhanced Stability**: Greatly alleviates degeneration issues after generating long sequences through contrastive loss during training.
         
     | 
| 36 | 
         
            +
             
     | 
| 37 | 
         
            +
            ## Model Overview
         
     | 
| 38 | 
         
            +
             
     | 
| 39 | 
         
            +
            - **Model Type**: Autoregressive, decoder-only transformer
         
     | 
| 40 | 
         
            +
            - **Parameter Count**: 1.54B
         
     | 
| 41 | 
         
            +
            - **Context Window**: Up to 512K tokens (combined input and output)
         
     | 
| 42 | 
         
            +
            - **Hidden Size**: 1536
         
     | 
| 43 | 
         
            +
            - **Number of Layers**: 28
         
     | 
| 44 | 
         
            +
            - **Query Heads**: 12
         
     | 
| 45 | 
         
            +
            - **KV Heads**: 2
         
     | 
| 46 | 
         
            +
            - **Head Size**: 128
         
     | 
| 47 | 
         
            +
            - **Intermediate Size**: 8960
         
     | 
| 48 | 
         
            +
            - **Supported Languages**: English, Chinese, Japanese, Korean, French, Spanish, Portuguese, German, Italian, Russian, Vietnamese, Thai, Arabic, and more (29 total)
         
     | 
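These numbers can be read directly from the released checkpoint config. A small verification sketch follows; the field names assume the standard Qwen2 configuration, and the expected values are the ones listed above:

```python
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained('jinaai/ReaderLM-v2')
print(cfg.hidden_size)            # expected 1536
print(cfg.num_hidden_layers)      # expected 28
print(cfg.num_attention_heads)    # expected 12 (query heads)
print(cfg.num_key_value_heads)    # expected 2  (KV heads)
print(cfg.intermediate_size)      # expected 8960
```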
| 49 | 
         
            +
             
     | 
| 50 | 
         
            +
            ---
         
     | 
| 51 | 
         
            +
             
     | 
| 52 | 
         
            +
            # Usage
         
     | 
| 53 | 
         
            +
             
     | 
| 54 | 
         
            +
            Below, you will find instructions and examples for using `ReaderLM-v2` locally using the Hugging Face Transformers library.
         
     | 
| 55 | 
         
            +
            For a more hands-on experience in a hosted environment, see the [Google Colab Notebook](https://colab.research.google.com/drive/1FfPjZwkMSocOLsEYH45B3B4NxDryKLGI?usp=sharing).
         
     | 
| 56 | 
         
            +
             
     | 
| 57 | 
         
            +
            ## Via Reader API
         
     | 
| 58 | 
         
            +
             
     | 
| 59 | 
         
            +
            `ReaderLM-v2` is now fully integrated with [Reader API](https://jina.ai/reader/). To use it, simply specify `x-engine: readerlm-v2` in your request headers and enable response streaming with `-H 'Accept: text/event-stream'`:
         
     | 
| 60 | 
         
            +
             
     | 
| 61 | 
         
            +
            ```bash
         
     | 
| 62 | 
         
            +
            curl https://r.jina.ai/https://news.ycombinator.com/ -H 'x-engine: readerlm-v2' -H 'Accept: text/event-stream'
         
     | 
| 63 | 
         
            +
            ```
         
     | 
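The same call from Python, as a sketch using the `requests` library; the URL and headers mirror the curl command above, and the streamed body arrives as server-sent events:

```python
import requests

resp = requests.get(
    'https://r.jina.ai/https://news.ycombinator.com/',
    headers={'x-engine': 'readerlm-v2', 'Accept': 'text/event-stream'},
    stream=True,
)
for line in resp.iter_lines(decode_unicode=True):
    if line:
        print(line)  # each non-empty line is one event chunk
```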
| 64 | 
         
            +
             
     | 
| 65 | 
         
            +
            You can try it without an API key at a lower rate limit. For higher rate limits, you can purchase an API key. Please note that ReaderLM-v2 requests consume 3x the normal token count from your API key allocation. This is currently an experimental feature, and we're working with the GCP team to improve GPU efficiency.
         
     | 
| 66 | 
         
            +
             
     | 
| 67 | 
         
            +
            ## On Google Colab
         
     | 
| 68 | 
         
            +
             
     | 
| 69 | 
         
            +
             You can try `ReaderLM-v2` via our [Colab notebook](https://colab.research.google.com/drive/1FfPjZwkMSocOLsEYH45B3B4NxDryKLGI?usp=sharing), which demonstrates HTML-to-markdown conversion, JSON extraction, and instruction-following using the HackerNews frontpage as an example. The notebook is optimized for Colab's free T4 GPU tier and requires `vllm` and `triton` to run with acceleration.
         
     | 
| 70 | 
         
            +
             
     | 
| 71 | 
         
            +
            Note that the free T4 GPU has limitations—it doesn't support bfloat16 or flash attention 2, leading to higher memory usage and slower processing of longer inputs. Nevertheless, ReaderLM-v2 successfully processes large documents under these constraints, achieving processing speeds of 67 tokens/s input and 36 tokens/s output. For production use, we recommend an RTX 3090/4090 for optimal performance.
         
     | 
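As a rough sketch of what the accelerated path looks like with `vllm` (an assumed setup, not the notebook's exact code; the sampling values are copied from the transformers examples further down, and `input_prompt` stands for the prompt produced by `create_prompt` below):

```python
from vllm import LLM, SamplingParams

llm = LLM(model='jinaai/ReaderLM-v2')
params = SamplingParams(temperature=0, max_tokens=1024, repetition_penalty=1.08)
outputs = llm.generate([input_prompt], params)   # input_prompt built as shown below
print(outputs[0].outputs[0].text)
```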
| 72 | 
         
            +
             
     | 
| 73 | 
         
            +
            ## Local Usage
         
     | 
| 74 | 
         
            +
             
     | 
| 75 | 
         
            +
            To use `ReaderLM-v2` locally:
         
     | 
| 76 | 
         
            +
             
     | 
| 77 | 
         
            +
            1. Install the necessary dependencies:
         
     | 
| 78 | 
         
            +
             
     | 
| 79 | 
         
            +
               ```bash
         
     | 
| 80 | 
         
            +
               pip install transformers
         
     | 
| 81 | 
         
            +
               ```
         
     | 
| 82 | 
         
            +
             
     | 
| 83 | 
         
            +
            2. Load and run the model:
         
     | 
| 84 | 
         
            +
             
     | 
| 85 | 
         
            +
               ```python
         
     | 
| 86 | 
         
            +
               from transformers import AutoModelForCausalLM, AutoTokenizer
         
     | 
| 87 | 
         
            +
             
     | 
| 88 | 
         
            +
               device = ""cuda""  # or ""cpu""
         
     | 
| 89 | 
         
            +
               tokenizer = AutoTokenizer.from_pretrained(""jinaai/ReaderLM-v2"")
         
     | 
| 90 | 
         
            +
               model = AutoModelForCausalLM.from_pretrained(""jinaai/ReaderLM-v2"").to(device)
         
     | 
| 91 | 
         
            +
               ```
         
     | 
| 92 | 
         
            +
             
     | 
| 93 | 
         
            +
            3. (Optional) Pre-clean your HTML to remove scripts, styles, comments, to reduce the noise and length of the input:
         
     | 
| 94 | 
         
            +
             
     | 
| 95 | 
         
            +
               ```python
         
     | 
| 96 | 
         
            +
               import re
         
     | 
| 97 | 
         
            +
             
     | 
| 98 | 
         
            +
               # Patterns
         
     | 
| 99 | 
         
            +
               SCRIPT_PATTERN = r""<[ ]*script.*?\/[ ]*script[ ]*>""
         
     | 
| 100 | 
         
            +
               STYLE_PATTERN = r""<[ ]*style.*?\/[ ]*style[ ]*>""
         
     | 
| 101 | 
         
            +
               META_PATTERN = r""<[ ]*meta.*?>""
         
     | 
| 102 | 
         
            +
               COMMENT_PATTERN = r""<[ ]*!--.*?--[ ]*>""
         
     | 
| 103 | 
         
            +
               LINK_PATTERN = r""<[ ]*link.*?>""
         
     | 
| 104 | 
         
            +
               BASE64_IMG_PATTERN = r'<img[^>]+src=""data:image/[^;]+;base64,[^""]+""[^>]*>'
         
     | 
| 105 | 
         
            +
               SVG_PATTERN = r""(<svg[^>]*>)(.*?)(<\/svg>)""
         
     | 
| 106 | 
         
            +
             
     | 
| 107 | 
         
            +
             
     | 
| 108 | 
         
            +
               def replace_svg(html: str, new_content: str = ""this is a placeholder"") -> str:
         
     | 
| 109 | 
         
            +
                   return re.sub(
         
     | 
| 110 | 
         
            +
                       SVG_PATTERN,
         
     | 
| 111 | 
         
            +
                       lambda match: f""{match.group(1)}{new_content}{match.group(3)}"",
         
     | 
| 112 | 
         
            +
                       html,
         
     | 
| 113 | 
         
            +
                       flags=re.DOTALL,
         
     | 
| 114 | 
         
            +
                   )
         
     | 
| 115 | 
         
            +
             
     | 
| 116 | 
         
            +
             
     | 
| 117 | 
         
            +
               def replace_base64_images(html: str, new_image_src: str = ""#"") -> str:
         
     | 
| 118 | 
         
            +
                   return re.sub(BASE64_IMG_PATTERN, f'<img src=""{new_image_src}""/>', html)
         
     | 
| 119 | 
         
            +
             
     | 
| 120 | 
         
            +
             
     | 
| 121 | 
         
            +
               def clean_html(html: str, clean_svg: bool = False, clean_base64: bool = False):
         
     | 
| 122 | 
         
            +
                   html = re.sub(
         
     | 
| 123 | 
         
            +
                       SCRIPT_PATTERN, """", html, flags=re.IGNORECASE | re.MULTILINE | re.DOTALL
         
     | 
| 124 | 
         
            +
                   )
         
     | 
| 125 | 
         
            +
                   html = re.sub(
         
     | 
| 126 | 
         
            +
                       STYLE_PATTERN, """", html, flags=re.IGNORECASE | re.MULTILINE | re.DOTALL
         
     | 
| 127 | 
         
            +
                   )
         
     | 
| 128 | 
         
            +
                   html = re.sub(
         
     | 
| 129 | 
         
            +
                       META_PATTERN, """", html, flags=re.IGNORECASE | re.MULTILINE | re.DOTALL
         
     | 
| 130 | 
         
            +
                   )
         
     | 
| 131 | 
         
            +
                   html = re.sub(
         
     | 
| 132 | 
         
            +
                       COMMENT_PATTERN, """", html, flags=re.IGNORECASE | re.MULTILINE | re.DOTALL
         
     | 
| 133 | 
         
            +
                   )
         
     | 
| 134 | 
         
            +
                   html = re.sub(
         
     | 
| 135 | 
         
            +
                       LINK_PATTERN, """", html, flags=re.IGNORECASE | re.MULTILINE | re.DOTALL
         
     | 
| 136 | 
         
            +
                   )
         
     | 
| 137 | 
         
            +
             
     | 
| 138 | 
         
            +
                   if clean_svg:
         
     | 
| 139 | 
         
            +
                       html = replace_svg(html)
         
     | 
| 140 | 
         
            +
                   if clean_base64:
         
     | 
| 141 | 
         
            +
                       html = replace_base64_images(html)
         
     | 
| 142 | 
         
            +
                   return html
         
     | 
| 143 | 
         
            +
               ```
         
     | 
| 144 | 
         
            +
             
     | 
| 145 | 
         
            +
            4. Create a prompt for the model:
         
     | 
| 146 | 
         
            +
             
     | 
| 147 | 
         
            +
               ```python
         
     | 
| 148 | 
         
            +
               def create_prompt(
         
     | 
| 149 | 
         
            +
                   text: str, tokenizer=None, instruction: str = None, schema: str = None
         
     | 
| 150 | 
         
            +
               ) -> str:
         
     | 
| 151 | 
         
            +
                   """"""
         
     | 
| 152 | 
         
            +
                   Create a prompt for the model with optional instruction and JSON schema.
         
     | 
| 153 | 
         
            +
                   """"""
         
     | 
| 154 | 
         
            +
                   if not instruction:
         
     | 
| 155 | 
         
            +
                       instruction = ""Extract the main content from the given HTML and convert it to Markdown format.""
         
     | 
| 156 | 
         
            +
                   if schema:
         
     | 
| 157 | 
         
            +
                       instruction = ""Extract the specified information from a list of news threads and present it in a structured JSON format.""
         
     | 
| 158 | 
         
            +
                       prompt = f""{instruction}\n```html\n{text}\n```\nThe JSON schema is as follows:```json\n{schema}\n```""
         
     | 
| 159 | 
         
            +
                   else:
         
     | 
| 160 | 
         
            +
                       prompt = f""{instruction}\n```html\n{text}\n```""
         
     | 
| 161 | 
         
            +
             
     | 
| 162 | 
         
            +
                   messages = [
         
     | 
| 163 | 
         
            +
                       {
         
     | 
| 164 | 
         
            +
                           ""role"": ""user"",
         
     | 
| 165 | 
         
            +
                           ""content"": prompt,
         
     | 
| 166 | 
         
            +
                       }
         
     | 
| 167 | 
         
            +
                   ]
         
     | 
| 168 | 
         
            +
             
     | 
| 169 | 
         
            +
                   return tokenizer.apply_chat_template(
         
     | 
| 170 | 
         
            +
                       messages, tokenize=False, add_generation_prompt=True
         
     | 
| 171 | 
         
            +
                   )
         
     | 
| 172 | 
         
            +
               ```
         
     | 
| 173 | 
         
            +
             
     | 
| 174 | 
         
            +
            ### HTML to Markdown Example
         
     | 
| 175 | 
         
            +
             
     | 
| 176 | 
         
            +
            ```python
         
     | 
| 177 | 
         
            +
            html = ""<html><body><h1>Hello, world!</h1></body></html>""
         
     | 
| 178 | 
         
            +
             
     | 
| 179 | 
         
            +
            html = clean_html(html)
         
     | 
| 180 | 
         
            +
             
     | 
| 181 | 
         
            +
            input_prompt = create_prompt(html, tokenizer=tokenizer)
         
     | 
| 182 | 
         
            +
            inputs = tokenizer.encode(input_prompt, return_tensors=""pt"").to(device)
         
     | 
| 183 | 
         
            +
            outputs = model.generate(
         
     | 
| 184 | 
         
            +
                inputs, max_new_tokens=1024, temperature=0, do_sample=False, repetition_penalty=1.08
         
     | 
| 185 | 
         
            +
            )
         
     | 
| 186 | 
         
            +
             
     | 
| 187 | 
         
            +
            print(tokenizer.decode(outputs[0]))
         
     | 
| 188 | 
         
            +
            ```
         
     | 
| 189 | 
         
            +
             
     | 
| 190 | 
         
            +
            ### HTML to JSON Example
         
     | 
| 191 | 
         
            +
             
     | 
| 192 | 
         
            +
            ```python
         
     | 
| 193 | 
         
            +
            schema = """"""
         
     | 
| 194 | 
         
            +
            {
         
     | 
| 195 | 
         
            +
              ""type"": ""object"",
         
     | 
| 196 | 
         
            +
              ""properties"": {
         
     | 
| 197 | 
         
            +
                ""title"": {
         
     | 
| 198 | 
         
            +
                  ""type"": ""string""
         
     | 
| 199 | 
         
            +
                },
         
     | 
| 200 | 
         
            +
                ""author"": {
         
     | 
| 201 | 
         
            +
                  ""type"": ""string""
         
     | 
| 202 | 
         
            +
                },
         
     | 
| 203 | 
         
            +
                ""date"": {
         
     | 
| 204 | 
         
            +
                  ""type"": ""string""
         
     | 
| 205 | 
         
            +
                },
         
     | 
| 206 | 
         
            +
                ""content"": {
         
     | 
| 207 | 
         
            +
                  ""type"": ""string""
         
     | 
| 208 | 
         
            +
                }
         
     | 
| 209 | 
         
            +
              },
         
     | 
| 210 | 
         
            +
              ""required"": [""title"", ""author"", ""date"", ""content""]
         
     | 
| 211 | 
         
            +
            }
         
     | 
| 212 | 
         
            +
            """"""
         
     | 
| 213 | 
         
            +
             
     | 
| 214 | 
         
            +
            html = clean_html(html)
         
     | 
| 215 | 
         
            +
            input_prompt = create_prompt(html, tokenizer=tokenizer, schema=schema)
         
     | 
| 216 | 
         
            +
             
     | 
| 217 | 
         
            +
            inputs = tokenizer.encode(input_prompt, return_tensors=""pt"").to(device)
         
     | 
| 218 | 
         
            +
            outputs = model.generate(
         
     | 
| 219 | 
         
            +
                inputs, max_new_tokens=1024, temperature=0, do_sample=False, repetition_penalty=1.08
         
     | 
| 220 | 
         
            +
            )
         
     | 
| 221 | 
         
            +
             
     | 
| 222 | 
         
            +
            print(tokenizer.decode(outputs[0]))
         
     | 
| 223 | 
         
            +
            ```
         
     | 
| 224 | 
         
            +
             
     | 
| 225 | 
         
            +
            ## Model Performance
         
     | 
| 226 | 
         
            +
             
     | 
| 227 | 
         
            +
            ReaderLM-v2 has been extensively evaluated on various tasks:
         
     | 
| 228 | 
         
            +
             
     | 
| 229 | 
         
            +
            ### Quantitative Evaluation
         
     | 
| 230 | 
         
            +
             
     | 
| 231 | 
         
            +
            For HTML-to-Markdown tasks, the model outperforms much larger models like Qwen2.5-32B-Instruct and Gemini2-flash-expr, achieving:
         
     | 
| 232 | 
         
            +
            - ROUGE-L: 0.84
         
     | 
| 233 | 
         
            +
            - Levenshtein Distance: 0.22
         
     | 
| 234 | 
         
            +
            - Jaro-Winkler Similarity: 0.82
         
     | 
| 235 | 
         
            +
             
     | 
| 236 | 
         
            +
            For HTML-to-JSON tasks, it shows competitive performance with:
         
     | 
| 237 | 
         
            +
            - F1 Score: 0.81
         
     | 
| 238 | 
         
            +
            - Precision: 0.82
         
     | 
| 239 | 
         
            +
            - Recall: 0.81
         
     | 
| 240 | 
         
            +
            - Pass-Rate: 0.98
         
### Qualitative Evaluation

The model excels in three key dimensions:
- Content Integrity: 39/50
- Structural Accuracy: 35/50
- Format Compliance: 36/50

These scores demonstrate strong performance in preserving semantic information, maintaining structural accuracy, and adhering to markdown syntax standards.

## Training Details

ReaderLM-v2 is built on Qwen2.5-1.5B-Instruct and trained using a sophisticated pipeline:

1. Data Preparation: Created the html-markdown-1m dataset with 1 million HTML documents
2. Synthetic Data Generation: Three-step pipeline using Qwen2.5-32B-Instruct
   - Drafting: Initial markdown and JSON generation
   - Refinement: Content cleanup and structure alignment
   - Critique: Quality evaluation and filtering

3. Training Process:
   - Long-context pretraining
   - Supervised fine-tuning
   - Direct preference optimization
               - Self-play reinforcement tuning","{""id"": ""jinaai/ReaderLM-v2"", ""author"": ""jinaai"", ""sha"": ""1d07078459ee1e880a22d67387b5e683d50a6e4b"", ""last_modified"": ""2025-03-04 06:52:36+00:00"", ""created_at"": ""2025-01-13 02:44:23+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 69284, ""downloads_all_time"": null, ""likes"": 615, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""onnx"", ""safetensors"", ""qwen2"", ""text-generation"", ""conversational"", ""multilingual"", ""arxiv:2503.01151"", ""license:cc-by-nc-4.0"", ""autotrain_compatible"", ""text-generation-inference"", ""region:eu""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- multilingual\nlibrary_name: transformers\nlicense: cc-by-nc-4.0\npipeline_tag: text-generation\ninference: false"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""Qwen2ForCausalLM""], ""model_type"": ""qwen2"", ""tokenizer_config"": {""bos_token"": null, ""chat_template"": ""{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are an AI assistant developed by Jina AI.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n' }}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"", ""eos_token"": ""<|im_end|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": null}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='onnx/model.onnx', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='onnx/model.onnx_data', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='onnx/model_bnb4.onnx', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='onnx/model_fp16.onnx', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='onnx/model_fp16.onnx_data', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='onnx/model_int8.onnx', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='onnx/model_q4.onnx', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='onnx/model_q4f16.onnx', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='onnx/model_quantized.onnx', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='onnx/model_uint8.onnx', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], 
""spaces"": [""KBaba7/Quant"", ""bhaskartripathi/LLM_Quantization"", ""totolook/Quant"", ""FallnAI/Quantize-HF-Models"", ""ruslanmv/convert_to_gguf"", ""Felguk/ReaderLM-v2"", ""Nymbo/Markdown-Studio"", ""kevcx2/jinaai-ReaderLM-v2"", ""K00B404/LLM_Quantization"", ""Alun6666/jinaai-ReaderLM-v2"", ""agideia/jinaai-ReaderLM-v2""], ""safetensors"": {""parameters"": {""BF16"": 1543714304}, ""total"": 1543714304}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-04 06:52:36+00:00"", ""cardData"": ""language:\n- multilingual\nlibrary_name: transformers\nlicense: cc-by-nc-4.0\npipeline_tag: text-generation\ninference: false"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""67847e071ceb7569fc09eb5f"", ""modelId"": ""jinaai/ReaderLM-v2"", ""usedStorage"": 31852785268}",0,https://huggingface.co/alexgusevski/ReaderLM-v2-mlx,1,,0,"https://huggingface.co/mlx-community/jinaai-ReaderLM-v2, https://huggingface.co/fdgsgqaegt/ReaderLM-v2-Q4_K_M-GGUF, https://huggingface.co/Svngoku/ReaderLM-v2-Q8_0-GGUF, https://huggingface.co/mradermacher/ReaderLM-v2-GGUF, https://huggingface.co/adriabama06/ReaderLM-v2-AWQ, https://huggingface.co/adriabama06/ReaderLM-v2-GPTQ-8bit, https://huggingface.co/Emm9625/ReaderLM-v2-Q3_K_L-GGUF, https://huggingface.co/Emm9625/ReaderLM-v2-Q6_K-GGUF, https://huggingface.co/roleplaiapp/ReaderLM-v2-Q2_K-GGUF, https://huggingface.co/roleplaiapp/ReaderLM-v2-Q3_K_S-GGUF, https://huggingface.co/roleplaiapp/ReaderLM-v2-Q3_K_M-GGUF, https://huggingface.co/roleplaiapp/ReaderLM-v2-Q3_K_L-GGUF, https://huggingface.co/roleplaiapp/ReaderLM-v2-Q4_0-GGUF, https://huggingface.co/roleplaiapp/ReaderLM-v2-Q4_K_S-GGUF, https://huggingface.co/roleplaiapp/ReaderLM-v2-Q4_K_M-GGUF, https://huggingface.co/roleplaiapp/ReaderLM-v2-Q5_0-GGUF, https://huggingface.co/roleplaiapp/ReaderLM-v2-Q5_K_S-GGUF, https://huggingface.co/roleplaiapp/ReaderLM-v2-Q5_K_M-GGUF, https://huggingface.co/roleplaiapp/ReaderLM-v2-Q6_K-GGUF, https://huggingface.co/roleplaiapp/ReaderLM-v2-Q8_0-GGUF, https://huggingface.co/yasserrmd/ReaderLM-v2-gguf, https://huggingface.co/ysn-rfd/ReaderLM-v2-Q8_0-GGUF, https://huggingface.co/DevQuasar/jinaai.ReaderLM-v2-GGUF, https://huggingface.co/mradermacher/ReaderLM-v2-i1-GGUF, https://huggingface.co/PrunaAI/jinaai-ReaderLM-v2-GGUF-smashed, https://huggingface.co/alexgusevski/ReaderLM-v2-q3-mlx, https://huggingface.co/alexgusevski/ReaderLM-v2-q4-mlx, https://huggingface.co/alexgusevski/ReaderLM-v2-q6-mlx, https://huggingface.co/alexgusevski/ReaderLM-v2-q8-mlx, https://huggingface.co/matrixportal/ReaderLM-v2-GGUF, https://huggingface.co/ysn-rfd/ReaderLM-v2-GGUF",31,"https://huggingface.co/kunal732/QwenReader, https://huggingface.co/kunal732/qwenreader-m2, https://huggingface.co/kunal732/qwenreader3, https://huggingface.co/kunal732/qwenreader4",4,"Alun6666/jinaai-ReaderLM-v2, FallnAI/Quantize-HF-Models, Felguk/ReaderLM-v2, K00B404/LLM_Quantization, KBaba7/Quant, Nymbo/Markdown-Studio, agideia/jinaai-ReaderLM-v2, bhaskartripathi/LLM_Quantization, huggingface/InferenceSupport/discussions/new?title=jinaai/ReaderLM-v2&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bjinaai%2FReaderLM-v2%5D(%2Fjinaai%2FReaderLM-v2)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kevcx2/jinaai-ReaderLM-v2, 
ruslanmv/convert_to_gguf, totolook/Quant",12
         
alexgusevski/ReaderLM-v2-mlx,"---
pipeline_tag: text-generation
language:
- multilingual
inference: false
license: cc-by-nc-4.0
library_name: transformers
base_model: jinaai/ReaderLM-v2
tags:
- mlx
---

# alexgusevski/ReaderLM-v2-mlx

The Model [alexgusevski/ReaderLM-v2-mlx](https://huggingface.co/alexgusevski/ReaderLM-v2-mlx) was
converted to MLX format from [jinaai/ReaderLM-v2](https://huggingface.co/jinaai/ReaderLM-v2)
using mlx-lm version **0.21.4**.

## Use with mlx

```bash
pip install mlx-lm
```

```python
from mlx_lm import load, generate

model, tokenizer = load(""alexgusevski/ReaderLM-v2-mlx"")

prompt = ""hello""

if tokenizer.chat_template is not None:
    messages = [{""role"": ""user"", ""content"": prompt}]
    prompt = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True
    )

response = generate(model, tokenizer, prompt=prompt, verbose=True)
```
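
The ""hello"" prompt above is just a smoke test of the conversion. ReaderLM-v2 itself is meant to receive raw HTML in the user turn; a rough sketch of that usage with mlx-lm is shown below. The instruction wording is an assumption for illustration only - follow the upstream jinaai/ReaderLM-v2 card for the exact prompt format:

```python
from mlx_lm import load, generate

model, tokenizer = load('alexgusevski/ReaderLM-v2-mlx')

html = '<html><body><h1>Hello</h1><p>World</p></body></html>'
# Illustrative instruction; the upstream card documents the canonical prompt.
instruction = 'Extract the main content from the given HTML and convert it to Markdown format.'
messages = [{'role': 'user', 'content': instruction + '\n\n' + html}]
prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True)

markdown = generate(model, tokenizer, prompt=prompt, max_tokens=1024, verbose=False)
print(markdown)
```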
            ","{""id"": ""alexgusevski/ReaderLM-v2-mlx"", ""author"": ""alexgusevski"", ""sha"": ""d4ec67dfcf82838a77aaa2e2e256316d6d3f5d54"", ""last_modified"": ""2025-02-24 18:14:25+00:00"", ""created_at"": ""2025-02-24 18:02:47+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 13, ""downloads_all_time"": null, ""likes"": 1, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""qwen2"", ""text-generation"", ""mlx"", ""conversational"", ""multilingual"", ""base_model:jinaai/ReaderLM-v2"", ""base_model:finetune:jinaai/ReaderLM-v2"", ""license:cc-by-nc-4.0"", ""autotrain_compatible"", ""text-generation-inference"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: jinaai/ReaderLM-v2\nlanguage:\n- multilingual\nlibrary_name: transformers\nlicense: cc-by-nc-4.0\npipeline_tag: text-generation\ntags:\n- mlx\ninference: false"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""Qwen2ForCausalLM""], ""model_type"": ""qwen2"", ""tokenizer_config"": {""bos_token"": null, ""chat_template"": ""{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are an AI assistant developed by Jina AI.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n' }}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"", ""eos_token"": ""<|im_end|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": null}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F16"": 1543714304}, ""total"": 1543714304}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-02-24 18:14:25+00:00"", ""cardData"": ""base_model: jinaai/ReaderLM-v2\nlanguage:\n- multilingual\nlibrary_name: transformers\nlicense: cc-by-nc-4.0\npipeline_tag: text-generation\ntags:\n- mlx\ninference: false"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""67bcb447efd8d2373026aa71"", ""modelId"": ""alexgusevski/ReaderLM-v2-mlx"", ""usedStorage"": 
3098888563}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=alexgusevski/ReaderLM-v2-mlx&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Balexgusevski%2FReaderLM-v2-mlx%5D(%2Falexgusevski%2FReaderLM-v2-mlx)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
SillyTavern-Presets-Sphiratrioth_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv ADDED
@@ -0,0 +1,171 @@
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
sphiratrioth666/SillyTavern-Presets-Sphiratrioth,"---
license: cc-by-nc-3.0
language:
- en
tags:
- art
- roleplay
- preset
- presets
- sillytavern
- silly tavern
- template
- prompt
base_model:
- mistralai/Mistral-Nemo-Instruct-2407
- mistralai/Mistral-Small-Instruct-2409
- TheDrummer/Rocinante-12B-v1.1
- TheDrummer/Cydonia-22B-v1
- TheDrummer/Theia-21B-v2
- MarinaraSpaghetti/NemoMix-Unleashed-12B
- anthracite-org/magnum-v2-12b
- anthracite-org/magnum-v3-34b
- Sao10K/Fimbulvetr-11B-v2
- Sao10K/L3-8B-Stheno-v3.2
- nothingiisreal/MN-12B-Celeste-V1.9
- nothingiisreal/L3.1-8B-Celeste-V1.5
---
<h1><ins>Silly Tavern Presets ~ by Sphiratrioth</ins></h1>
         
<p align=""justify"">Simple, effective, universal presets for Silly Tavern role-playing purposes. Organized file names, organized preset names, detailed instructions and explanations.<br><br><b>SillyTavern (Version):</b> 1.12.6+<br><b>Templates (Included):</b> Mistral, Mistral V3-Tekken, Mistral V7-Tekken, ChatML, LLAMA3, Alpaca, Metharme/Pygmalion<br><b>Models (Suggested):</b> a) Mistral Nemo - Instruct (12B); b) Mistral Small - Instruct (22B/24B); c) Magnum V2/V.3 (12B/34B); d) Rocinante (12B); e) Nemo Unleashed by Marinara (12B); f) ArliRPG Models (12B/22B); g) Lyra V4 (12B); h) Cydonia (22B/24B); i) Celeste 1.5/1.9 (8B/12B); j) Stheno 3.2 (8B), k) Silicon/Loyal Macaroni/Kunoichi Maids (7B), l) any other models using those exact templates</p>

|
|:--:|
|Creative Commons Attribution-NonCommercial-ShareAlike 3.0 License (https://www.deviantart.com/selrond/art/One-Ring-To-Rule-Them-All-507183083)|<br>

<h1><ins><b><i>Don't be the Amazon's Saur-off. Be a true Lord of the Templates.</i></b></ins></h1>

<p><a href=""https://buymeacoffee.com/sphiratrioth"">
<img src=""https://cdn-uploads.huggingface.co/production/uploads/66a2a6820c4cd36ffbc95623/DInaQUVOSuDcBEKOJoNPH.png"" alt=""W3Schools.com"" width=""1273"" height=""64"">
</a></p>

<div style=""text-align: justify"">Here, you can buy me a Coffee. Just click on the banner above. All the work I do remains free - but I drink a lot of coffee, actually, haha - so it is very nice when you show support by fueling my caffeine addiction :-D
As a bonus, you can also find a lot of my other, side (hobby) projects. Some Reshade presets for different games, my custom TTRPG system (soon), maybe other goodies in the future.
</div>
         
<br>
<h1><ins>FILES (.JSON) INCLUDED:</ins></h1>
<li>CONTEXT TEMPLATES (STORY STRING)</li>
<li>INSTRUCT TEMPLATES</li>
<li>SYSTEM PROMPTS</li>
<li>SETTINGS (SAMPLERS)</li>
<li>BONUS/OPTIONAL: REGEX</li>
<br>
<h1><ins>SAMPLERS (USED):</ins></h1>
<li>Min-P</li>
<li>Temperature</li>
<li>DRY (prevents repetition)</li>
<br>
> All the other samplers are neutralized. That's all you need in modern times (2024/2025). The samplers order is also correct - so min-p before temperature. <br>> I am not a fan of XTC, it feels too ""loose"".
<br>
<br>
<h5><b>CREATIVITY VARIANTS:</b></h5>
<div style=""background-color: #ffefb8; padding: 16px 32px; outline: 2px solid; border-radius: 10px;"">
<li><b>Sphiratrioth [T=0.7] (Temperature = 0.7)</b> - less creative responses, more precise, tames down the ""fidgeting"" models</li>
<li><b>Sphiratrioth [T=1.0] (Temperature = 1.0)</b> - standard, balanced responses, works well with a majority of modern models</li>
<li><b>Sphiratrioth [T=1.3] (Temperature = 1.3)</b> - creative boost - but without going off the rails, which starts around T=1.5</li>
</div>
         
<br>
<h5><b>MESSAGES FORMATS:</b></h5>
<div style=""background-color: #ffefb8; padding: 16px 32px; outline: 2px solid; border-radius: 10px;"">
<li><b>1st Person / 3rd Person</b> (switch through System Prompt Presets!)</li>
<li><b>DIALOGUES</b> - quotation marks if you do not use my REGEX / plain text with no quotation marks if you add my REGEX</li>
<li><b>NARRATION</b> - italics</li>
<li><b>INNER THOUGHTS</b> - code block aka ```Thoughts```</li>
<li><b>Trim Incomplete Sentences</b>: ON (it prevents messages from being cut in half with a short maximum tokens setting under the ""conversation"" mode preset; you need to turn it on manually in the newest version or...); OFF - you can use my REGEX files instead, which are better than the basic SillyTavern tool at trimming unfinished sentences. Additionally, doing it through REGEX allows trimming even the extremely short messages. You can set the output tokens to crazy values such as 20-50 and it will trim everything perfectly, which is not doable otherwise - only through proper regex. The endless issue with too long messages finally solved! Look for the Regex files in a separate folder to download now - you import them under the SillyTavern Extensions Tab -> REGEX, in the order provided in the files readme. You can also turn quotations ON/OFF this way - since the LLM will generate with quotations now.</li>
</div><br>
<h5><b>PRESETS:</b></h5>
<div style=""background-color: #ffefb8; padding: 16px 32px; outline: 2px solid; border-radius: 10px;"">

**A) Sphiratrioth (Conversation)** - minimal narration - when you want to simulate a natural, human-like conversation with a bot. For those who do not like the ""verbose"" role-play.
***<<< conversation-like, ~ 150 tokens responses, mostly dialogues with basic narration when needed, it works well >>>***



**B) Sphiratrioth (Roleplay)** - balanced RP - when you want to experience a balanced role-play with responses around 1-2 paragraphs.
***<<< 1 short paragraph, ~ 250 tokens responses, around 1/3 dialogues against 2/3 narration >>>***



**C) Sphiratrioth (Story)** - AI becomes your Game Master (GM) - when you want those long, verbose paragraphs with rich narration & story-telling.
***<<< 1-3 paragraphs, ~ 350 tokens responses, around 50/50 narration against dialogues, pushes the story on its own >>>***



**D) Sphiratrioth (GM)** - you are a Game Master (GM), AI becomes your player - tabletop RPG-like experience from a GM-POV!
***<<< 1-3 paragraphs, ~ between 150-350 tokens responses, around 50/50 narration and inner thoughts, capable of dialogues when situation calls, follows your story >>>***


(Yeah, I was sleepy - {{char}} wanted lush greenery but it accepts it's Autumn, I actually did not read properly what LLM came up with in the first place, fault on my side :-D But you get the idea of a preset at least, haha.)

</div>
         
<br>
<h1><ins>How to use:</ins></h1>

**Advanced Users:** Enjoy, modify, forget.\
**Beginners:** Read further.

1. Find your model's native INSTRUCT TEMPLATE. Every fine-tune stands on a base model, which is usually specified by its creator in a manner of ""We're presenting the first fine-tune of Mistral Small"" or ""Nemo on steroids"" or ""LLAMA 3 with our special sauce"" etc. It may be confusing, it may be clear. Not my fault :-P However, you need to identify the base model of a fine-tune by reading its description page. Different templates may be used by creators (tuners) for training - and then - templates suggested on a fine-tune's page work best. However, creators often just choose the basic template of a model they're tuning - so make sure to check the model's description page or assume it's using the base model's template (template of the main model that a given tuner is tuning).
***<<< Most typical templates: Mistral, ChatML, LLAMA3, Alpaca, Metharme/Pygmalion >>>***
2. Golden Rule: Match CONTEXT TEMPLATE (Story String) and INSTRUCT TEMPLATE with your model. For instance, when a Mistral-Nemo fine-tune is using the Mistral Instruct Template (as specified by the creator of a fine-tune) - pick up the ""Sphiratrioth - Mistral [CONTEXT TEMPLATE]"" file and ""Sphiratrioth - Mistral [INSTRUCT TEMPLATE]"" file. It's very easy. I keep my naming clear - both the file names and preset names inside of the Silly Tavern UI list when you load them up. They're always clear and distinguishable.
***<<< This rule may be broken when you become an advanced user. Then, you'll know what you're doing so you'll mix different context/instruct templates. For now - one ring... khem - one template to rule them all. Don't be the Amazon Saur-off. Be a true Lord of the Templates.>>>***
3. Download my presets.
4. Copy the [Sphiratrioth-(...)-CONTEXT TEMPLATE] files to (...)\SillyTavern\data\default-user\context.
5. Copy the [Sphiratrioth-(...)-INSTRUCT TEMPLATE] files to (...)\SillyTavern\data\default-user\instruct.
6. Copy the [Sphiratrioth-(...)-SETTINGS] files to (...)\SillyTavern\data\default-user\TextGen Settings.
7. Copy the [Sphiratrioth-(...)-PROMPT] files to (...)\SillyTavern\data\default-user\sysprompt.
8. Open up Silly Tavern UI.
9. Click on the ""Capital A"" tab in Silly Tavern UI (AI Response Formatting).
10. Load up my Context Template (Story String) Preset from the Context Templates list.
11. Click on the ""Enable Instruct Mode"" button (ON/OFF next to the name ""Instruct Template"").
12. Load up my Instruct Template Preset from the Instruct Templates list.
13. Load up my System Prompt from the System Prompts list.
14. Click on the ""Sliders"" tab in Silly Tavern UI (AI Response Configuration).
15. Load up my Settings Preset from the Text Completion Presets list.
16. Switch between the 1st Person/3rd Person narration by switching the System Prompt Presets. Theoretically, it should re-evaluate the prompt but it may not work unless you start a new chat!
17. Switch between Conversation/Roleplay/Story Modes & Creativity (Temperature) by switching the Settings (Text Completion Presets/Samplers). Add optional Regex if you want.
18. Enjoy.
         
<br>
<br>
<h1><ins>BEWARE:</ins></h1>

<p align=""justify"">**IF YOU WANT THE PRESETS TO WORK PROPERLY, REMEMBER TO START A NEW CHAT FOR TESTING PURPOSES/ACTUAL ROLEPLAY. IF YOU ""HOT"" SWAP THE PRESETS, YOU MAY EASILY FALL INTO A TRAP OF CONTINUING THE PREVIOUS PROMPT. AS A RESULT, THE SHORT/BALANCED/LONG RESPONSES AND NARRATION IN 1st/3rd PERSON WILL NOT SWITCH PROPERLY. ALWAYS START A NEW CHAT. THEORETICALLY, SWITCHING A SYSTEM PROMPT FORCES THE PROMPT EVALUATION BUT IN REALITY - IT OFTEN BREAKS, IT DOES NOT WORK PROPERLY. IT WORKS WHEN YOU START A NEW CHAT THOUGH - OR WHEN YOU RELOAD THE WHOLE SILLY TAVERN UI.**</p>

<div style=""background-color: #ffefb8; padding: 16px 32px; outline: 2px solid; border-radius: 10px;"">
> Match <b>CONTEXT TEMPLATE</b> & <b>INSTRUCT TEMPLATE</b> (by names - while loading them up in Silly Tavern UI) [<u>""CAPITAL A"" - SETTINGS TAB</u>].<br>> Switch <b>1st/3rd Person</b> LLM Narration through <b>SYSTEM PROMPT</b> (start new chat to be sure that it works) [<u>""CAPITAL A"" - SETTINGS TAB</u>].<br>> Switch <b>CONVERSATION/ROLEPLAY/STORY</b> LLM response style & <b>MODEL'S CREATIVITY</b> through <b>SAMPLERS</b> [<u>""SLIDERS"" - SETTINGS TAB</u>].
</div>
<br>
<h1><ins>REGEX:</ins></h1>

1. Download the REGEX files.
2. Open up SillyTavern Extensions tab.
3. Find REGEX.
4. Import downloaded files as GLOBAL.
5. Adjust the order: start with ""trim incomplete"", followed by ""find last special character"", followed by ""remove **"""", followed by ""remove ``````"""", followed by ""delete quotes"" (optional).
<br>
Turn Smooth Streaming in SillyTavern Settings OFF. REGEX will make your streaming ""laggy"" since it corrects the messages as they're generated live but - it's still so much better than unfinished sentences with a lack of closing * or "" or ``` signs. You can leave the normal streaming on, it's ok - just turn the Smooth Streaming OFF. It is the only way to trim very short messages around 20-50 tokens properly - if that's what you want - and it generally does its job much better than SillyTavern by itself with all normal message lengths. It's worth it. (A rough sketch of this trimming idea, in Python, follows at the end of this section.)
<br>
Troubleshooting: if it does not allow you to enter any special sign at the end of your message - open up the regex definitions (in SillyTavern, just edit them) - and uncheck the box for user messages. It will only affect the AI output then.
<br>
<br>
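
The regex logic itself lives in the bundled .json files, so the exact patterns are not reproduced here. Purely as an illustration of the ""trim incomplete sentences"" idea - and not the author's actual regex - the core of it amounts to cutting a reply back to the last character that can close a sentence or a formatting span:

```python
import re

def trim_incomplete(reply: str) -> str:
    # Keep everything up to the last sentence- or span-closing character
    # (. ! ? * or a backtick); drop the dangling fragment after it.
    matches = list(re.finditer(r'[.!?*`]', reply))
    if not matches:
        return reply.strip()
    return reply[:matches[-1].end()].strip()

print(trim_incomplete('*She smiles softly.* Then she reaches for'))
# -> *She smiles softly.*
```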
         
<h1><ins>PLANS FOR THE FUTURE:</ins></h1>
Updating the presets pack to keep up with the new, better models coming out in the future.
<br>
<br>
<h1><ins>ANNOTATIONS:</ins></h1>

I am using the full names of what's what but people simplify and people are great but messy - thus - you'll come across words such as context or instruct meaning completely different things as you're continuing your journey in the LLMs community. No one writes context template vs context length or instruct template vs instruct mode/instruct model. Everyone simply uses context & instruct, also a story string (since it's clear it is a part of the context template, isn't it? Isn't?! Of course it is! Everyone knows at birth!). It all causes confusion for beginners. Here - a small guide for you - to understand how others operate.
<br>
<li>Context = Context Template or Story String (IN TERMS OF PRESETS)</li>
I'm simplifying here but people often get confused with names. Story String is a part of the Context Template - to be precise - but creators of models and presets use those terms as equivalents...
<li>Context = Context Length of a model (IN TERMS OF PROPER USE OF A WORD). People simplify and use mental leaps. Not my fault, again :-P</li>
<li>Instruct = Instruct Template (IN TERMS OF PROPER USE OF A WORD). Again, people simplify...</li>
<li>Model Name - Instruct Model = a model trained to follow instructions in a given instruct template (IN TERMS OF PROPER USE OF A WORD). You catch the drift by now, don't you? :-P</li>
<br>
There are other great presets from Virt-io or Marinara available on the site but they come with messy preset names, less clear file structures or forced instructions such as including the character example messages through the instruct/context template, which becomes an unnecessary bother. Also, they do a bit less than mine.<br><br>However, both Marinara and Virt-io are great creators and great people - so when you learn and understand how presets actually work - be sure to give their collections a try! You may like them more, you may prefer this, who knows :-)

<a id=""1"">[1]</a> : Virt-io, [Silly Tavern Presets](https://huggingface.co/Virt-io/SillyTavern-Presets), 2024

            <a id=""1"">[2]</a> : Marinara, [Silly Tavern Presets](https://huggingface.co/MarinaraSpaghetti/SillyTavern-Settings), 2024","{""id"": ""sphiratrioth666/SillyTavern-Presets-Sphiratrioth"", ""author"": ""sphiratrioth666"", ""sha"": ""516d9adfc479a7220d2c62249b6a581d1f2fd303"", ""last_modified"": ""2025-03-20 23:31:55+00:00"", ""created_at"": ""2024-09-28 18:14:58+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 160, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""art"", ""roleplay"", ""preset"", ""presets"", ""sillytavern"", ""silly tavern"", ""template"", ""prompt"", ""en"", ""base_model:MarinaraSpaghetti/NemoMix-Unleashed-12B"", ""base_model:finetune:MarinaraSpaghetti/NemoMix-Unleashed-12B"", ""license:cc-by-nc-3.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- mistralai/Mistral-Nemo-Instruct-2407\n- mistralai/Mistral-Small-Instruct-2409\n- TheDrummer/Rocinante-12B-v1.1\n- TheDrummer/Cydonia-22B-v1\n- TheDrummer/Theia-21B-v2\n- MarinaraSpaghetti/NemoMix-Unleashed-12B\n- anthracite-org/magnum-v2-12b\n- anthracite-org/magnum-v3-34b\n- Sao10K/Fimbulvetr-11B-v2\n- Sao10K/L3-8B-Stheno-v3.2\n- nothingiisreal/MN-12B-Celeste-V1.9\n- nothingiisreal/L3.1-8B-Celeste-V1.5\nlanguage:\n- en\nlicense: cc-by-nc-3.0\ntags:\n- art\n- roleplay\n- preset\n- presets\n- sillytavern\n- silly tavern\n- template\n- prompt"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Regex/00. 
Regex Order.docx', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Regex/delete_quotes.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Regex/find_last_special_character.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Regex/remove___.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Regex/remove_``````.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Regex/trim_incomplete.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='TextGen Settings/Sphiratrioth - Conversation [T=0.7].json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='TextGen Settings/Sphiratrioth - Conversation [T=1.0].json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='TextGen Settings/Sphiratrioth - Conversation [T=1.3].json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='TextGen Settings/Sphiratrioth - GM [T=1.0].json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='TextGen Settings/Sphiratrioth - Roleplay [T=0.7].json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='TextGen Settings/Sphiratrioth - Roleplay [T=1.0].json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='TextGen Settings/Sphiratrioth - Roleplay [T=1.3].json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='TextGen Settings/Sphiratrioth - Story [T=0.7].json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='TextGen Settings/Sphiratrioth - Story [T=1.0].json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='TextGen Settings/Sphiratrioth - Story [T=1.3].json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='context/Sphiratrioth - Alpaca.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='context/Sphiratrioth - ChatML.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='context/Sphiratrioth - LLAMA 3.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='context/Sphiratrioth - Metharme.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='context/Sphiratrioth - Mistral V3-Tekken.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='context/Sphiratrioth - Mistral V7-Tekken.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='context/Sphiratrioth - Mistral.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='instruct/Sphiratrioth - Alpaca.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='instruct/Sphiratrioth - ChatML.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='instruct/Sphiratrioth - LLAMA 3.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='instruct/Sphiratrioth - Metharme.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='instruct/Sphiratrioth - Mistral V3-Tekken.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='instruct/Sphiratrioth - Mistral V7-Tekken.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='instruct/Sphiratrioth - Mistral.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='sysprompt/Sphiratrioth - Conversation - 1st Person (Inner Thoughts).json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='sysprompt/Sphiratrioth - Conversation - 1st Person.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='sysprompt/Sphiratrioth - Conversation - 3rd person.json', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='sysprompt/Sphiratrioth - GM.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='sysprompt/Sphiratrioth - Roleplay - 1st Person (Inner Thoughts).json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='sysprompt/Sphiratrioth - Roleplay - 1st Person.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='sysprompt/Sphiratrioth - Roleplay - 3rd person.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='sysprompt/Sphiratrioth - Story - 1st Person (Inner Thoughts).json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='sysprompt/Sphiratrioth - Story - 1st Person.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='sysprompt/Sphiratrioth - Story - 3rd Person.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-20 23:31:55+00:00"", ""cardData"": ""base_model:\n- mistralai/Mistral-Nemo-Instruct-2407\n- mistralai/Mistral-Small-Instruct-2409\n- TheDrummer/Rocinante-12B-v1.1\n- TheDrummer/Cydonia-22B-v1\n- TheDrummer/Theia-21B-v2\n- MarinaraSpaghetti/NemoMix-Unleashed-12B\n- anthracite-org/magnum-v2-12b\n- anthracite-org/magnum-v3-34b\n- Sao10K/Fimbulvetr-11B-v2\n- Sao10K/L3-8B-Stheno-v3.2\n- nothingiisreal/MN-12B-Celeste-V1.9\n- nothingiisreal/L3.1-8B-Celeste-V1.5\nlanguage:\n- en\nlicense: cc-by-nc-3.0\ntags:\n- art\n- roleplay\n- preset\n- presets\n- sillytavern\n- silly tavern\n- template\n- prompt"", ""transformersInfo"": null, ""_id"": ""66f847a2259df9e375501630"", ""modelId"": ""sphiratrioth666/SillyTavern-Presets-Sphiratrioth"", ""usedStorage"": 0}",0,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=sphiratrioth666/SillyTavern-Presets-Sphiratrioth&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bsphiratrioth666%2FSillyTavern-Presets-Sphiratrioth%5D(%2Fsphiratrioth666%2FSillyTavern-Presets-Sphiratrioth)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
TableGPT2-7B_finetunes_20250426_215237.csv_finetunes_20250426_215237.csv ADDED
@@ -0,0 +1,234 @@
| 1 | 
         
            +
            model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
         
     | 
| 2 | 
         
            +
            tablegpt/TableGPT2-7B,"---
         
     | 
| 3 | 
         
            +
            license: apache-2.0
         
     | 
| 4 | 
         
            +
            language:
         
     | 
| 5 | 
         
            +
            - zh
         
     | 
| 6 | 
         
            +
            - en
         
     | 
| 7 | 
         
            +
            base_model:
         
     | 
| 8 | 
         
            +
            - Qwen/Qwen2.5-7B
         
     | 
| 9 | 
         
            +
            ---
         
     | 
| 10 | 
         
            +
             
     | 
| 11 | 
         
            +
            # TableGPT2-7B
         
     | 
| 12 | 
         
            +
             
     | 
| 13 | 
         
            +
            ## Model details
         
     | 
| 14 | 
         
            +
             
     | 
| 15 | 
         
            +
            We developed and released TableGPT2-7B, a large-scale decoder specifically tailored for data-intensive tasks, with a focus on interpreting and analyzing tabular data. TableGPT2-7B is designed to bridge the gap between conventional LLM capabilities and the real-world demands of tabular/structured data tasks, such as those in business intelligence (BI), automated data-driven analysis, and application tasks tightly involving databases or data warehouses.
         
     | 
| 16 | 
         
            +
             
     | 
| 17 | 
         
            +
            **Model Developers**  
         
     | 
| 18 | 
         
            +
             
     | 
| 19 | 
         
            +
            Zhejiang University
         
     | 
| 20 | 
         
            +
             
     | 
| 21 | 
         
            +
            **Variations**  
         
     | 
| 22 | 
         
            +
             
     | 
| 23 | 
         
            +
            TableGPT2 is available in two configurations—7B and 72B parameters—both derived from the Qwen2.5 model family and optimized for handling structured data in tabular formats. Currently, we have released the 7B version to the public.
         
     | 
| 24 | 
         
            +
             
     | 
| 25 | 
         
            +
            **Input**
         
     | 
| 26 | 
         
            +
             
     | 
| 27 | 
         
            +
            TableGPT2-7B accepts both text and tabular data as input, with the tabular data structured as text in the format of a df.head() result.
         
     | 
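For illustration, here is a minimal sketch (with made-up data) of the plain-text `df.head()` rendering that serves as the tabular input; the column names and values below are purely hypothetical.

```python
import pandas as pd

# Hypothetical example data; any small DataFrame preview works the same way.
df = pd.DataFrame({"Opponent": ["Padres", "Dodgers"], "Score": ["8-7", "9-5"]})

# The tabular input is simply this text rendering, embedded in the prompt.
print(df.head().to_string(index=False))
# Prints something like:
# Opponent Score
#   Padres   8-7
#  Dodgers   9-5
```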
| 28 | 
         
            +
             
     | 
| 29 | 
         
            +
            **Output** 
         
     | 
| 30 | 
         
            +
             
     | 
| 31 | 
         
            +
            TableGPT2-7B produces text-based outputs, specifically optimized for coding tasks, data interpretation, and BI-focused question answering.
         
     | 
| 32 | 
         
            +
             
     | 
| 33 | 
         
            +
            **Language**  
         
     | 
| 34 | 
         
            +
             
     | 
| 35 | 
         
            +
            Our model places a strong emphasis on Chinese corpora, and currently, queries in other languages may have limited support.
         
     | 
| 36 | 
         
            +
             
     | 
| 37 | 
         
            +
            **Other Requirements** 
         
     | 
| 38 | 
         
            +
             
     | 
| 39 | 
         
            +
            We highly recommend exploring [our repository on GitHub](https://github.com/tablegpt/tablegpt-agent), where users can integrate this model into our agent workflow for enhanced performance.
         
     | 
| 40 | 
         
            +
             
     | 
| 41 | 
         
            +
            **Model Architecture** 
         
     | 
| 42 | 
         
            +
             
     | 
| 43 | 
         
            +
            TableGPT2-7B is built upon the Qwen2.5 architecture and includes specialized encoding for tabular data. It features a unique semantic encoder designed to interpret tabular data, capturing insights from rows, columns, and entire tables. Continual Pretraining (CPT) and Supervised Fine-Tuning (SFT) have been applied to equip the model for real-world BI applications and complex query processing. 
         
     | 
| 44 | 
         
            +
             
     | 
| 45 | 
         
            +
             For now, the standalone decoder is open-sourced and fully functional without requiring assistance from the encoder. The encoder is still being prepared, pending engineering considerations, primarily because we hope to provide tighter integration with DeepSpeed and vLLM.
         
     | 
| 46 | 
         
            +
             
     | 
| 47 | 
         
            +
             
     | 
| 48 | 
         
            +
            |              | Training Data                                    | Params | Context Length | Tokens                            | Tables        |
         
     | 
| 49 | 
         
            +
            | ------------ | ------------------------------------------------ | ------ | -------------- | --------------------------------- | ------------- |
         
     | 
| 50 | 
         
            +
            | TableGPT2-7B | Multimodal data sources and BI-specific examples | 7B     | 128K           | 86B tokens CPT, 2.36M SFT samples | 593.8K tables |
         
     | 
| 51 | 
         
            +
             
     | 
| 52 | 
         
            +
            **Status**  
         
     | 
| 53 | 
         
            +
             
     | 
| 54 | 
         
            +
            This model is static, trained on an offline dataset. Future versions may be released to enhance its performance on specialized tasks.
         
     | 
| 55 | 
         
            +
             
     | 
| 56 | 
         
            +
            **QuickStart**
         
     | 
| 57 | 
         
            +
             
     | 
| 58 | 
         
            +
             This code snippet demonstrates how to build a prompt that includes table information, load the tokenizer and model, and generate a response.
         
     | 
| 59 | 
         
            +
             
     | 
| 60 | 
         
            +
            > Note that you need `transformers>=4.37.0` to use `TableGPT2`:
         
     | 
| 61 | 
         
            +
            > ```sh
         
     | 
| 62 | 
         
            +
             > pip install ""transformers>=4.37.0""
         
     | 
| 63 | 
         
            +
            > ```
         
     | 
| 64 | 
         
            +
             
     | 
| 65 | 
         
            +
             
     | 
| 66 | 
         
            +
            ```python
         
     | 
| 67 | 
         
            +
            from transformers import AutoModelForCausalLM, AutoTokenizer
         
     | 
| 68 | 
         
            +
             
     | 
| 69 | 
         
            +
            # Using pandas to read some structured data
         
     | 
| 70 | 
         
            +
            import pandas as pd
         
     | 
| 71 | 
         
            +
            from io import StringIO
         
     | 
| 72 | 
         
            +
             
     | 
| 73 | 
         
            +
            # single table
         
     | 
| 74 | 
         
            +
            EXAMPLE_CSV_CONTENT = """"""
         
     | 
| 75 | 
         
            +
            ""Loss"",""Date"",""Score"",""Opponent"",""Record"",""Attendance""
         
     | 
| 76 | 
         
            +
            ""Hampton (14–12)"",""September 25"",""8–7"",""Padres"",""67–84"",""31,193""
         
     | 
| 77 | 
         
            +
            ""Speier (5–3)"",""September 26"",""3–1"",""Padres"",""67–85"",""30,711""
         
     | 
| 78 | 
         
            +
            ""Elarton (4–9)"",""September 22"",""3–1"",""@ Expos"",""65–83"",""9,707""
         
     | 
| 79 | 
         
            +
            ""Lundquist (0–1)"",""September 24"",""15–11"",""Padres"",""67–83"",""30,774""
         
     | 
| 80 | 
         
            +
            ""Hampton (13–11)"",""September 6"",""9–5"",""Dodgers"",""61–78"",""31,407""
         
     | 
| 81 | 
         
            +
            """"""
         
     | 
| 82 | 
         
            +
             
     | 
| 83 | 
         
            +
            csv_file = StringIO(EXAMPLE_CSV_CONTENT)
         
     | 
| 84 | 
         
            +
            df = pd.read_csv(csv_file)
         
     | 
| 85 | 
         
            +
             
     | 
| 86 | 
         
            +
            model_name = ""tablegpt/TableGPT2-7B""
         
     | 
| 87 | 
         
            +
             
     | 
| 88 | 
         
            +
            model = AutoModelForCausalLM.from_pretrained(
         
     | 
| 89 | 
         
            +
                model_name, torch_dtype=""auto"", device_map=""auto""
         
     | 
| 90 | 
         
            +
            )
         
     | 
| 91 | 
         
            +
            tokenizer = AutoTokenizer.from_pretrained(model_name)
         
     | 
| 92 | 
         
            +
             
     | 
| 93 | 
         
            +
            example_prompt_template = """"""Given access to several pandas dataframes, write the Python code to answer the user's question.
         
     | 
| 94 | 
         
            +
             
     | 
| 95 | 
         
            +
            /*
         
     | 
| 96 | 
         
            +
            ""{var_name}.head(5).to_string(index=False)"" as follows:
         
     | 
| 97 | 
         
            +
            {df_info}
         
     | 
| 98 | 
         
            +
            */
         
     | 
| 99 | 
         
            +
             
     | 
| 100 | 
         
            +
            Question: {user_question}
         
     | 
| 101 | 
         
            +
            """"""
         
     | 
| 102 | 
         
            +
             question = ""哪些比赛的战绩达到了40胜40负?""  # i.e., Which games reached a record of 40 wins and 40 losses?
         
     | 
| 103 | 
         
            +
             
     | 
| 104 | 
         
            +
            prompt = example_prompt_template.format(
         
     | 
| 105 | 
         
            +
                var_name=""df"",
         
     | 
| 106 | 
         
            +
                df_info=df.head(5).to_string(index=False),
         
     | 
| 107 | 
         
            +
                user_question=question,
         
     | 
| 108 | 
         
            +
            )
         
     | 
| 109 | 
         
            +
             
     | 
| 110 | 
         
            +
            messages = [
         
     | 
| 111 | 
         
            +
                {""role"": ""system"", ""content"": ""You are a helpful assistant.""},
         
     | 
| 112 | 
         
            +
                {""role"": ""user"", ""content"": prompt},
         
     | 
| 113 | 
         
            +
            ]
         
     | 
| 114 | 
         
            +
            text = tokenizer.apply_chat_template(
         
     | 
| 115 | 
         
            +
                messages, tokenize=False, add_generation_prompt=True
         
     | 
| 116 | 
         
            +
            )
         
     | 
| 117 | 
         
            +
            model_inputs = tokenizer([text], return_tensors=""pt"").to(model.device)
         
     | 
| 118 | 
         
            +
             
     | 
| 119 | 
         
            +
            generated_ids = model.generate(**model_inputs, max_new_tokens=512)
         
     | 
| 120 | 
         
            +
            generated_ids = [
         
     | 
| 121 | 
         
            +
                output_ids[len(input_ids) :]
         
     | 
| 122 | 
         
            +
                for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
         
     | 
| 123 | 
         
            +
            ]
         
     | 
| 124 | 
         
            +
             
     | 
| 125 | 
         
            +
            response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
         
     | 
| 126 | 
         
            +
            ```
         
     | 
| 127 | 
         
            +
             
     | 
| 128 | 
         
            +
            **Complex Usage Scenarios**
         
     | 
| 129 | 
         
            +
             
     | 
| 130 | 
         
            +
             For complex usage scenarios, we provide a [tablegpt-agent](https://github.com/tablegpt/tablegpt-agent) toolkit to help you handle various types of tabular inputs more conveniently.
         
     | 
| 131 | 
         
            +
             
     | 
| 132 | 
         
            +
             This agent is built on top of the `LangGraph` library and provides a user-friendly interface for interacting with `TableGPT2`.
         
     | 
| 133 | 
         
            +
             
     | 
| 134 | 
         
            +
             
     | 
| 135 | 
         
            +
            **Deployment**
         
     | 
| 136 | 
         
            +
             
     | 
| 137 | 
         
            +
            For deployment, we recommend using vLLM.
         
     | 
| 138 | 
         
            +
            * **Install vLLM**: You can install vLLM by running the following command.
         
     | 
| 139 | 
         
            +
              ```bash
         
     | 
| 140 | 
         
            +
              pip install ""vllm>=0.5.5""
         
     | 
| 141 | 
         
            +
              ```
         
     | 
| 142 | 
         
            +
             * **Model Deployment**: Use vLLM to deploy your model. For example, the following command starts an OpenAI-compatible server:
         
     | 
| 143 | 
         
            +
              ```bash
         
     | 
| 144 | 
         
            +
              python -m vllm.entrypoints.openai.api_server --served-model-name TableGPT2-7B --model path/to/weights
         
     | 
| 145 | 
         
            +
              ```
         
     | 
| 146 | 
         
            +
               Then you can access the Chat API as follows:
         
     | 
| 147 | 
         
            +
             
     | 
| 148 | 
         
            +
              ```bash
         
     | 
| 149 | 
         
            +
              curl http://localhost:8000/v1/chat/completions \
         
     | 
| 150 | 
         
            +
                  -H ""Content-Type: application/json"" \
         
     | 
| 151 | 
         
            +
                  -d '{
         
     | 
| 152 | 
         
            +
                  ""model"": ""TableGPT2-7B"",
         
     | 
| 153 | 
         
            +
                  ""messages"": [
         
     | 
| 154 | 
         
            +
                      {""role"": ""system"", ""content"": ""You are a helpful assistant.""},
         
     | 
| 155 | 
         
            +
                      {""role"": ""user"", ""content"": ""Hey, who are you?""}
         
     | 
| 156 | 
         
            +
                  ]
         
     | 
| 157 | 
         
            +
                  }'
         
     | 
| 158 | 
         
            +
              
         
     | 
| 159 | 
         
            +
              ```
         
     | 
| 160 | 
         
            +
               For more details about how to use TableGPT2, please refer to [our repository on GitHub](https://github.com/tablegpt/tablegpt-agent).
         
     | 
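Equivalently, here is a minimal Python sketch of the same Chat API call, assuming the vLLM server above is running on localhost:8000 and the `openai` client package is installed; the base URL and placeholder API key are illustrative only.

```python
from openai import OpenAI

# Point the standard OpenAI client at the local vLLM server started above.
client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")  # vLLM ignores the key

completion = client.chat.completions.create(
    model="TableGPT2-7B",  # must match --served-model-name
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hey, who are you?"},
    ],
)
print(completion.choices[0].message.content)
```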
| 161 | 
         
            +
             
     | 
| 162 | 
         
            +
             
     | 
| 163 | 
         
            +
            **License**  
         
     | 
| 164 | 
         
            +
             
     | 
| 165 | 
         
            +
             TableGPT2-7B is released under the Apache-2.0 license.
         
     | 
| 166 | 
         
            +
             
     | 
| 167 | 
         
            +
            <!-- The TableGPT2-7B license permits both research and commercial use, with further details available in the [GitHub repository](https://github.com/tablegpt/tablegpt-agent). -->
         
     | 
| 168 | 
         
            +
             
     | 
| 169 | 
         
            +
            **Research Paper**  
         
     | 
| 170 | 
         
            +
             
     | 
| 171 | 
         
            +
            TableGPT2-7B is introduced and validated in the paper ""[TableGPT2: A Large Multimodal Model with Tabular Data Integration](https://arxiv.org/abs/2411.02059)"" available on arXiv.
         
     | 
| 172 | 
         
            +
             
     | 
| 173 | 
         
            +
            **Where to send questions or comments about the model**  
         
     | 
| 174 | 
         
            +
             
     | 
| 175 | 
         
            +
            Inquiries and feedback are welcome at [j.zhao@zju.edu.cn](mailto:j.zhao@zju.edu.cn).
         
     | 
| 176 | 
         
            +
             
     | 
| 177 | 
         
            +
            ## Training Data
         
     | 
| 178 | 
         
            +
             
     | 
| 179 | 
         
            +
            **Overview**  
         
     | 
| 180 | 
         
            +
             
     | 
| 181 | 
         
            +
             Training for TableGPT2-7B involved more than 593,800 curated tables, over 86 billion tokens for continual pretraining (CPT), and more than 2.36 million high-quality query-table-output tuples for supervised fine-tuning (SFT). This extensive dataset aims to meet the rigorous demands of modern applications involving structured or tabular data.
         
     | 
| 182 | 
         
            +
             
     | 
| 183 | 
         
            +
            **Data Freshness**  
         
     | 
| 184 | 
         
            +
             
     | 
| 185 | 
         
            +
            The training data has a cutoff of October 2024.
         
     | 
| 186 | 
         
            +
             
     | 
| 187 | 
         
            +
            ## Evaluation Results
         
     | 
| 188 | 
         
            +
             
     | 
| 189 | 
         
            +
            Evaluation has shown that TableGPT2-7B performs consistently well across benchmarks for tabular comprehension, code generation, and structured data reasoning, achieving a **35.20%** performance increase over comparable models on standard benchmarks and **49.32%** on BI-focused assessments. The RealTabBench benchmark further demonstrated the model’s robustness in handling unconventional tables and complex queries. Below, we present the results on public table-related benchmarks.
         
     | 
| 190 | 
         
            +
             
     | 
| 191 | 
         
            +
            | **Benchmark**                 | **Metric** | GPT-4o | TableLLM (Qwen2) | TableLLM (CodeQwen) | TableLLM (LLaMA3) | TableLLM (LLaMA3.1) | TableLLM (DeepSeek) | TableLLM-13B | DeepSeek-lite | Yi-Coder | Qwen2.5-Coder | Qwen2.5-Instruct | **TableGPT2-7B** | **TableGPT2-72B** |
         
     | 
| 192 | 
         
            +
            | ----------------------------- | ---------- | ------ | ---------------- | ------------------- | ----------------- | ------------------- | ------------------- | ------------ | ------------- | -------- | ------------- | ---------------- | -------------- | --------------- |
         
     | 
| 193 | 
         
            +
            | **Table Understanding**       |            |        |                  |                     |                   |                     |                     |              |               |          |               |                  |                |                 |
         
     | 
| 194 | 
         
            +
            | Col Type Annot.               | F1         | 31.75  | 10.10            | 5.71                | 1.47              | 1.59                | 6.04                | 12.70        | 20.58         | 5.38     | 32.59         | 22.19            | **85.88**      | 85.67           |
         
     | 
| 195 | 
         
            +
            | Relation Extract.             | F1         | 52.95  | 1.60             | 3.79                | 2.39              | 2.00                | 3.34                | 18.16        | 8.67          | 2.25     | 31.00         | 15.92            | **83.35**      | 79.50           |
         
     | 
| 196 | 
         
            +
            | Entity Linking                | Acc        | 90.80  | 47.10            | 39.70               | 0.20              | 0.60                | 15.50               | 66.25        | 70.15         | 41.75    | 71.70         | 82.25            | 92.00          | **93.30**       |
         
     | 
| 197 | 
         
            +
            | Row Pop.                      | MAP        | 53.40  | 2.20             | 5.14                | 1.93              | 6.23                | 3.13                | 14.25        | 1.20          | 1.00     | 13.23         | 12.30            | **59.97**      | 55.83           |
         
     | 
| 198 | 
         
            +
            | **Question Answering**        |            |        |                  |                     |                   |                     |                     |              |               |          |               |                  |                |                 |
         
     | 
| 199 | 
         
            +
            | HiTab                         | Exec Acc   | 48.40  | 11.74            | 0.00                | 0.00              | 0.00                | 39.08               | 6.30         | 0.76          | 0.00     | 1.70          | 10.73            | 70.27          | **75.57**       |
         
     | 
| 200 | 
         
            +
            | FetaQA                        | BLEU       | 21.70  | 12.24            | 8.69                | 2.42              | 3.10                | 7.94                | 10.83        | 15.08         | 11.17    | 13.00         | 16.91            | 28.97          | **32.25**       |
         
     | 
| 201 | 
         
            +
            | HybridQA                      | Acc        | 58.60  | 27.12            | 20.14               | 27.35             | 27.61               | 19.53               | 51.88        | 42.58         | 29.83    | 51.10         | 51.13            | 53.17          | **56.41**       |
         
     | 
| 202 | 
         
            +
            | WikiSQL                       | Acc        | 47.60  | 46.50            | 37.20               | 39.26             | 39.00               | 36.14               | 41.10        | 38.30         | 25.34    | 46.90         | 47.42            | 53.74          | **57.32**       |
         
     | 
| 203 | 
         
            +
            | WikiTQ                        | Acc        | 68.40  | 64.16            | 36.05               | 34.95             | 38.84               | 36.05               | 66.30        | 47.65         | 43.37    | **74.50**     | 68.55            | 61.42          | 71.45           |
         
     | 
| 204 | 
         
            +
            | **Fact Verification**         |            |        |                  |                     |                   |                     |                     |              |               |          |               |                  |                |                 |
         
     | 
| 205 | 
         
            +
            | TabFact                       | Acc        | 74.40  | 72.00            | 53.20               | 40.06             | 27.13               | 60.76               | 68.95        | 62.27         | 79.6     | 77.26         | 84.60            | 77.80          | **85.43**       |
         
     | 
| 206 | 
         
            +
            | FEVEROUS                      | Acc        | 71.60  | 20.10            | 46.90               | 51.50             | 42.30               | 18.39               | 21.45        | 7.80          | 38.10    | 60.70         | 63.30            | **78.05**      | 76.80           |
         
     | 
| 207 | 
         
            +
            | **Table to Text**             |            |        |                  |                     |                   |                     |                     |              |               |          |               |                  |                |                 |
         
     | 
| 208 | 
         
            +
            | ToTTo                         | BLEU       | 12.21  | 6.95             | 3.10                | 5.50              | 6.23                | 3.81                | 5.36         | 8.76          | 2.64     | 10.50         | 11.91            | 14.10          | **22.69**       |
         
     | 
| 209 | 
         
            +
            | **Natural Language to SQL**   |            |        |                  |                     |                   |                     |                     |              |               |          |               |                  |                |                 |
         
     | 
| 210 | 
         
            +
            | BIRD(dev)                     | Exec Acc   | -      | 9.13             | 7.37                | 1.83              | 2.48                | 0.39                | 0.72         | 25.10         | 24.19    | 27.18         | 18.97            | 31.42          | **38.40**       |
         
     | 
| 211 | 
         
            +
            | BIRD(dev-knowledge)           | Exec Acc   | -      | 15.45            | 18.19               | 3.39              | 3.72                | 0.39                | 1.83         | 36.51         | 39.96    | 42.96         | 31.42            | 49.28          | **60.76**       |
         
     | 
| 212 | 
         
            +
            | Spider(dev)                   | Exec Acc   | -      | 42.26            | 32.88               | 12.86             | 18.96               | 2.71                | 4.26         | 66.44         | 58.12    | 70.99         | 61.70            | 76.31          | **79.40**       |
         
     | 
| 213 | 
         
            +
            | Spider(test)                  | Exec Acc   | -      | 40.29            | 34.93               | 12.02             | 16.35               | 7.33                | 2.93         | 66.65         | 56.87    | 69.73         | 60.18            | 74.38          | **78.48**       |
         
     | 
| 214 | 
         
            +
            | **Holistic Table Evaluation** |            |        |                  |                     |                   |                     |                     |              |               |          |               |                  |                |                 |
         
     | 
| 215 | 
         
            +
            | TableBench                    | DP         | -      | 26.62            | 26.44               | 26.71             | 26.73               | 26.15               | 3.88         | 29.60         | 21.94    | 28.67         | 25.18            | 32.03          | **38.90**       |
         
     | 
| 216 | 
         
            +
            | TableBench                    | TCoT       | -      | 37.08            | 31.33               | 29.79             | 30.01               | 28.65               | 3.85         | 30.93         | 22.8     | 36.25         | 29.77            | 42.34          | **50.06**       |
         
     | 
| 217 | 
         
            +
            | TableBench                    | SCoT       | -      | 14.11            | 17.78               | 9.60              | 12.38               | 22.39               | 2.88         | 22.61         | 8.43     | 25.95         | 24.35            | 25.01          | **30.47**       |
         
     | 
| 218 | 
         
            +
            | TableBench                    | PoT@1      | -      | 21.05            | 26.39               | 31.96             | 25.80               | 28.39               | 2.94         | 10.90         | 11.36    | 16.15         | 22.58            | **33.52**      | 28.98           |
         
     | 
| 219 | 
         
            +
             
     | 
| 220 | 
         
            +
            ## Citation
         
     | 
| 221 | 
         
            +
             
     | 
| 222 | 
         
            +
            If you find our work helpful, please cite us by
         
     | 
| 223 | 
         
            +
             
     | 
| 224 | 
         
            +
            ```bibtex
         
     | 
| 225 | 
         
            +
            @misc{su2024tablegpt2largemultimodalmodel,
         
     | 
| 226 | 
         
            +
                  title={TableGPT2: A Large Multimodal Model with Tabular Data Integration}, 
         
     | 
| 227 | 
         
            +
                  author={Aofeng Su and Aowen Wang and Chao Ye and Chen Zhou and Ga Zhang and Guangcheng Zhu and Haobo Wang and Haokai Xu and Hao Chen and Haoze Li and Haoxuan Lan and Jiaming Tian and Jing Yuan and Junbo Zhao and Junlin Zhou and Kaizhe Shou and Liangyu Zha and Lin Long and Liyao Li and Pengzuo Wu and Qi Zhang and Qingyi Huang and Saisai Yang and Tao Zhang and Wentao Ye and Wufang Zhu and Xiaomeng Hu and Xijun Gu and Xinjie Sun and Xiang Li and Yuhang Yang and Zhiqing Xiao},
         
     | 
| 228 | 
         
            +
                  year={2024},
         
     | 
| 229 | 
         
            +
                  eprint={2411.02059},
         
     | 
| 230 | 
         
            +
                  archivePrefix={arXiv},
         
     | 
| 231 | 
         
            +
                  primaryClass={cs.LG},
         
     | 
| 232 | 
         
            +
                  url={https://arxiv.org/abs/2411.02059}, 
         
     | 
| 233 | 
         
            +
            }
         
     | 
| 234 | 
         
            +
            ```","{""id"": ""tablegpt/TableGPT2-7B"", ""author"": ""tablegpt"", ""sha"": ""c0ef9cb136c28f3617d51e1900e474ffb780037c"", ""last_modified"": ""2025-02-13 10:29:23+00:00"", ""created_at"": ""2024-11-01 01:35:22+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 21658, ""downloads_all_time"": null, ""likes"": 201, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""safetensors"", ""qwen2"", ""zh"", ""en"", ""arxiv:2411.02059"", ""base_model:Qwen/Qwen2.5-7B"", ""base_model:finetune:Qwen/Qwen2.5-7B"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- Qwen/Qwen2.5-7B\nlanguage:\n- zh\n- en\nlicense: apache-2.0"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""Qwen2ForCausalLM""], ""model_type"": ""qwen2"", ""tokenizer_config"": {""bos_token"": null, ""chat_template"": ""{%- if tools %}\n    {{- '<|im_start|>system\\n' }}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- messages[0]['content'] }}\n    {%- else %}\n        {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n    {%- endif %}\n    {{- \""\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\"" }}\n    {%- for tool in tools %}\n        {{- \""\\n\"" }}\n        {{- tool | tojson }}\n    {%- endfor %}\n    {{- \""\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\""name\\\"": <function-name>, \\\""arguments\\\"": <args-json-object>}\\n</tool_call><|im_end|>\\n\"" }}\n{%- else %}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n    {%- else %}\n        {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. 
You are a helpful assistant.<|im_end|>\\n' }}\n    {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n    {%- if (message.role == \""user\"") or (message.role == \""system\"" and not loop.first) or (message.role == \""assistant\"" and not message.tool_calls) %}\n        {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n    {%- elif message.role == \""assistant\"" %}\n        {{- '<|im_start|>' + message.role }}\n        {%- if message.content %}\n            {{- '\\n' + message.content }}\n        {%- endif %}\n        {%- for tool_call in message.tool_calls %}\n            {%- if tool_call.function is defined %}\n                {%- set tool_call = tool_call.function %}\n            {%- endif %}\n            {{- '\\n<tool_call>\\n{\""name\"": \""' }}\n            {{- tool_call.name }}\n            {{- '\"", \""arguments\"": ' }}\n            {{- tool_call.arguments | tojson }}\n            {{- '}\\n</tool_call>' }}\n        {%- endfor %}\n        {{- '<|im_end|>\\n' }}\n    {%- elif message.role == \""tool\"" %}\n        {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \""tool\"") %}\n            {{- '<|im_start|>user' }}\n        {%- endif %}\n        {{- '\\n<tool_response>\\n' }}\n        {{- message.content }}\n        {{- '\\n</tool_response>' }}\n        {%- if loop.last or (messages[loop.index0 + 1].role != \""tool\"") %}\n            {{- '<|im_end|>\\n' }}\n        {%- endif %}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n"", ""eos_token"": ""<|im_end|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": null}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='.gitignore', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='latest', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='trainer_state.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='zero_to_fp32.py', size=None, blob_id=None, lfs=None)""], ""spaces"": [""DexterSptizu/table-gpt-query-csv-file"", 
""xinjiboy1004/tablegpt_test""], ""safetensors"": {""parameters"": {""BF16"": 7615616512}, ""total"": 7615616512}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-02-13 10:29:23+00:00"", ""cardData"": ""base_model:\n- Qwen/Qwen2.5-7B\nlanguage:\n- zh\n- en\nlicense: apache-2.0"", ""transformersInfo"": null, ""_id"": ""6724305a0b75fec22c333ce8"", ""modelId"": ""tablegpt/TableGPT2-7B"", ""usedStorage"": 15747289676}",0,,0,,0,"https://huggingface.co/mradermacher/TableGPT2-7B-GGUF, https://huggingface.co/FelisDwan/TableGPT2-7B-Q4_K_M-GGUF, https://huggingface.co/kaiserhui/TableGPT2-7B-Q4_K_M-GGUF, https://huggingface.co/mradermacher/TableGPT2-7B-i1-GGUF",4,,0,"DexterSptizu/table-gpt-query-csv-file, huggingface/InferenceSupport/discussions/new?title=tablegpt/TableGPT2-7B&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Btablegpt%2FTableGPT2-7B%5D(%2Ftablegpt%2FTableGPT2-7B)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, xinjiboy1004/tablegpt_test",3
         
     | 
    	
        TemporalNet_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv
    ADDED
    
    | 
         @@ -0,0 +1,41 @@ 
     | 
| 1 | 
         
            +
            model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
         
     | 
| 2 | 
         
            +
            CiaraRowles/TemporalNet,"---
         
     | 
| 3 | 
         
            +
            license: openrail
         
     | 
| 4 | 
         
            +
            tags:
         
     | 
| 5 | 
         
            +
            - controlnet
         
     | 
| 6 | 
         
            +
            - stable-diffusion
         
     | 
| 7 | 
         
            +
            - diffusers
         
     | 
| 8 | 
         
            +
            base_model: runwayml/stable-diffusion-v1-5
         
     | 
| 9 | 
         
            +
            ---
         
     | 
| 10 | 
         
            +
            Introducing the Beta Version of TemporalNet
         
     | 
| 11 | 
         
            +
             
     | 
| 12 | 
         
            +
            TemporalNet is a ControlNet model designed to enhance the temporal consistency of generated outputs, as demonstrated in this example: https://twitter.com/CiaraRowles1/status/1637486561917906944. While it does not eliminate all flickering, it significantly reduces it, particularly at higher denoise levels. For optimal results, it is recommended to use TemporalNet in combination with other methods.
         
     | 
| 13 | 
         
            +
             
     | 
| 14 | 
         
            +
            Instructions for Use:
         
     | 
| 15 | 
         
            +
             
     | 
| 16 | 
         
            +
            1) Add the model ""diff_control_sd15_temporalnet_fp16.safetensors"" to your models folder in the ControlNet extension in Automatic1111's Web UI.
         
     | 
| 17 | 
         
            +
             
     | 
| 18 | 
         
            +
            2) Create a folder that contains:
         
     | 
| 19 | 
         
            +
             
     | 
| 20 | 
         
            +
            - A subfolder named ""Input_Images"" with the input frames
         
     | 
| 21 | 
         
            +
            - A PNG file called ""init.png"" that is pre-stylized in your desired style
         
     | 
| 22 | 
         
            +
            - The ""temporalvideo.py"" script
         
     | 
| 23 | 
         
            +
             
     | 
| 24 | 
         
            +
            3) Customize the ""temporalvideo.py"" script according to your preferences, such as the image resolution, prompt, and control net settings.
         
     | 
| 25 | 
         
            +
             
     | 
| 26 | 
         
            +
            4) Launch Automatic1111's Web UI with the --api setting enabled.
         
     | 
| 27 | 
         
            +
             
     | 
| 28 | 
         
            +
            5) Execute the Python script.
         
     | 
| 29 | 
         
            +
             
     | 
| 30 | 
         
            +
            *Please note that the ""init.png"" image will not significantly influence the style of the output video. Its primary purpose is to prevent a drastic change in aesthetics during the first few frames.*
         
     | 
| 31 | 
         
            +
             
     | 
| 32 | 
         
            +
             Also, I highly recommend using this in conjunction with the HED model; the settings are already in the script.
         
     | 
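As a quick sanity check before step 5, the following minimal Python sketch (entirely hypothetical; the folder path is an example and the file names simply mirror the layout described above) verifies that the working folder contains everything the script expects.

```python
from pathlib import Path
import sys

# Working folder laid out as described in the steps above (hypothetical path).
work_dir = Path("./my_temporalnet_project")

required = [
    work_dir / "Input_Images",      # subfolder with the input frames
    work_dir / "init.png",          # pre-stylized first frame
    work_dir / "temporalvideo.py",  # the driver script from this repo
]

missing = [p for p in required if not p.exists()]
if missing:
    sys.exit("Missing: " + ", ".join(str(p) for p in missing))

print("Folder layout looks good; launch the Web UI with --api, then run temporalvideo.py.")
```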
| 33 | 
         
            +
             
     | 
| 34 | 
         
            +
            ToDo:
         
     | 
| 35 | 
         
            +
             
     | 
| 36 | 
         
            +
            Write an Extension for the web ui.
         
     | 
| 37 | 
         
            +
             
     | 
| 38 | 
         
            +
            Write a feature that automatically generates an ""init.png"" image if none is provided.
         
     | 
| 39 | 
         
            +
             
     | 
| 40 | 
         
            +
             ̶C̶h̶a̶n̶g̶e̶ ̶t̶h̶e̶ ̶e̶x̶t̶e̶n̶s̶i̶o̶n̶ ̶t̶o̶ ̶.̶s̶a̶f̶e̶t̶e̶n̶s̶o̶r̶s̶ ̶a̶n̶d̶ ̶i̶n̶v̶e̶s̶t̶i̶g̶a̶t̶e̶ ̶c̶o̶m̶p̶r̶e̶s̶s̶i̶o̶n̶.̶
         
     | 
| 41 | 
         
            +
            ","{""id"": ""CiaraRowles/TemporalNet"", ""author"": ""CiaraRowles"", ""sha"": ""d47bd9b63c1717bdfecda4e75653dcf0d7219f85"", ""last_modified"": ""2023-04-05 22:59:34+00:00"", ""created_at"": ""2023-03-23 22:31:31+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 11430, ""downloads_all_time"": null, ""likes"": 353, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""safetensors"", ""controlnet"", ""stable-diffusion"", ""base_model:runwayml/stable-diffusion-v1-5"", ""base_model:adapter:runwayml/stable-diffusion-v1-5"", ""license:openrail"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: runwayml/stable-diffusion-v1-5\nlicense: openrail\ntags:\n- controlnet\n- stable-diffusion\n- diffusers"", ""widget_data"": null, ""model_index"": null, ""config"": {}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cldm_v15.yaml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='diff_control_sd15_temporalnet_fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='diffusion_pytorch_model.fp16.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='diffusion_pytorch_model.fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='temporalnetv3.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='temporalvideo.py', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-04-05 22:59:34+00:00"", ""cardData"": ""base_model: runwayml/stable-diffusion-v1-5\nlicense: openrail\ntags:\n- controlnet\n- stable-diffusion\n- diffusers"", ""transformersInfo"": null, ""_id"": ""641cd343e428c7ddcbe089ca"", ""modelId"": ""CiaraRowles/TemporalNet"", ""usedStorage"": 17746418164}",0,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=CiaraRowles/TemporalNet&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BCiaraRowles%2FTemporalNet%5D(%2FCiaraRowles%2FTemporalNet)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
     | 
    	
        Wizard-Vicuna-13B-Uncensored_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv
    ADDED
    
    | 
         @@ -0,0 +1,38 @@ 
     | 
| 1 | 
         
            +
            model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
         
     | 
| 2 | 
         
            +
            cognitivecomputations/Wizard-Vicuna-13B-Uncensored,"---
         
     | 
| 3 | 
         
            +
            license: other
         
     | 
| 4 | 
         
            +
            datasets:
         
     | 
| 5 | 
         
            +
            - ehartford/wizard_vicuna_70k_unfiltered
         
     | 
| 6 | 
         
            +
            language:
         
     | 
| 7 | 
         
            +
            - en
         
     | 
| 8 | 
         
            +
            tags:
         
     | 
| 9 | 
         
            +
            - uncensored
         
     | 
| 10 | 
         
            +
            ---
         
     | 
| 11 | 
         
            +
             
     | 
| 12 | 
         
            +
             This is [wizard-vicuna-13b](https://huggingface.co/junelee/wizard-vicuna-13b) trained with a subset of the dataset - responses that contained alignment / moralizing were removed. The intent is to train a WizardLM that doesn't have alignment built-in, so that alignment (of any sort) can be added separately, for example with an RLHF LoRA.
         
     | 
| 13 | 
         
            +
             
     | 
| 14 | 
         
            +
            Shout out to the open source AI/ML community, and everyone who helped me out.
         
     | 
| 15 | 
         
            +
             
     | 
| 16 | 
         
            +
            Note:  
         
     | 
| 17 | 
         
            +
             
     | 
| 18 | 
         
            +
            An uncensored model has no guardrails.  
         
     | 
| 19 | 
         
            +
             
     | 
| 20 | 
         
            +
            You are responsible for anything you do with the model, just as you are responsible for anything you do with any dangerous object such as a knife, gun, lighter, or car.
         
     | 
| 21 | 
         
            +
             
     | 
| 22 | 
         
            +
            Publishing anything this model generates is the same as publishing it yourself.
         
     | 
| 23 | 
         
            +
             
     | 
| 24 | 
         
            +
            You are responsible for the content you publish, and you cannot blame the model any more than you can blame the knife, gun, lighter, or car for what you do with it.
         
     | 
| 25 | 
         
            +
            # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
         
     | 
| 26 | 
         
            +
            Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_ehartford__Wizard-Vicuna-13B-Uncensored)
         
     | 
| 27 | 
         
            +
             
     | 
| 28 | 
         
            +
            | Metric                | Value                     |
         
     | 
| 29 | 
         
            +
            |-----------------------|---------------------------|
         
     | 
| 30 | 
         
            +
            | Avg.                  | 49.52   |
         
     | 
| 31 | 
         
            +
            | ARC (25-shot)         | 58.96          |
         
     | 
| 32 | 
         
            +
            | HellaSwag (10-shot)   | 81.95    |
         
     | 
| 33 | 
         
            +
            | MMLU (5-shot)         | 47.92         |
         
     | 
| 34 | 
         
            +
            | TruthfulQA (0-shot)   | 51.69   |
         
     | 
| 35 | 
         
            +
            | Winogrande (5-shot)   | 75.69   |
         
     | 
| 36 | 
         
            +
            | GSM8K (5-shot)        | 8.64        |
         
     | 
| 37 | 
         
            +
            | DROP (3-shot)         | 21.79         |
         
     | 
| 38 | 
         
            +
            ","{""id"": ""cognitivecomputations/Wizard-Vicuna-13B-Uncensored"", ""author"": ""cognitivecomputations"", ""sha"": ""682f6583699ecd916a7d106393f68c44a1c7abf2"", ""last_modified"": ""2023-11-18 03:51:02+00:00"", ""created_at"": ""2023-05-11 00:26:57+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 626, ""downloads_all_time"": null, ""likes"": 299, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""llama"", ""text-generation"", ""uncensored"", ""en"", ""dataset:ehartford/wizard_vicuna_70k_unfiltered"", ""license:other"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""datasets:\n- ehartford/wizard_vicuna_70k_unfiltered\nlanguage:\n- en\nlicense: other\ntags:\n- uncensored"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": {""__type"": ""AddedToken"", ""content"": ""<s>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""eos_token"": {""__type"": ""AddedToken"", ""content"": ""</s>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""pad_token"": null, ""unk_token"": {""__type"": ""AddedToken"", ""content"": ""<unk>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='.gitignore', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00006.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00006.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00003-of-00006.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00004-of-00006.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00005-of-00006.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00006-of-00006.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='trainer_state.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, 
lfs=None)""], ""spaces"": [""h2oai/h2ogpt-chatbot"", ""h2oai/h2ogpt-chatbot2"", ""Intel/low_bit_open_llm_leaderboard"", ""BAAI/open_cn_llm_leaderboard"", ""gsaivinay/open_llm_leaderboard"", ""GTBench/GTBench"", ""Vikhrmodels/small-shlepa-lb"", ""kz-transformers/kaz-llm-lb"", ""felixz/open_llm_leaderboard"", ""OPTML-Group/UnlearnCanvas-Benchmark"", ""BAAI/open_flageval_vlm_leaderboard"", ""Onekee/ehartford-Wizard-Vicuna-13B-Uncensored"", ""b1sheng/kg_llm_leaderboard_test"", ""neubla/neubla-llm-evaluation-board"", ""lapsapking/h2ogpt-chatbot"", ""rodrigomasini/data_only_open_llm_leaderboard"", ""Docfile/open_llm_leaderboard"", ""his0/h2ogpt-chatbot"", ""atimughal662/InfoFusion"", ""m1thrandir/ehartford-Wizard-Vicuna-13B-Uncensored"", ""Tj/ehartford-Wizard-Vicuna-13B-Uncensored"", ""acetaminophenPT/ehartford-Wizard-Vicuna-13B-Uncensored"", ""FroggyQc/ehartford-Wizard-Vicuna-13B-Uncensored"", ""feuillet1234/ehartford-Wizard-Vicuna-13B-Uncensored"", ""pgmcb99/ehartford-Wizard-Vicuna-13B-Uncensored"", ""JustMe4Real/ehartford-Wizard-Vicuna-13B-Uncensored"", ""Soxakore/ehartford-Wizard-Vicuna-13B-Uncensored"", ""akashkj/H2OGPT"", ""ZeroGravityYz/ehartford-Wizard-Vicuna-13B-Uncensored"", ""Joker55/ehartford-Wizard-Vicuna-13B-Uncensored"", ""Nonoxx/ehartford-Wizard-Vicuna-13B-Uncensored"", ""ariel0330/h2osiri"", ""neilgiovanni/ehartford-Wizard-Vicuna-13B-Uncensored"", ""jdxp/ehartford-Wizard-Vicuna-13B-Uncensored"", ""ClaudiuHNS/ehartford-Wizard-Vicuna-13B-Uncensored"", ""nabdtran/ehartford-Wizard-Vicuna-13B-Uncensored"", ""ccoreilly/aigua-xat"", ""Sambhavnoobcoder/h2ogpt-chatbot"", ""martinkropf/ehartford-Wizard-Vicuna-13B-Uncensored"", ""El-diota/ehartford-Wizard-Vicuna-13B-Uncensored"", ""Pritam69/ehartford-Wizard-Vicuna-13B-Uncensored"", ""intellecttech/ehartford-Wizard-Vicuna-13B-Uncensored"", ""WEARYy/ehartford-Wizard-Vicuna-13B-Uncensored"", ""sarayating/ehartford-Wizard-Vicuna-13B-Uncensored"", ""Iciser/cognitivecomputations-Wizard-Vicuna-13B-Uncensored"", ""Yarumo/ehartford-Wizard-Vicuna-13B-Uncensored"", ""ardaerturk/cognitivecomputations-Wizard-Vicuna-13B-Uncensored"", ""gptaisolutions/cognitivecomputations-Wizard-Vicuna-13B-Uncensored"", ""smothiki/open_llm_leaderboard"", ""pngwn/open_llm_leaderboard"", ""theSscorpio/ehartford-Wizard-Vicuna-13B-Uncensored"", ""theSscorpio/ehartford-Wizard-Vicuna-13B-Uncensored2"", ""theSscorpio/ehartford-Wizard-Vicuna-13B-Uncensored3"", ""pngwn/open_llm_leaderboard_two"", ""iblfe/test"", ""Jaaarrr1/ehartford-Wizard-Vicuna-13B-Uncensored"", ""AnonymousSub/Ayurveda_Chatbot"", ""K00B404/Research-chatbot"", ""0x1668/open_llm_leaderboard"", ""pngwn/open_llm_leaderboard-check"", ""asir0z/open_llm_leaderboard"", ""kelvin-t-lu/chatbot"", ""ALexAbes/cognitivecomputations-Wizard-Vicuna-13B-Uncensored"", ""kbmlcoding/open_llm_leaderboard_free"", ""dancers/ehartford-Wizard-Vicuna-13B-Uncensored"", ""ALIOJ/ehartford-Wizard-Vicuna-13B-Uncensored"", ""Stoxe/ehartford-Wizard-Vicuna-13B-Uncensored"", ""aichampions/open_llm_leaderboard"", ""Adeco/open_llm_leaderboard"", ""tmzh/chatbot-using-rag-on-faq"", ""anirudh937/open_llm_leaderboard"", ""smothiki/open_llm_leaderboard2"", ""cw332/h2ogpt-chatbot"", ""mjalg/IFEvalTR"", ""abugaber/test""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-11-18 03:51:02+00:00"", ""cardData"": ""datasets:\n- ehartford/wizard_vicuna_70k_unfiltered\nlanguage:\n- en\nlicense: other\ntags:\n- uncensored"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": 
null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""645c365111b04b05ad07460d"", ""modelId"": ""cognitivecomputations/Wizard-Vicuna-13B-Uncensored"", ""usedStorage"": 156191236636}",0,,0,,0,"https://huggingface.co/TheBloke/Wizard-Vicuna-13B-Uncensored-GPTQ, https://huggingface.co/TheBloke/Wizard-Vicuna-13B-Uncensored-GGUF, https://huggingface.co/second-state/Wizard-Vicuna-13B-Uncensored-GGUF, https://huggingface.co/mradermacher/Wizard-Vicuna-13B-Uncensored-i1-GGUF, https://huggingface.co/TheBloke/Wizard-Vicuna-13B-Uncensored-AWQ, https://huggingface.co/mradermacher/Wizard-Vicuna-13B-Uncensored-GGUF",6,,0,"BAAI/open_cn_llm_leaderboard, BAAI/open_flageval_vlm_leaderboard, GTBench/GTBench, Intel/low_bit_open_llm_leaderboard, OPTML-Group/UnlearnCanvas-Benchmark, Vikhrmodels/small-shlepa-lb, dancers/ehartford-Wizard-Vicuna-13B-Uncensored, gsaivinay/open_llm_leaderboard, h2oai/h2ogpt-chatbot, h2oai/h2ogpt-chatbot2, huggingface/InferenceSupport/discussions/new?title=cognitivecomputations/Wizard-Vicuna-13B-Uncensored&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bcognitivecomputations%2FWizard-Vicuna-13B-Uncensored%5D(%2Fcognitivecomputations%2FWizard-Vicuna-13B-Uncensored)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kz-transformers/kaz-llm-lb, lapsapking/h2ogpt-chatbot",13
         
     | 
    	
        Wizard-Vicuna-30B-Uncensored-GPTQ_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv
    ADDED
    
    | 
         @@ -0,0 +1,325 @@ 
| 1 | 
         
            +
            model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
         
     | 
| 2 | 
         
            +
            TheBloke/Wizard-Vicuna-30B-Uncensored-GPTQ,"---
         
     | 
| 3 | 
         
            +
            language:
         
     | 
| 4 | 
         
            +
            - en
         
     | 
| 5 | 
         
            +
            license: other
         
     | 
| 6 | 
         
            +
            tags:
         
     | 
| 7 | 
         
            +
            - uncensored
         
     | 
| 8 | 
         
            +
            datasets:
         
     | 
| 9 | 
         
            +
            - ehartford/wizard_vicuna_70k_unfiltered
         
     | 
| 10 | 
         
            +
            model_name: Wizard Vicuna 30B Uncensored
         
     | 
| 11 | 
         
            +
            base_model: ehartford/Wizard-Vicuna-30B-Uncensored
         
     | 
| 12 | 
         
            +
            inference: false
         
     | 
| 13 | 
         
            +
            model_creator: Eric Hartford
         
     | 
| 14 | 
         
            +
            model_type: llama
         
     | 
| 15 | 
         
            +
            prompt_template: 'A chat between a curious user and an artificial intelligence assistant.
         
     | 
| 16 | 
         
            +
              The assistant gives helpful, detailed, and polite answers to the user''s questions.
         
     | 
| 17 | 
         
            +
              USER: {prompt} ASSISTANT:
         
     | 
| 18 | 
         
            +
             
     | 
| 19 | 
         
            +
              '
         
     | 
| 20 | 
         
            +
            quantized_by: TheBloke
         
     | 
| 21 | 
         
            +
            ---
         
     | 
| 22 | 
         
            +
             
     | 
| 23 | 
         
            +
            <!-- header start -->
         
     | 
| 24 | 
         
            +
            <!-- 200823 -->
         
     | 
| 25 | 
         
            +
            <div style=""width: auto; margin-left: auto; margin-right: auto"">
         
     | 
| 26 | 
         
            +
            <img src=""https://i.imgur.com/EBdldam.jpg"" alt=""TheBlokeAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
         
     | 
| 27 | 
         
            +
            </div>
         
     | 
| 28 | 
         
            +
            <div style=""display: flex; justify-content: space-between; width: 100%;"">
         
     | 
| 29 | 
         
            +
                <div style=""display: flex; flex-direction: column; align-items: flex-start;"">
         
     | 
| 30 | 
         
            +
                    <p style=""margin-top: 0.5em; margin-bottom: 0em;""><a href=""https://discord.gg/theblokeai"">Chat & support: TheBloke's Discord server</a></p>
         
     | 
| 31 | 
         
            +
                </div>
         
     | 
| 32 | 
         
            +
                <div style=""display: flex; flex-direction: column; align-items: flex-end;"">
         
     | 
| 33 | 
         
            +
                    <p style=""margin-top: 0.5em; margin-bottom: 0em;""><a href=""https://www.patreon.com/TheBlokeAI"">Want to contribute? TheBloke's Patreon page</a></p>
         
     | 
| 34 | 
         
            +
                </div>
         
     | 
| 35 | 
         
            +
            </div>
         
     | 
| 36 | 
         
            +
            <div style=""text-align:center; margin-top: 0em; margin-bottom: 0em""><p style=""margin-top: 0.25em; margin-bottom: 0em;"">TheBloke's LLM work is generously supported by a grant from <a href=""https://a16z.com"">andreessen horowitz (a16z)</a></p></div>
         
     | 
| 37 | 
         
            +
            <hr style=""margin-top: 1.0em; margin-bottom: 1.0em;"">
         
     | 
| 38 | 
         
            +
            <!-- header end -->
         
     | 
| 39 | 
         
            +
             
     | 
| 40 | 
         
            +
            # Wizard Vicuna 30B Uncensored - GPTQ
         
     | 
| 41 | 
         
            +
            - Model creator: [Eric Hartford](https://huggingface.co/ehartford)
         
     | 
| 42 | 
         
            +
            - Original model: [Wizard Vicuna 30B Uncensored](https://huggingface.co/ehartford/Wizard-Vicuna-30B-Uncensored)
         
     | 
| 43 | 
         
            +
             
     | 
| 44 | 
         
            +
            <!-- description start -->
         
     | 
| 45 | 
         
            +
            ## Description
         
     | 
| 46 | 
         
            +
             
     | 
| 47 | 
         
            +
            This repo contains GPTQ model files for [Eric Hartford's Wizard-Vicuna-30B-Uncensored](https://huggingface.co/ehartford/Wizard-Vicuna-30B-Uncensored).
         
     | 
| 48 | 
         
            +
             
     | 
| 49 | 
         
            +
            Multiple GPTQ parameter permutations are provided; see Provided Files below for details of the options provided, their parameters, and the software used to create them.
         
     | 
| 50 | 
         
            +
             
     | 
| 51 | 
         
            +
            <!-- description end -->
         
     | 
| 52 | 
         
            +
            <!-- repositories-available start -->
         
     | 
| 53 | 
         
            +
            ## Repositories available
         
     | 
| 54 | 
         
            +
             
     | 
| 55 | 
         
            +
            * [AWQ model(s) for GPU inference.](https://huggingface.co/TheBloke/Wizard-Vicuna-30B-Uncensored-AWQ)
         
     | 
| 56 | 
         
            +
            * [GPTQ models for GPU inference, with multiple quantisation parameter options.](https://huggingface.co/TheBloke/Wizard-Vicuna-30B-Uncensored-GPTQ)
         
     | 
| 57 | 
         
            +
            * [2, 3, 4, 5, 6 and 8-bit GGUF models for CPU+GPU inference](https://huggingface.co/TheBloke/Wizard-Vicuna-30B-Uncensored-GGUF)
         
     | 
| 58 | 
         
            +
            * [Eric Hartford's original unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/TheBloke/Wizard-Vicuna-30B-Uncensored-fp16)
         
     | 
| 59 | 
         
            +
            <!-- repositories-available end -->
         
     | 
| 60 | 
         
            +
             
     | 
| 61 | 
         
            +
            <!-- prompt-template start -->
         
     | 
| 62 | 
         
            +
            ## Prompt template: Vicuna
         
     | 
| 63 | 
         
            +
             
     | 
| 64 | 
         
            +
            ```
         
     | 
| 65 | 
         
            +
            A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: {prompt} ASSISTANT:
         
     | 
| 66 | 
         
            +
             
     | 
| 67 | 
         
            +
            ```
         
     | 
| 68 | 
         
            +
             
     | 
| 69 | 
         
            +
            <!-- prompt-template end -->
         
     | 
| 70 | 
         
            +
             
     | 
| 71 | 
         
            +
             
     | 
| 72 | 
         
            +
            <!-- README_GPTQ.md-provided-files start -->
         
     | 
| 73 | 
         
            +
            ## Provided files and GPTQ parameters
         
     | 
| 74 | 
         
            +
             
     | 
| 75 | 
         
            +
            Multiple quantisation parameters are provided, to allow you to choose the best one for your hardware and requirements.
         
     | 
| 76 | 
         
            +
             
     | 
| 77 | 
         
            +
            Each separate quant is in a different branch.  See below for instructions on fetching from different branches.
         
     | 
| 78 | 
         
            +
             
     | 
| 79 | 
         
            +
            All recent GPTQ files are made with AutoGPTQ, and all files in non-main branches are made with AutoGPTQ. Files in the `main` branch which were uploaded before August 2023 were made with GPTQ-for-LLaMa.
         
     | 
| 80 | 
         
            +
             
     | 
| 81 | 
         
            +
            <details>
         
     | 
| 82 | 
         
            +
              <summary>Explanation of GPTQ parameters</summary>
         
     | 
| 83 | 
         
            +
             
     | 
| 84 | 
         
            +
            - Bits: The bit size of the quantised model.
         
     | 
| 85 | 
         
            +
            - GS: GPTQ group size. Higher numbers use less VRAM, but have lower quantisation accuracy. ""None"" is the lowest possible value.
         
     | 
| 86 | 
         
            +
            - Act Order: True or False. Also known as `desc_act`. True results in better quantisation accuracy. Some GPTQ clients have had issues with models that use Act Order plus Group Size, but this is generally resolved now.
         
     | 
| 87 | 
         
            +
            - Damp %: A GPTQ parameter that affects how samples are processed for quantisation. 0.01 is default, but 0.1 results in slightly better accuracy.
         
     | 
| 88 | 
         
            +
            - GPTQ dataset: The dataset used for quantisation. Using a dataset more appropriate to the model's training can improve quantisation accuracy. Note that the GPTQ dataset is not the same as the dataset used to train the model - please refer to the original model repo for details of the training dataset(s).
         
     | 
| 89 | 
         
            +
            - Sequence Length: The length of the dataset sequences used for quantisation. Ideally this is the same as the model sequence length. For some very long sequence models (16+K), a lower sequence length may have to be used.  Note that a lower sequence length does not limit the sequence length of the quantised model. It only impacts the quantisation accuracy on longer inference sequences.
         
     | 
| 90 | 
         
            +
            - ExLlama Compatibility: Whether this file can be loaded with ExLlama, which currently only supports Llama models in 4-bit.
         
     | 
| 91 | 
         
            +
             
     | 
| 92 | 
         
            +
            </details>
         
     | 
| 93 | 
         
            +
             
     | 
| 94 | 
         
            +
            | Branch | Bits | GS | Act Order | Damp % | GPTQ Dataset | Seq Len | Size | ExLlama | Desc |
         
     | 
| 95 | 
         
            +
            | ------ | ---- | -- | --------- | ------ | ------------ | ------- | ---- | ------- | ---- |
         
     | 
| 96 | 
         
            +
            | [main](https://huggingface.co/TheBloke/Wizard-Vicuna-30B-Uncensored-GPTQ/tree/main) | 4 | None | Yes | 0.01 | [wikitext](https://huggingface.co/datasets/wikitext/viewer/wikitext-2-v1/test) | 2048 | 16.94 GB | Yes | 4-bit, with Act Order. No group size, to lower VRAM requirements. | 
         
     | 
| 97 | 
         
            +
            | [gptq-4bit-32g-actorder_True](https://huggingface.co/TheBloke/Wizard-Vicuna-30B-Uncensored-GPTQ/tree/gptq-4bit-32g-actorder_True) | 4 | 32 | Yes | 0.01 | [wikitext](https://huggingface.co/datasets/wikitext/viewer/wikitext-2-v1/test) | 2048 | 19.44 GB | Yes | 4-bit, with Act Order and group size 32g. Gives highest possible inference quality, with maximum VRAM usage. | 
         
     | 
| 98 | 
         
            +
            | [gptq-4bit-64g-actorder_True](https://huggingface.co/TheBloke/Wizard-Vicuna-30B-Uncensored-GPTQ/tree/gptq-4bit-64g-actorder_True) | 4 | 64 | Yes | 0.01 | [wikitext](https://huggingface.co/datasets/wikitext/viewer/wikitext-2-v1/test) | 2048 | 18.18 GB | Yes | 4-bit, with Act Order and group size 64g. Uses less VRAM than 32g, but with slightly lower accuracy. | 
         
     | 
| 99 | 
         
            +
            | [gptq-4bit-128g-actorder_True](https://huggingface.co/TheBloke/Wizard-Vicuna-30B-Uncensored-GPTQ/tree/gptq-4bit-128g-actorder_True) | 4 | 128 | Yes | 0.01 | [wikitext](https://huggingface.co/datasets/wikitext/viewer/wikitext-2-v1/test) | 2048 | 17.55 GB | Yes | 4-bit, with Act Order and group size 128g. Uses even less VRAM than 64g, but with slightly lower accuracy. | 
         
     | 
| 100 | 
         
            +
            | [gptq-8bit--1g-actorder_True](https://huggingface.co/TheBloke/Wizard-Vicuna-30B-Uncensored-GPTQ/tree/gptq-8bit--1g-actorder_True) | 8 | None | Yes | 0.01 | [wikitext](https://huggingface.co/datasets/wikitext/viewer/wikitext-2-v1/test) | 2048 | 32.99 GB | No | 8-bit, with Act Order. No group size, to lower VRAM requirements. | 
         
     | 
| 101 | 
         
            +
            | [gptq-8bit-128g-actorder_False](https://huggingface.co/TheBloke/Wizard-Vicuna-30B-Uncensored-GPTQ/tree/gptq-8bit-128g-actorder_False) | 8 | 128 | No | 0.01 | [wikitext](https://huggingface.co/datasets/wikitext/viewer/wikitext-2-v1/test) | 2048 | 33.73 GB | No | 8-bit, with group size 128g for higher inference quality and without Act Order to improve AutoGPTQ speed. | 
         
     | 
| 102 | 
         
            +
            | [gptq-3bit--1g-actorder_True](https://huggingface.co/TheBloke/Wizard-Vicuna-30B-Uncensored-GPTQ/tree/gptq-3bit--1g-actorder_True) | 3 | None | Yes | 0.01 | [wikitext](https://huggingface.co/datasets/wikitext/viewer/wikitext-2-v1/test) | 2048 | 12.92 GB | No | 3-bit, with Act Order and no group size. Lowest possible VRAM requirements. May be lower quality than 3-bit 128g. | 
         
     | 
| 103 | 
         
            +
            | [gptq-3bit-128g-actorder_False](https://huggingface.co/TheBloke/Wizard-Vicuna-30B-Uncensored-GPTQ/tree/gptq-3bit-128g-actorder_False) | 3 | 128 | No | 0.01 | [wikitext](https://huggingface.co/datasets/wikitext/viewer/wikitext-2-v1/test) | 2048 | 13.51 GB | No | 3-bit, with group size 128g but no act-order. Slightly higher VRAM requirements than 3-bit None. |
         
     | 
| 104 | 
         
            +
             
     | 
| 105 | 
         
            +
            <!-- README_GPTQ.md-provided-files end -->
         
     | 
| 106 | 
         
            +
             
     | 
| 107 | 
         
            +
            <!-- README_GPTQ.md-download-from-branches start -->
         
     | 
| 108 | 
         
            +
            ## How to download from branches
         
     | 
| 109 | 
         
            +
             
     | 
| 110 | 
         
            +
            - In text-generation-webui, you can add `:branch` to the end of the download name, eg `TheBloke/Wizard-Vicuna-30B-Uncensored-GPTQ:main`
         
     | 
| 111 | 
         
            +
            - With Git, you can clone a branch with:
         
     | 
| 112 | 
         
            +
            ```
         
     | 
| 113 | 
         
            +
            git clone --single-branch --branch main https://huggingface.co/TheBloke/Wizard-Vicuna-30B-Uncensored-GPTQ
         
     | 
| 114 | 
         
            +
            ```
         
     | 
| 115 | 
         
            +
            - In Python Transformers code, the branch is the `revision` parameter; see below.
         
     | 
| 116 | 
         
            +
            <!-- README_GPTQ.md-download-from-branches end -->
         
     | 
| 117 | 
         
            +
            <!-- README_GPTQ.md-text-generation-webui start -->
         
     | 
| 118 | 
         
            +
            ## How to easily download and use this model in [text-generation-webui](https://github.com/oobabooga/text-generation-webui).
         
     | 
| 119 | 
         
            +
             
     | 
| 120 | 
         
            +
            Please make sure you're using the latest version of [text-generation-webui](https://github.com/oobabooga/text-generation-webui).
         
     | 
| 121 | 
         
            +
             
     | 
| 122 | 
         
            +
            It is strongly recommended to use the text-generation-webui one-click-installers unless you're sure you know how to make a manual install.
         
     | 
| 123 | 
         
            +
             
     | 
| 124 | 
         
            +
            1. Click the **Model tab**.
         
     | 
| 125 | 
         
            +
            2. Under **Download custom model or LoRA**, enter `TheBloke/Wizard-Vicuna-30B-Uncensored-GPTQ`.
         
     | 
| 126 | 
         
            +
              - To download from a specific branch, enter for example `TheBloke/Wizard-Vicuna-30B-Uncensored-GPTQ:main`
         
     | 
| 127 | 
         
            +
              - see Provided Files above for the list of branches for each option.
         
     | 
| 128 | 
         
            +
            3. Click **Download**.
         
     | 
| 129 | 
         
            +
            4. The model will start downloading. Once it's finished it will say ""Done"".
         
     | 
| 130 | 
         
            +
            5. In the top left, click the refresh icon next to **Model**.
         
     | 
| 131 | 
         
            +
            6. In the **Model** dropdown, choose the model you just downloaded: `Wizard-Vicuna-30B-Uncensored-GPTQ`
         
     | 
| 132 | 
         
            +
            7. The model will automatically load, and is now ready for use!
         
     | 
| 133 | 
         
            +
            8. If you want any custom settings, set them and then click **Save settings for this model** followed by **Reload the Model** in the top right.
         
     | 
| 134 | 
         
            +
              * Note that you do not need to and should not set manual GPTQ parameters any more. These are set automatically from the file `quantize_config.json`.
         
     | 
| 135 | 
         
            +
            9. Once you're ready, click the **Text Generation tab** and enter a prompt to get started!
         
     | 
| 136 | 
         
            +
            <!-- README_GPTQ.md-text-generation-webui end -->
         
     | 
| 137 | 
         
            +
             
     | 
| 138 | 
         
            +
            <!-- README_GPTQ.md-use-from-python start -->
         
     | 
| 139 | 
         
            +
            ## How to use this GPTQ model from Python code
         
     | 
| 140 | 
         
            +
             
     | 
| 141 | 
         
            +
            ### Install the necessary packages
         
     | 
| 142 | 
         
            +
             
     | 
| 143 | 
         
            +
            Requires: Transformers 4.32.0 or later, Optimum 1.12.0 or later, and AutoGPTQ 0.4.2 or later.
         
     | 
| 144 | 
         
            +
             
     | 
| 145 | 
         
            +
            ```shell
         
     | 
| 146 | 
         
            +
            pip3 install transformers>=4.32.0 optimum>=1.12.0
         
     | 
| 147 | 
         
            +
            pip3 install auto-gptq --extra-index-url https://huggingface.github.io/autogptq-index/whl/cu118/  # Use cu117 if on CUDA 11.7
         
     | 
| 148 | 
         
            +
            ```
         
     | 
| 149 | 
         
            +
             
     | 
| 150 | 
         
            +
            If you have problems installing AutoGPTQ using the pre-built wheels, install it from source instead:
         
     | 
| 151 | 
         
            +
             
     | 
| 152 | 
         
            +
            ```shell
         
     | 
| 153 | 
         
            +
            pip3 uninstall -y auto-gptq
         
     | 
| 154 | 
         
            +
            git clone https://github.com/PanQiWei/AutoGPTQ
         
     | 
| 155 | 
         
            +
            cd AutoGPTQ
         
     | 
| 156 | 
         
            +
            pip3 install .
         
     | 
| 157 | 
         
            +
            ```
         
     | 
| 158 | 
         
            +
             
     | 
| 159 | 
         
            +
            ### For CodeLlama models only: you must use Transformers 4.33.0 or later.
         
     | 
| 160 | 
         
            +
             
     | 
| 161 | 
         
            +
            If 4.33.0 is not yet released when you read this, you will need to install Transformers from source:
         
     | 
| 162 | 
         
            +
            ```shell
         
     | 
| 163 | 
         
            +
            pip3 uninstall -y transformers
         
     | 
| 164 | 
         
            +
            pip3 install git+https://github.com/huggingface/transformers.git
         
     | 
| 165 | 
         
            +
            ```
         
     | 
| 166 | 
         
            +
             
     | 
| 167 | 
         
            +
            ### You can then use the following code
         
     | 
| 168 | 
         
            +
             
     | 
| 169 | 
         
            +
            ```python
         
     | 
| 170 | 
         
            +
            from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
         
     | 
| 171 | 
         
            +
             
     | 
| 172 | 
         
            +
            model_name_or_path = ""TheBloke/Wizard-Vicuna-30B-Uncensored-GPTQ""
         
     | 
| 173 | 
         
            +
            # To use a different branch, change revision
         
     | 
| 174 | 
         
            +
            # For example: revision=""main""
         
     | 
| 175 | 
         
            +
            model = AutoModelForCausalLM.from_pretrained(model_name_or_path,
         
     | 
| 176 | 
         
            +
                                                         device_map=""auto"",
         
     | 
| 177 | 
         
            +
                                                         trust_remote_code=False,
         
     | 
| 178 | 
         
            +
                                                         revision=""main"")
         
     | 
| 179 | 
         
            +
             
     | 
| 180 | 
         
            +
            tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)
         
     | 
| 181 | 
         
            +
             
     | 
| 182 | 
         
            +
            prompt = ""Tell me about AI""
         
     | 
| 183 | 
         
            +
            prompt_template=f'''A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: {prompt} ASSISTANT:
         
     | 
| 184 | 
         
            +
             
     | 
| 185 | 
         
            +
            '''
         
     | 
| 186 | 
         
            +
             
     | 
| 187 | 
         
            +
            print(""\n\n*** Generate:"")
         
     | 
| 188 | 
         
            +
             
     | 
| 189 | 
         
            +
            input_ids = tokenizer(prompt_template, return_tensors='pt').input_ids.cuda()
         
     | 
| 190 | 
         
            +
            output = model.generate(inputs=input_ids, temperature=0.7, do_sample=True, top_p=0.95, top_k=40, max_new_tokens=512)
         
     | 
| 191 | 
         
            +
            print(tokenizer.decode(output[0]))
         
     | 
| 192 | 
         
            +
             
     | 
| 193 | 
         
            +
            # Inference can also be done using transformers' pipeline
         
     | 
| 194 | 
         
            +
             
     | 
| 195 | 
         
            +
            print(""*** Pipeline:"")
         
     | 
| 196 | 
         
            +
            pipe = pipeline(
         
     | 
| 197 | 
         
            +
                ""text-generation"",
         
     | 
| 198 | 
         
            +
                model=model,
         
     | 
| 199 | 
         
            +
                tokenizer=tokenizer,
         
     | 
| 200 | 
         
            +
                max_new_tokens=512,
         
     | 
| 201 | 
         
            +
                do_sample=True,
         
     | 
| 202 | 
         
            +
                temperature=0.7,
         
     | 
| 203 | 
         
            +
                top_p=0.95,
         
     | 
| 204 | 
         
            +
                top_k=40,
         
     | 
| 205 | 
         
            +
                repetition_penalty=1.1
         
     | 
| 206 | 
         
            +
            )
         
     | 
| 207 | 
         
            +
             
     | 
| 208 | 
         
            +
            print(pipe(prompt_template)[0]['generated_text'])
         
     | 
| 209 | 
         
            +
            ```
         
     | 
| 210 | 
         
            +
            <!-- README_GPTQ.md-use-from-python end -->
         
     | 
| 211 | 
         
            +
             
     | 
| 212 | 
         
            +
            <!-- README_GPTQ.md-compatibility start -->
         
     | 
| 213 | 
         
            +
            ## Compatibility
         
     | 
| 214 | 
         
            +
             
     | 
| 215 | 
         
            +
            The files provided are tested to work with AutoGPTQ, both via Transformers and using AutoGPTQ directly. They should also work with [Occ4m's GPTQ-for-LLaMa fork](https://github.com/0cc4m/KoboldAI).
         
     | 
| 216 | 
         
            +
             
     | 
| 217 | 
         
            +
            [ExLlama](https://github.com/turboderp/exllama) is compatible with Llama models in 4-bit. Please see the Provided Files table above for per-file compatibility.
         
     | 
| 218 | 
         
            +
             
     | 
| 219 | 
         
            +
            [Huggingface Text Generation Inference (TGI)](https://github.com/huggingface/text-generation-inference) is compatible with all GPTQ models.
         
     | 
| 220 | 
         
            +
            <!-- README_GPTQ.md-compatibility end -->
         
     | 
| 221 | 
         
            +
             
     | 
| 222 | 
         
            +
            <!-- footer start -->
         
     | 
| 223 | 
         
            +
            <!-- 200823 -->
         
     | 
| 224 | 
         
            +
            ## Discord
         
     | 
| 225 | 
         
            +
             
     | 
| 226 | 
         
            +
            For further support, and discussions on these models and AI in general, join us at:
         
     | 
| 227 | 
         
            +
             
     | 
| 228 | 
         
            +
            [TheBloke AI's Discord server](https://discord.gg/theblokeai)
         
     | 
| 229 | 
         
            +
             
     | 
| 230 | 
         
            +
            ## Thanks, and how to contribute
         
     | 
| 231 | 
         
            +
             
     | 
| 232 | 
         
            +
            Thanks to the [chirper.ai](https://chirper.ai) team!
         
     | 
| 233 | 
         
            +
             
     | 
| 234 | 
         
            +
             Thanks to Clay from [gpus.llm-utils.org](https://gpus.llm-utils.org)!
         
     | 
| 235 | 
         
            +
             
     | 
| 236 | 
         
            +
            I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training.
         
     | 
| 237 | 
         
            +
             
     | 
| 238 | 
         
            +
            If you're able and willing to contribute it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects.
         
     | 
| 239 | 
         
            +
             
     | 
| 240 | 
         
            +
            Donaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits.
         
     | 
| 241 | 
         
            +
             
     | 
| 242 | 
         
            +
            * Patreon: https://patreon.com/TheBlokeAI
         
     | 
| 243 | 
         
            +
            * Ko-Fi: https://ko-fi.com/TheBlokeAI
         
     | 
| 244 | 
         
            +
             
     | 
| 245 | 
         
            +
            **Special thanks to**: Aemon Algiz.
         
     | 
| 246 | 
         
            +
             
     | 
| 247 | 
         
            +
            **Patreon special mentions**: Alicia Loh, Stephen Murray, K, Ajan Kanaga, RoA, Magnesian, Deo Leter, Olakabola, Eugene Pentland, zynix, Deep Realms, Raymond Fosdick, Elijah Stavena, Iucharbius, Erik Bjäreholt, Luis Javier Navarrete Lozano, Nicholas, theTransient, John Detwiler, alfie_i, knownsqashed, Mano Prime, Willem Michiel, Enrico Ros, LangChain4j, OG, Michael Dempsey, Pierre Kircher, Pedro Madruga, James Bentley, Thomas Belote, Luke @flexchar, Leonard Tan, Johann-Peter Hartmann, Illia Dulskyi, Fen Risland, Chadd, S_X, Jeff Scroggin, Ken Nordquist, Sean Connelly, Artur Olbinski, Swaroop Kallakuri, Jack West, Ai Maven, David Ziegler, Russ Johnson, transmissions 11, John Villwock, Alps Aficionado, Clay Pascal, Viktor Bowallius, Subspace Studios, Rainer Wilmers, Trenton Dambrowitz, vamX, Michael Levine, 준교 김, Brandon Frisco, Kalila, Trailburnt, Randy H, Talal Aujan, Nathan Dryer, Vadim, 阿明, ReadyPlayerEmma, Tiffany J. Kim, George Stoitzev, Spencer Kim, Jerry Meng, Gabriel Tamborski, Cory Kujawski, Jeffrey Morgan, Spiking Neurons AB, Edmond Seymore, Alexandros Triantafyllidis, Lone Striker, Cap'n Zoog, Nikolai Manek, danny, ya boyyy, Derek Yates, usrbinkat, Mandus, TL, Nathan LeClaire, subjectnull, Imad Khwaja, webtim, Raven Klaugh, Asp the Wyvern, Gabriel Puliatti, Caitlyn Gatomon, Joseph William Delisle, Jonathan Leane, Luke Pendergrass, SuperWojo, Sebastain Graf, Will Dee, Fred von Graf, Andrey, Dan Guido, Daniel P. Andersen, Nitin Borwankar, Elle, Vitor Caleffi, biorpg, jjj, NimbleBox.ai, Pieter, Matthew Berman, terasurfer, Michael Davis, Alex, Stanislav Ovsiannikov
         
     | 
| 248 | 
         
            +
             
     | 
| 249 | 
         
            +
             
     | 
| 250 | 
         
            +
            Thank you to all my generous patrons and donaters!
         
     | 
| 251 | 
         
            +
             
     | 
| 252 | 
         
            +
            And thank you again to a16z for their generous grant.
         
     | 
| 253 | 
         
            +
             
     | 
| 254 | 
         
            +
            <!-- footer end -->
         
     | 
| 255 | 
         
            +
             
     | 
| 256 | 
         
            +
            # Original model card: Eric Hartford's Wizard-Vicuna-30B-Uncensored
         
     | 
| 257 | 
         
            +
             
     | 
| 258 | 
         
            +
             
     | 
| 259 | 
         
            +
            <!-- header start -->
         
     | 
| 260 | 
         
            +
            <div style=""width: 100%;"">
         
     | 
| 261 | 
         
            +
                <img src=""https://i.imgur.com/EBdldam.jpg"" alt=""TheBlokeAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
         
     | 
| 262 | 
         
            +
            </div>
         
     | 
| 263 | 
         
            +
            <div style=""display: flex; justify-content: space-between; width: 100%;"">
         
     | 
| 264 | 
         
            +
                <div style=""display: flex; flex-direction: column; align-items: flex-start;"">
         
     | 
| 265 | 
         
            +
                    <p><a href=""https://discord.gg/Jq4vkcDakD"">Chat & support: my new Discord server</a></p>
         
     | 
| 266 | 
         
            +
                </div>
         
     | 
| 267 | 
         
            +
                <div style=""display: flex; flex-direction: column; align-items: flex-end;"">
         
     | 
| 268 | 
         
            +
                    <p><a href=""https://www.patreon.com/TheBlokeAI"">Want to contribute? TheBloke's Patreon page</a></p>
         
     | 
| 269 | 
         
            +
                </div>
         
     | 
| 270 | 
         
            +
            </div>
         
     | 
| 271 | 
         
            +
            <!-- header end -->
         
     | 
| 272 | 
         
            +
             
     | 
| 273 | 
         
            +
            # Eric Hartford's Wizard-Vicuna-30B-Uncensored GPTQ
         
     | 
| 274 | 
         
            +
             
     | 
| 275 | 
         
            +
             This is an fp16 model of [Eric Hartford's Wizard-Vicuna 30B](https://huggingface.co/ehartford/Wizard-Vicuna-30B-Uncensored).
         
     | 
| 276 | 
         
            +
             
     | 
| 277 | 
         
            +
            It is the result of converting Eric's original fp32 upload to fp16.
         
     | 
| 278 | 
         
            +
             
     | 
| 279 | 
         
            +
            ## Repositories available
         
     | 
| 280 | 
         
            +
             
     | 
| 281 | 
         
            +
            * [4bit GPTQ models for GPU inference](https://huggingface.co/TheBloke/Wizard-Vicuna-30B-Uncensored-GPTQ).
         
     | 
| 282 | 
         
            +
            * [4bit and 5bit GGML models for CPU inference](https://huggingface.co/TheBloke/Wizard-Vicuna-30B-Uncensored-GGML).
         
     | 
| 283 | 
         
            +
            * [float16 HF format model for GPU inference and further conversions](https://huggingface.co/TheBloke/Wizard-Vicuna-30B-Uncensored-fp16).
         
     | 
| 284 | 
         
            +
             
     | 
| 285 | 
         
            +
            <!-- footer start -->
         
     | 
| 286 | 
         
            +
            ## Discord
         
     | 
| 287 | 
         
            +
             
     | 
| 288 | 
         
            +
            For further support, and discussions on these models and AI in general, join us at:
         
     | 
| 289 | 
         
            +
             
     | 
| 290 | 
         
            +
            [TheBloke AI's Discord server](https://discord.gg/Jq4vkcDakD)
         
     | 
| 291 | 
         
            +
             
     | 
| 292 | 
         
            +
            ## Thanks, and how to contribute.
         
     | 
| 293 | 
         
            +
             
     | 
| 294 | 
         
            +
            Thanks to the [chirper.ai](https://chirper.ai) team!
         
     | 
| 295 | 
         
            +
             
     | 
| 296 | 
         
            +
            I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training.
         
     | 
| 297 | 
         
            +
             
     | 
| 298 | 
         
            +
            If you're able and willing to contribute it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects.
         
     | 
| 299 | 
         
            +
             
     | 
| 300 | 
         
            +
            Donaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits.
         
     | 
| 301 | 
         
            +
             
     | 
| 302 | 
         
            +
            * Patreon: https://patreon.com/TheBlokeAI
         
     | 
| 303 | 
         
            +
            * Ko-Fi: https://ko-fi.com/TheBlokeAI
         
     | 
| 304 | 
         
            +
             
     | 
| 305 | 
         
            +
            **Patreon special mentions**: Aemon Algiz, Dmitriy Samsonov, Nathan LeClaire, Trenton Dambrowitz, Mano Prime, David Flickinger, vamX, Nikolai Manek, senxiiz, Khalefa Al-Ahmad, Illia Dulskyi, Jonathan Leane, Talal Aujan, V. Lukas, Joseph William Delisle, Pyrater, Oscar Rangel, Lone Striker, Luke Pendergrass, Eugene Pentland, Sebastain Graf, Johann-Peter Hartman.
         
     | 
| 306 | 
         
            +
             
     | 
| 307 | 
         
            +
            Thank you to all my generous patrons and donaters!
         
     | 
| 308 | 
         
            +
            <!-- footer end -->
         
     | 
| 309 | 
         
            +
             
     | 
| 310 | 
         
            +
            # Original model card
         
     | 
| 311 | 
         
            +
             
     | 
| 312 | 
         
            +
             This is [wizard-vicuna-13b](https://huggingface.co/junelee/wizard-vicuna-13b) trained with a subset of the dataset - responses that contained alignment / moralizing were removed. The intent is to train a WizardLM that doesn't have alignment built-in, so that alignment (of any sort) can be added separately, for example with an RLHF LoRA.
         
     | 
| 313 | 
         
            +
             
     | 
| 314 | 
         
            +
            Shout out to the open source AI/ML community, and everyone who helped me out.
         
     | 
| 315 | 
         
            +
             
     | 
| 316 | 
         
            +
            Note:
         
     | 
| 317 | 
         
            +
             
     | 
| 318 | 
         
            +
            An uncensored model has no guardrails.
         
     | 
| 319 | 
         
            +
             
     | 
| 320 | 
         
            +
            You are responsible for anything you do with the model, just as you are responsible for anything you do with any dangerous object such as a knife, gun, lighter, or car.
         
     | 
| 321 | 
         
            +
             
     | 
| 322 | 
         
            +
            Publishing anything this model generates is the same as publishing it yourself.
         
     | 
| 323 | 
         
            +
             
     | 
| 324 | 
         
            +
            You are responsible for the content you publish, and you cannot blame the model any more than you can blame the knife, gun, lighter, or car for what you do with it.
         
     | 
| 325 | 
         
            +
            ","{""id"": ""TheBloke/Wizard-Vicuna-30B-Uncensored-GPTQ"", ""author"": ""TheBloke"", ""sha"": ""3af62c796031ef5a6ece16c163a8444609d9c376"", ""last_modified"": ""2023-09-27 12:44:25+00:00"", ""created_at"": ""2023-05-30 03:11:00+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 442, ""downloads_all_time"": null, ""likes"": 579, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""llama"", ""text-generation"", ""uncensored"", ""en"", ""dataset:ehartford/wizard_vicuna_70k_unfiltered"", ""base_model:cognitivecomputations/Wizard-Vicuna-30B-Uncensored"", ""base_model:quantized:cognitivecomputations/Wizard-Vicuna-30B-Uncensored"", ""license:other"", ""autotrain_compatible"", ""text-generation-inference"", ""4-bit"", ""gptq"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: ehartford/Wizard-Vicuna-30B-Uncensored\ndatasets:\n- ehartford/wizard_vicuna_70k_unfiltered\nlanguage:\n- en\nlicense: other\nmodel_name: Wizard Vicuna 30B Uncensored\ntags:\n- uncensored\ninference: false\nmodel_creator: Eric Hartford\nmodel_type: llama\nprompt_template: 'A chat between a curious user and an artificial intelligence assistant.\n  The assistant gives helpful, detailed, and polite answers to the user''s questions.\n  USER: {prompt} ASSISTANT:\n\n  '\nquantized_by: TheBloke"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""quantization_config"": {""bits"": 4, ""quant_method"": ""gptq""}, ""tokenizer_config"": {""bos_token"": {""__type"": ""AddedToken"", ""content"": ""<s>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""eos_token"": {""__type"": ""AddedToken"", ""content"": ""</s>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""pad_token"": null, ""unk_token"": {""__type"": ""AddedToken"", ""content"": ""<unk>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='quantize_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='trainer_state.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Intel/low_bit_open_llm_leaderboard"", 
""BAAI/open_cn_llm_leaderboard"", ""gsaivinay/open_llm_leaderboard"", ""KBaba7/Quant"", ""GTBench/GTBench"", ""Vikhrmodels/small-shlepa-lb"", ""kz-transformers/kaz-llm-lb"", ""felixz/open_llm_leaderboard"", ""bhaskartripathi/LLM_Quantization"", ""OPTML-Group/UnlearnCanvas-Benchmark"", ""dar-tau/selfie"", ""totolook/Quant"", ""FallnAI/Quantize-HF-Models"", ""BAAI/open_flageval_vlm_leaderboard"", ""neubla/neubla-llm-evaluation-board"", ""rodrigomasini/data_only_open_llm_leaderboard"", ""Docfile/open_llm_leaderboard"", ""JS-Junior/WizardLM-WizardCoder-15B-V1.0"", ""ruslanmv/convert_to_gguf"", ""JacopoCirica/AIChatbot"", ""dmar1313/true"", ""smothiki/open_llm_leaderboard"", ""csalabs/AI-EMBD"", ""csalabs/Replicate-7b-chat-Llama-streamlit"", ""0x1668/open_llm_leaderboard"", ""pngwn/open_llm_leaderboard-check"", ""asir0z/open_llm_leaderboard"", ""dkdaniz/katara"", ""kbmlcoding/open_llm_leaderboard_free"", ""aichampions/open_llm_leaderboard"", ""Adeco/open_llm_leaderboard"", ""anirudh937/open_llm_leaderboard"", ""smothiki/open_llm_leaderboard2"", ""mjalg/IFEvalTR"", ""coool123132314/cool"", ""K00B404/LLM_Quantization""], ""safetensors"": {""parameters"": {""F32"": 8298240, ""I32"": 4013287680, ""F16"": 426789376}, ""total"": 4448375296}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-09-27 12:44:25+00:00"", ""cardData"": ""base_model: ehartford/Wizard-Vicuna-30B-Uncensored\ndatasets:\n- ehartford/wizard_vicuna_70k_unfiltered\nlanguage:\n- en\nlicense: other\nmodel_name: Wizard Vicuna 30B Uncensored\ntags:\n- uncensored\ninference: false\nmodel_creator: Eric Hartford\nmodel_type: llama\nprompt_template: 'A chat between a curious user and an artificial intelligence assistant.\n  The assistant gives helpful, detailed, and polite answers to the user''s questions.\n  USER: {prompt} ASSISTANT:\n\n  '\nquantized_by: TheBloke"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6475694482907acdddfa6e86"", ""modelId"": ""TheBloke/Wizard-Vicuna-30B-Uncensored-GPTQ"", ""usedStorage"": 348325828923}",0,,0,,0,https://huggingface.co/PrunaAI/TheBloke-Wizard-Vicuna-30B-Uncensored-GPTQ-GGUF-smashed,1,,0,"BAAI/open_cn_llm_leaderboard, BAAI/open_flageval_vlm_leaderboard, FallnAI/Quantize-HF-Models, GTBench/GTBench, Intel/low_bit_open_llm_leaderboard, K00B404/LLM_Quantization, KBaba7/Quant, OPTML-Group/UnlearnCanvas-Benchmark, Vikhrmodels/small-shlepa-lb, bhaskartripathi/LLM_Quantization, gsaivinay/open_llm_leaderboard, huggingface/InferenceSupport/discussions/new?title=TheBloke/Wizard-Vicuna-30B-Uncensored-GPTQ&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BTheBloke%2FWizard-Vicuna-30B-Uncensored-GPTQ%5D(%2FTheBloke%2FWizard-Vicuna-30B-Uncensored-GPTQ)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kz-transformers/kaz-llm-lb",13
         
     | 
    	
        YOLOv8_finetunes_20250426_212347.csv_finetunes_20250426_212347.csv
    ADDED
    
    | 
         The diff for this file is too large to render. See raw diff
     | 
| 
         | 
    	
        Yi-Coder-9B-Chat_finetunes_20250426_215237.csv_finetunes_20250426_215237.csv
    ADDED
    
    | 
         @@ -0,0 +1,232 @@ 
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
01-ai/Yi-Coder-9B-Chat,"---
library_name: transformers
license: apache-2.0
base_model: 01-ai/Yi-Coder-9B
pipeline_tag: text-generation
---
<div align=""center"">

<picture> 
  <img src=""https://raw.githubusercontent.com/01-ai/Yi/main/assets/img/Yi_logo_icon_light.svg"" width=""120px"">
</picture>

</div>

<p align=""center"">
  <a href=""https://github.com/01-ai"">🐙 GitHub</a> •
  <a href=""https://discord.gg/hYUwWddeAu"">👾 Discord</a> •
  <a href=""https://twitter.com/01ai_yi"">🐤 Twitter</a> •
  <a href=""https://github.com/01-ai/Yi-1.5/issues/2"">💬 WeChat</a> 
  <br/>
  <a href=""https://arxiv.org/abs/2403.04652"">📝 Paper</a> •
  <a href=""https://01-ai.github.io/"">💪 Tech Blog</a> •
  <a href=""https://github.com/01-ai/Yi/tree/main?tab=readme-ov-file#faq"">🙌 FAQ</a> •
  <a href=""https://github.com/01-ai/Yi/tree/main?tab=readme-ov-file#learning-hub"">📗 Learning Hub</a>
</p>

# Intro

Yi-Coder is a series of open-source code language models that delivers state-of-the-art coding performance with fewer than 10 billion parameters. 

Key features:
- Excelling in long-context understanding with a maximum context length of 128K tokens.
- Supporting 52 major programming languages:
```bash
  'java', 'markdown', 'python', 'php', 'javascript', 'c++', 'c#', 'c', 'typescript', 'html', 'go', 'java_server_pages', 'dart', 'objective-c', 'kotlin', 'tex', 'swift', 'ruby', 'sql', 'rust', 'css', 'yaml', 'matlab', 'lua', 'json', 'shell', 'visual_basic', 'scala', 'rmarkdown', 'pascal', 'fortran', 'haskell', 'assembly', 'perl', 'julia', 'cmake', 'groovy', 'ocaml', 'powershell', 'elixir', 'clojure', 'makefile', 'coffeescript', 'erlang', 'lisp', 'toml', 'batchfile', 'cobol', 'dockerfile', 'r', 'prolog', 'verilog'
  ```

For model details and benchmarks, see [Yi-Coder blog](https://01-ai.github.io/) and [Yi-Coder README](https://github.com/01-ai/Yi-Coder).

<p align=""left""> 
  <img src=""https://github.com/01-ai/Yi/blob/main/assets/img/coder/yi-coder-calculator-demo.gif?raw=true"" alt=""demo1"" width=""500""/> 
</p>

# Models

| Name               | Type |  Length | Download                                                                                                                                          |
|--------------------|------|----------------|---------------------------------------------------------------------------------------------------------------------------------------------------|
| Yi-Coder-9B-Chat   | Chat |      128K      | [🤗 Hugging Face](https://huggingface.co/01-ai/Yi-Coder-9B-Chat) • [🤖 ModelScope](https://www.modelscope.cn/models/01ai/Yi-Coder-9B-Chat) • [🟣 wisemodel](https://wisemodel.cn/models/01.AI/Yi-Coder-9B-Chat) |
| Yi-Coder-1.5B-Chat | Chat |      128K      | [🤗 Hugging Face](https://huggingface.co/01-ai/Yi-Coder-1.5B-Chat) • [🤖 ModelScope](https://www.modelscope.cn/models/01ai/Yi-Coder-1.5B-Chat) • [🟣 wisemodel](https://wisemodel.cn/models/01.AI/Yi-Coder-1.5B-Chat) |
| Yi-Coder-9B        | Base |      128K      | [🤗 Hugging Face](https://huggingface.co/01-ai/Yi-Coder-9B) • [🤖 ModelScope](https://www.modelscope.cn/models/01ai/Yi-Coder-9B) • [🟣 wisemodel](https://wisemodel.cn/models/01.AI/Yi-Coder-9B) |
| Yi-Coder-1.5B      | Base |      128K      | [🤗 Hugging Face](https://huggingface.co/01-ai/Yi-Coder-1.5B) • [🤖 ModelScope](https://www.modelscope.cn/models/01ai/Yi-Coder-1.5B) • [🟣 wisemodel](https://wisemodel.cn/models/01.AI/Yi-Coder-1.5B) |
|                    | 

# Benchmarks

As illustrated in the figure below, Yi-Coder-9B-Chat achieved an impressive 23% pass rate in LiveCodeBench, making it the only model with under 10B parameters to surpass 20%. It also outperforms DeepSeekCoder-33B-Ins at 22.3%, CodeGeex4-9B-all at 17.8%, CodeLLama-34B-Ins at 13.3%, and CodeQwen1.5-7B-Chat at 12%.

<p align=""left""> 
  <img src=""https://github.com/01-ai/Yi/blob/main/assets/img/coder/bench1.webp?raw=true"" alt=""bench1"" width=""1000""/> 
</p>

# Quick Start

You can use transformers to run inference with Yi-Coder models (both chat and base versions) as follows:
```python
from transformers import AutoTokenizer, AutoModelForCausalLM

device = ""cuda"" # the device to load the model onto
model_path = ""01-ai/Yi-Coder-9B-Chat""

tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path, device_map=""auto"").eval()

prompt = ""Write a quick sort algorithm.""
messages = [
    {""role"": ""system"", ""content"": ""You are a helpful assistant.""},
    {""role"": ""user"", ""content"": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors=""pt"").to(device)

generated_ids = model.generate(
    model_inputs.input_ids,
    max_new_tokens=1024,
    eos_token_id=tokenizer.eos_token_id
)
generated_ids = [
    output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]

response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(response)
```

            For getting up and running with Yi-Coder series models quickly, see [Yi-Coder README](https://github.com/01-ai/Yi-Coder).","{""id"": ""01-ai/Yi-Coder-9B-Chat"", ""author"": ""01-ai"", ""sha"": ""356a1f8d4e4a606d0b879e54191ca809918576b8"", ""last_modified"": ""2024-09-12 06:51:12+00:00"", ""created_at"": ""2024-08-21 02:11:52+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 2217, ""downloads_all_time"": null, ""likes"": 202, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""llama"", ""text-generation"", ""conversational"", ""arxiv:2403.04652"", ""base_model:01-ai/Yi-Coder-9B"", ""base_model:finetune:01-ai/Yi-Coder-9B"", ""license:apache-2.0"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: 01-ai/Yi-Coder-9B\nlibrary_name: transformers\nlicense: apache-2.0\npipeline_tag: text-generation"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<|startoftext|>"", ""chat_template"": ""{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ '<|im_start|>system\n' + system_message + '<|im_end|>\n' }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|im_start|>user\n' + content + '<|im_end|>\n<|im_start|>assistant\n' }}{% elif message['role'] == 'assistant' %}{{ content + '<|im_end|>' + '\n' }}{% endif %}{% endfor %}"", ""eos_token"": ""<|im_end|>"", ""pad_token"": ""<unk>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""eduagarcia/open_pt_llm_leaderboard"", ""Tonic/Yi-Coder-9B"", ""KwabsHug/GameConfigIdea"", ""victoramit/01-ai-Yi-Coder-9B-Chat"", 
""patrol114/YetiAI"", ""C2MV/PROYECTO_2024"", ""C2MV/Biotech2"", ""Lilitgkh/MagicTreeCrownsKeysStarsLights"", ""krooks/01-ai-Yi-Coder-9B-Chat""], ""safetensors"": {""parameters"": {""BF16"": 8829407232}, ""total"": 8829407232}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-09-12 06:51:12+00:00"", ""cardData"": ""base_model: 01-ai/Yi-Coder-9B\nlibrary_name: transformers\nlicense: apache-2.0\npipeline_tag: text-generation"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""66c54ce88b740fe8580a280e"", ""modelId"": ""01-ai/Yi-Coder-9B-Chat"", ""usedStorage"": 52977577257}",0,https://huggingface.co/BenevolenceMessiah/Yi-Coder-9B-Chat-8x-MoE,1,,0,"https://huggingface.co/MaziyarPanahi/Yi-Coder-9B-Chat-GGUF, https://huggingface.co/lmstudio-community/Yi-Coder-9B-Chat-GGUF, https://huggingface.co/second-state/Yi-Coder-9B-Chat-GGUF, https://huggingface.co/gaianet/Yi-Coder-9B-Chat-GGUF, https://huggingface.co/bartowski/Yi-Coder-9B-Chat-GGUF, https://huggingface.co/altomek/Yi-Coder-9B-Chat-8bpw-EXL2, https://huggingface.co/CISCai/Yi-Coder-9B-Chat-SOTA-GGUF, https://huggingface.co/legraphista/Yi-Coder-9B-Chat-IMat-GGUF, https://huggingface.co/mradermacher/Yi-Coder-9B-Chat-GGUF, https://huggingface.co/mradermacher/Yi-Coder-9B-Chat-i1-GGUF, https://huggingface.co/wsxiaoys/Yi-Coder-9B-Chat-Q8_0-GGUF, https://huggingface.co/stelterlab/Yi-Coder-9B-Chat-AWQ, https://huggingface.co/DBigD/Yi-Coder-9B-Chat-Q4_K_M-GGUF, https://huggingface.co/denkijin/Yi-Coder-9B-Chat-Q8_0-GGUF, https://huggingface.co/Solshine/Yi-Coder-9B-Chat-Q4_K_M-GGUF, https://huggingface.co/mukel/Yi-Coder-9B-Chat-GGUF, https://huggingface.co/tensorblock/Yi-Coder-9B-Chat-GGUF, https://huggingface.co/itlwas/Yi-Coder-9B-Chat-Q4_K_M-GGUF",18,"https://huggingface.co/BenevolenceMessiah/Yi-Coder-9B-Chat-Instruct-TIES, https://huggingface.co/QuantFactory/Yi-Coder-9B-Chat-Instruct-TIES-GGUF",2,"C2MV/Biotech2, C2MV/PROYECTO_2024, KwabsHug/GameConfigIdea, Lilitgkh/MagicTreeCrownsKeysStarsLights, Tonic/Yi-Coder-9B, eduagarcia/open_pt_llm_leaderboard, huggingface/InferenceSupport/discussions/new?title=01-ai/Yi-Coder-9B-Chat&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5B01-ai%2FYi-Coder-9B-Chat%5D(%2F01-ai%2FYi-Coder-9B-Chat)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, krooks/01-ai-Yi-Coder-9B-Chat, patrol114/YetiAI, victoramit/01-ai-Yi-Coder-9B-Chat",10
         
BenevolenceMessiah/Yi-Coder-9B-Chat-8x-MoE,"---
base_model:
- 01-ai/Yi-Coder-9B-Chat
library_name: transformers
tags:
- mergekit
- merge
license: apache-2.0
---
# merge

This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).

## Merge Details
### Merge Method

This model was merged using the [MoE](https://arxiv.org/abs/2306.01708) merge method using [01-ai/Yi-Coder-9B-Chat](https://huggingface.co/01-ai/Yi-Coder-9B-Chat) as a base.

### Models Merged

The following models were included in the merge:
* [01-ai/Yi-Coder-9B-Chat](https://huggingface.co/01-ai/Yi-Coder-9B-Chat)

### Configuration

The following YAML configuration was used to produce this model:

```yaml
base_model: 01-ai/Yi-Coder-9B-Chat
gate_mode: random
dtype: bfloat16
experts:
  - source_model: 01-ai/Yi-Coder-9B-Chat
  - source_model: 01-ai/Yi-Coder-9B-Chat
  - source_model: 01-ai/Yi-Coder-9B-Chat
  - source_model: 01-ai/Yi-Coder-9B-Chat
  - source_model: 01-ai/Yi-Coder-9B-Chat
  - source_model: 01-ai/Yi-Coder-9B-Chat
  - source_model: 01-ai/Yi-Coder-9B-Chat
  - source_model: 01-ai/Yi-Coder-9B-Chat
```
<picture> 
  <img src=""https://raw.githubusercontent.com/01-ai/Yi/main/assets/img/Yi_logo_icon_light.svg"" width=""120px"">
</picture>

</div>

<p align=""center"">
  <a href=""https://github.com/01-ai"">🐙 GitHub</a> •
  <a href=""https://discord.gg/hYUwWddeAu"">👾 Discord</a> •
  <a href=""https://twitter.com/01ai_yi"">🐤 Twitter</a> •
  <a href=""https://github.com/01-ai/Yi-1.5/issues/2"">💬 WeChat</a> 
  <br/>
  <a href=""https://arxiv.org/abs/2403.04652"">📝 Paper</a> •
  <a href=""https://01-ai.github.io/"">💪 Tech Blog</a> •
  <a href=""https://github.com/01-ai/Yi/tree/main?tab=readme-ov-file#faq"">🙌 FAQ</a> •
  <a href=""https://github.com/01-ai/Yi/tree/main?tab=readme-ov-file#learning-hub"">📗 Learning Hub</a>
</p>

# Intro

Yi-Coder is a series of open-source code language models that delivers state-of-the-art coding performance with fewer than 10 billion parameters. 

Key features:
- Excelling in long-context understanding with a maximum context length of 128K tokens.
- Supporting 52 major programming languages:
```bash
  'java', 'markdown', 'python', 'php', 'javascript', 'c++', 'c#', 'c', 'typescript', 'html', 'go', 'java_server_pages', 'dart', 'objective-c', 'kotlin', 'tex', 'swift', 'ruby', 'sql', 'rust', 'css', 'yaml', 'matlab', 'lua', 'json', 'shell', 'visual_basic', 'scala', 'rmarkdown', 'pascal', 'fortran', 'haskell', 'assembly', 'perl', 'julia', 'cmake', 'groovy', 'ocaml', 'powershell', 'elixir', 'clojure', 'makefile', 'coffeescript', 'erlang', 'lisp', 'toml', 'batchfile', 'cobol', 'dockerfile', 'r', 'prolog', 'verilog'
  ```

For model details and benchmarks, see [Yi-Coder blog](https://01-ai.github.io/) and [Yi-Coder README](https://github.com/01-ai/Yi-Coder).

<p align=""left""> 
  <img src=""https://github.com/01-ai/Yi/blob/main/assets/img/coder/yi-coder-calculator-demo.gif?raw=true"" alt=""demo1"" width=""500""/> 
</p>

# Models

| Name               | Type |  Length | Download                                                                                                                                          |
|--------------------|------|----------------|---------------------------------------------------------------------------------------------------------------------------------------------------|
| Yi-Coder-9B-Chat   | Chat |      128K      | [🤗 Hugging Face](https://huggingface.co/01-ai/Yi-Coder-9B-Chat) • [🤖 ModelScope](https://www.modelscope.cn/models/01ai/Yi-Coder-9B-Chat) • [🟣 wisemodel](https://wisemodel.cn/models/01.AI/Yi-Coder-9B-Chat) |
| Yi-Coder-1.5B-Chat | Chat |      128K      | [🤗 Hugging Face](https://huggingface.co/01-ai/Yi-Coder-1.5B-Chat) • [🤖 ModelScope](https://www.modelscope.cn/models/01ai/Yi-Coder-1.5B-Chat) • [🟣 wisemodel](https://wisemodel.cn/models/01.AI/Yi-Coder-1.5B-Chat) |
| Yi-Coder-9B        | Base |      128K      | [🤗 Hugging Face](https://huggingface.co/01-ai/Yi-Coder-9B) • [🤖 ModelScope](https://www.modelscope.cn/models/01ai/Yi-Coder-9B) • [🟣 wisemodel](https://wisemodel.cn/models/01.AI/Yi-Coder-9B) |
| Yi-Coder-1.5B      | Base |      128K      | [🤗 Hugging Face](https://huggingface.co/01-ai/Yi-Coder-1.5B) • [🤖 ModelScope](https://www.modelscope.cn/models/01ai/Yi-Coder-1.5B) • [🟣 wisemodel](https://wisemodel.cn/models/01.AI/Yi-Coder-1.5B) |
|                    | 

# Benchmarks

As illustrated in the figure below, Yi-Coder-9B-Chat achieved an impressive 23% pass rate in LiveCodeBench, making it the only model with under 10B parameters to surpass 20%. It also outperforms DeepSeekCoder-33B-Ins at 22.3%, CodeGeex4-9B-all at 17.8%, CodeLLama-34B-Ins at 13.3%, and CodeQwen1.5-7B-Chat at 12%.

<p align=""left""> 
  <img src=""https://github.com/01-ai/Yi/blob/main/assets/img/coder/bench1.webp?raw=true"" alt=""bench1"" width=""1000""/> 
</p>

# Quick Start

You can use transformers to run inference with Yi-Coder models (both chat and base versions) as follows:
```python
from transformers import AutoTokenizer, AutoModelForCausalLM

device = ""cuda"" # the device to load the model onto
model_path = ""01-ai/Yi-Coder-9B-Chat""

tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path, device_map=""auto"").eval()

prompt = ""Write a quick sort algorithm.""
messages = [
    {""role"": ""system"", ""content"": ""You are a helpful assistant.""},
    {""role"": ""user"", ""content"": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors=""pt"").to(device)

generated_ids = model.generate(
    model_inputs.input_ids,
    max_new_tokens=1024,
    eos_token_id=tokenizer.eos_token_id
)
generated_ids = [
    output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]

response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(response)
```

            For getting up and running with Yi-Coder series models quickly, see [Yi-Coder ","{""id"": ""BenevolenceMessiah/Yi-Coder-9B-Chat-8x-MoE"", ""author"": ""BenevolenceMessiah"", ""sha"": ""038b592b45c6565a03ffc6e56f624a28f9b119c3"", ""last_modified"": ""2024-09-18 00:14:35+00:00"", ""created_at"": ""2024-09-14 22:23:35+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""mixtral"", ""text-generation"", ""mergekit"", ""merge"", ""conversational"", ""arxiv:2306.01708"", ""arxiv:2403.04652"", ""base_model:01-ai/Yi-Coder-9B-Chat"", ""base_model:finetune:01-ai/Yi-Coder-9B-Chat"", ""license:apache-2.0"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- 01-ai/Yi-Coder-9B-Chat\nlibrary_name: transformers\nlicense: apache-2.0\ntags:\n- mergekit\n- merge"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""MixtralForCausalLM""], ""model_type"": ""mixtral"", ""tokenizer_config"": {""bos_token"": ""<|startoftext|>"", ""chat_template"": ""{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ '<|im_start|>system\n' + system_message + '<|im_end|>\n' }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|im_start|>user\n' + content + '<|im_end|>\n<|im_start|>assistant\n' }}{% elif message['role'] == 'assistant' %}{{ content + '<|im_end|>' + '\n' }}{% endif %}{% endfor %}"", ""eos_token"": ""<|im_end|>"", ""pad_token"": ""<|startoftext|>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mergekit_moe_config.yml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00023.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00023.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00023.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00023.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00023.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00023.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00023.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00023.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00023.safetensors', 
size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-00023.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-00023.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00012-of-00023.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00013-of-00023.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00014-of-00023.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00015-of-00023.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00016-of-00023.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00017-of-00023.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00018-of-00023.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00019-of-00023.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00020-of-00023.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00021-of-00023.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00022-of-00023.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00023-of-00023.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 54280458240}, ""total"": 54280458240}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-09-18 00:14:35+00:00"", ""cardData"": ""base_model:\n- 01-ai/Yi-Coder-9B-Chat\nlibrary_name: transformers\nlicense: apache-2.0\ntags:\n- mergekit\n- merge"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""66e60ce7000f04a8a18b0e65"", ""modelId"": ""BenevolenceMessiah/Yi-Coder-9B-Chat-8x-MoE"", ""usedStorage"": 108562141353}",1,,0,,0,"https://huggingface.co/mradermacher/Yi-Coder-9B-Chat-8x-MoE-GGUF, https://huggingface.co/mradermacher/Yi-Coder-9B-Chat-8x-MoE-i1-GGUF",2,,0,huggingface/InferenceSupport/discussions/new?title=BenevolenceMessiah/Yi-Coder-9B-Chat-8x-MoE&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BBenevolenceMessiah%2FYi-Coder-9B-Chat-8x-MoE%5D(%2FBenevolenceMessiah%2FYi-Coder-9B-Chat-8x-MoE)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
         
Ziya-LLaMA-13B-v1_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv
ADDED
@@ -0,0 +1,218 @@
| 1 | 
         
            +
            model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
         
     | 
| 2 | 
         
            +
            IDEA-CCNL/Ziya-LLaMA-13B-v1,"---
         
     | 
| 3 | 
         
            +
            license: gpl-3.0
         
     | 
| 4 | 
         
            +
            language:
         
     | 
| 5 | 
         
            +
            - en
         
     | 
| 6 | 
         
            +
            - zh
         
     | 
| 7 | 
         
            +
             
     | 
| 8 | 
         
            +
            inference: false
         
     | 
| 9 | 
         
            +
             
     | 
| 10 | 
         
            +
            ---
         
     | 
| 11 | 
         
            +
             
     | 
| 12 | 
         
            +
            # Ziya-LLaMA-13B-v1
         
     | 
| 13 | 
         
            +
             
     | 
| 14 | 
         
            +
            - Main Page:[Fengshenbang](https://fengshenbang-lm.com/)
         
     | 
| 15 | 
         
            +
            - Github: [Fengshenbang-LM](https://github.com/IDEA-CCNL/Fengshenbang-LM)
         
     | 
| 16 | 
         
            +
             
     | 
| 17 | 
         
            +
            (LLaMA权重的许可证限制,我们无法直接发布完整的模型权重,用户需要参考[使用说明](#-使用-usage-)进行合并)
         
     | 
| 18 | 
         
            +
             
     | 
| 19 | 
         
            +
            # 姜子牙系列模型
         
     | 
| 20 | 
         
            +
             
     | 
| 21 | 
         
            +
            - [Ziya-LLaMA-13B-v1.1](https://huggingface.co/IDEA-CCNL/Ziya-LLaMA-13B-v1.1)
         
     | 
| 22 | 
         
            +
            - [Ziya-LLaMA-13B-v1](https://huggingface.co/IDEA-CCNL/Ziya-LLaMA-13B-v1)
         
     | 
| 23 | 
         
            +
            - [Ziya-LLaMA-7B-Reward](https://huggingface.co/IDEA-CCNL/Ziya-LLaMA-7B-Reward)
         
     | 
| 24 | 
         
            +
            - [Ziya-LLaMA-13B-Pretrain-v1](https://huggingface.co/IDEA-CCNL/Ziya-LLaMA-13B-Pretrain-v1)
         
     | 
| 25 | 
         
            +
            - [Ziya-BLIP2-14B-Visual-v1](https://huggingface.co/IDEA-CCNL/Ziya-BLIP2-14B-Visual-v1)
         
     | 
| 26 | 
         
            +
             
     | 
| 27 | 
         
            +
            ## 简介 Brief Introduction
         
     | 
| 28 | 
         
            +
             
     | 
| 29 | 
         
            +
            姜子牙通用大模型V1是基于LLaMa的130亿参数的大规模预训练模型,具备翻译,编程,文本分类,信息抽取,摘要,文案生成,常识问答和数学计算等能力。目前姜子牙通用大模型已完成大规模预训练、多任务有监督微调和人类反馈学习三阶段的训练过程。
         
     | 
| 30 | 
         
            +
             
     | 
| 31 | 
         
            +
             
     | 
| 32 | 
         
            +
            The Ziya-LLaMA-13B-v1 is a large-scale pre-trained model based on LLaMA with 13 billion parameters. It has the ability to perform tasks such as translation, programming, text classification, information extraction, summarization, copywriting, common sense Q&A, and mathematical calculation. The Ziya-LLaMA-13B-v1 has undergone three stages of training: large-scale continual pre-training (PT), multi-task supervised fine-tuning (SFT), and human feedback learning (RM, PPO).
         
     | 
| 33 | 
         
            +
             
     | 
| 34 | 
         
            +
            ## 软件依赖
         
     | 
| 35 | 
         
            +
            ```
         
     | 
| 36 | 
         
            +
            pip install torch==1.12.1 tokenizers==0.13.3 git+https://github.com/huggingface/transformers
         
     | 
| 37 | 
         
            +
            ```
         
     | 
| 38 | 
         
            +
             
     | 
| 39 | 
         
            +
            ## 模型分类 Model Taxonomy
         
     | 
| 40 | 
         
            +
             
     | 
| 41 | 
         
            +
            |  需求 Demand  | 任务 Task       | 系列 Series      | 模型 Model    | 参数 Parameter | 额外 Extra |
         
     | 
| 42 | 
         
            +
            |  :----:  | :----:  | :----:  | :----:  | :----:  | :----:  |
         
     | 
| 43 | 
         
            +
            | 通用 General | AGI模型 | 姜子牙 Ziya | LLaMA |     13B    |     English&Chinese     |
         
     | 
| 44 | 
         
            +
             
     | 
| 45 | 
         
            +
            ## 模型信息 Model Information
         
     | 
| 46 | 
         
            +
             
     | 
| 47 | 
         
            +
            ### 继续预训练 Continual pretraining
         
     | 
| 48 | 
         
            +
             
     | 
| 49 | 
         
            +
            原始数据包含英文和中文,其中英文数据来自openwebtext、Books、Wikipedia和Code,中文数据来自清洗后的悟道数据集、自建的中文数据集。在对原始数据进行去重、模型打分、数据分桶、规则过滤、敏感主题过滤和数据评估后,最终得到125B tokens的有效数据。
         
     | 
| 50 | 
         
            +
             
     | 
| 51 | 
         
            +
            为了解决LLaMA原生分词对中文编解码效率低下的问题,我们在LLaMA词表的基础上增加了7k+个常见中文字,通过和LLaMA原生的词表去重,最终得到一个39410大小的词表,并通过复用Transformers里LlamaTokenizer来实现了这一效果。
         
     | 
| 52 | 
         
            +
             
     | 
| 53 | 
         
            +
            在增量训练过程中,我们使用了160张40GB的A100,采用2.6M tokens的训练集样本数量和FP 16的混合精度,吞吐量达到118 TFLOP per GPU per second。因此我们能够在8天的时间里在原生的LLaMA-13B模型基础上,增量训练110B tokens的数据。
         
     | 
| 54 | 
         
            +
             
     | 
| 55 | 
         
            +
            训练期间,虽然遇到了机器宕机、底层框架bug、loss spike等各种问题,但我们通过快速调整,保证了增量训练的稳定性。我们也放出训练过程的loss曲线,让大家了解可能出现的问题。
         
     | 
| 56 | 
         
            +
             
     | 
| 57 | 
         
            +
            The original data contains both English and Chinese, with English data from openwebtext, Books, Wikipedia, and Code, and Chinese data from the cleaned Wudao dataset and self-built Chinese dataset. After deduplication, model scoring, data bucketing, rule filtering, sensitive topic filtering, and data evaluation, we finally obtained 125 billion tokens of valid data.
         
     | 
| 58 | 
         
            +
             
     | 
| 59 | 
         
            +
            To address the issue of low efficiency in Chinese encoding and decoding caused by the native word segmentation of LLaMa, we added 8,000 commonly used Chinese characters to the LLaMa vocabulary. By removing duplicates with the original LLaMa vocabulary, we finally obtained a vocabulary of size 39,410. We achieved this by reusing the LlamaTokenizer in Transformers.
         
     | 
| 60 | 
         
            +
             
     | 
| 61 | 
         
            +
            During the incremental training process, we used 160 A100s with a total of 40GB memory, using a training dataset with 2.6 million tokens and mixed precision of FP16. The throughput reached 118 TFLOP per GPU per second. As a result, we were able to incrementally train 110 billion tokens of data on top of the native LLaMa-13B model in just 8 days.
         
     | 
| 62 | 
         
            +
             
     | 
| 63 | 
         
            +
            Throughout the training process, we encountered various issues such as machine crashes, underlying framework bugs, and loss spikes. However, we ensured the stability of the incremental training by making rapid adjustments. We have also released the loss curve during the training process to help everyone understand the potential issues that may arise.
         
     | 
| 64 | 
         
            +
             
     | 
| 65 | 
         
            +
            <img src=""https://huggingface.co/datasets/suolyer/testb/blob/main/loss.png"" width=1000 height=600>
         
     | 
| 66 | 
         
            +
             
     | 
| 67 | 
         
            +
            ### 多任务有监督微调 Supervised finetuning
         
     | 
| 68 | 
         
            +
             
     | 
| 69 | 
         
            +
            在多任务有监督微调阶段,采用了课程学习(curiculum learning)和增量训练(continual learning)的策略,用大模型辅助划分已有的数据难度,然后通过“Easy To Hard”的方式,分多个阶段进行SFT训练。
         
     | 
| 70 | 
         
            +
             
     | 
| 71 | 
         
            +
            SFT训练数据包含多个高质量的数据集,均经过人工筛选和校验:
         
     | 
| 72 | 
         
            +
            - Self-Instruct构造的数据(约2M):BELLE、Alpaca、Alpaca-GPT4等多个数据集
         
     | 
| 73 | 
         
            +
            - 内部收集Code数据(300K):包含leetcode、多种Code任务形式
         
     | 
| 74 | 
         
            +
            - 内部收集推理/逻辑相关数据(500K):推理、申论、数学应用题、数值计算等
         
     | 
| 75 | 
         
            +
            - 中英平行语料(2M):中英互译语料、COT类型翻译语料、古文翻译语料等
         
     | 
| 76 | 
         
            +
            - 多轮对话语料(500K):Self-Instruct生成、任务型多轮对话、Role-Playing型多轮对话等
         
     | 
| 77 | 
         
            +
             
     | 
| 78 | 
         
            +
            During the supervised fine-tuning (SFT) phase of multitask learning, we used a strategy of curriculum learning and incremental training. We used the large model to assist in partitioning the existing data by difficulty and then conducted SFT training in multiple stages using the ""easy to hard"" approach.
         
     | 
| 79 | 
         
            +
             
     | 
| 80 | 
         
            +
            The SFT training data consists of multiple high-quality datasets that have been manually selected and verified, including approximately 2 million samples from datasets such as BELLE, Alpaca, and Alpaca-GPT4, 300,000 samples of internally collected code data including LeetCode and various code tasks, 500,000 samples of internally collected inference/logic-related data such as reasoning, argumentative essays, mathematical application questions, and numerical calculations, 2 million samples of Chinese-English parallel corpora including translation, COT-type translation, and classical Chinese translation, and 500,000 samples of multi-turn dialogue corpora including self-instructed generation, task-oriented multi-turn dialogue, and role-playing multi-turn dialogue.
         
     | 
| 81 | 
         
            +
             
     | 
| 82 | 
         
            +
            ### 人类反馈学习 Human-Feedback training
         
     | 
| 83 | 
         
            +
             
     | 
| 84 | 
         
            +
            为了进一步提升模型的综合表现,使其能够充分理解人类意图、减少“幻觉”和不安全的输出,基于指令微调后的模型,进行了人类反馈训练(Human-Feedback Training,HFT)。在训练中,我们采用了以人类反馈强化学习(RM、PPO)为主,结合多种其他手段联合训练的方法,手段包括人类反馈微调(Human-Feedback Fine-tuning,HFFT)、后见链微调(Chain-of-Hindsight Fine-tuning,COHFT)、AI反馈(AI Feedback)和基于规则的奖励系统(Rule-based Reward System,RBRS)等,用来弥补PPO方法的短板,加速训练。
         
     | 
| 85 | 
         
            +
             
     | 
| 86 | 
         
            +
            我们在内部自研的框架上实现了HFT的训练流程,该框架可以利用最少8张40G的A100显卡完成Ziya-LLaMA-13B-v1的全参数训练。在PPO训练中,我们没有限制生成样本的长度,以确保长文本任务的奖励准确性。每次训练的总经验池尺寸超过100k样本,确保了训练的充分性。
         
     | 
| 87 | 
         
            +
             
     | 
| 88 | 
         
            +
            To further improve the overall performance of the model, enabling it to fully understand human intentions, reduce ""hallucinations"" and unsafe outputs, we conducted Human-Feedback Training (HFT) based on the model fine-tuned with instructions. In the training process, we used a variety of methods, including human feedback reinforcement learning (RM, PPO), combined with other methods such as Human-Feedback Fine-tuning (HFFT), Chain-of-Hindsight Fine-tuning (COHFT), AI feedback, and Rule-based Reward System (RBRS), to supplement the shortcomings of the PPO method and accelerate training.
         
     | 
| 89 | 
         
            +
             
     | 
| 90 | 
         
            +
            We implemented the HFT training process on an internally developed framework, which can use a minimum of 8 40GB A100 GPUs to complete the full parameter training of Ziya-LLaMA-13B-v1. In the PPO training, we did not limit the length of the generated samples to ensure the accuracy of rewards for long-text tasks. The total experience pool size for each training exceeded 100k samples, ensuring the sufficiency of the training.
         
     | 
| 91 | 
         
            +
             
     | 
| 92 | 
         
            +
            ### 效果评估 Performance
         
     | 
| 93 | 
         
            +
             
     | 
| 94 | 
         
            +
            <img src=""https://huggingface.co/IDEA-CCNL/Ziya-LLaMA-13B-v1/resolve/main/pk.png"" width=1000 height=600>
         
     | 
| 95 | 
         
            +
             
     | 
| 96 | 
         
            +
            ## <span id=""jump""> 使用 Usage </span>
         
     | 
| 97 | 
         
            +
             
     | 
| 98 | 
         
            +
            由于LLaMA权重的许可限制,该模型不能用于商业用途,请严格遵守LLaMA的使用政策。考虑到LLaMA权重的许可证限制,我们无法直接发布完整的模型权重。因此,我们使用了[FastChat开源工具](https://github.com/lm-sys/FastChat/blob/main/fastchat/model/apply_delta.py)作为基础,并对其进行了进一步的优化。我们计算并发布了Ziya-LLaMA-13B-v1权重与原始LLaMA权重之间的差值。用户可以按照以下步骤操作以获得Ziya-LLaMA-13B-v1完整权重,具体步骤如下:
         
     | 
| 99 | 
         
            +
             
     | 
| 100 | 
         
            +
            Step 1:获取[LLaMA](https://docs.google.com/forms/d/e/1FAIpQLSfqNECQnMkycAp2jP4Z9TFX0cGR4uf7b_fBxjY_OjhJILlKGA/viewform)权重并转成Hugging Face Transformers模型格式,可参考转换[脚本](https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/convert_llama_weights_to_hf.py)(若已经有huggingface权重则跳过)
         
     | 
| 101 | 
         
            +
            ```
         
     | 
| 102 | 
         
            +
            python src/transformers/models/llama/convert_llama_weights_to_hf.py \
         
     | 
| 103 | 
         
            +
                --input_dir /path/to/downloaded/llama/weights --model_size 13B --output_dir /output/path
         
     | 
| 104 | 
         
            +
            ```
         
     | 
| 105 | 
         
            +
             
     | 
| 106 | 
         
            +
            Step 2:下载Ziya-LLaMA-13B-v1的delta权重以及step 1中转换好的原始LLaMA权重,使用如下脚本转换:https://github.com/IDEA-CCNL/Fengshenbang-LM/blob/main/fengshen/utils/apply_delta.py
         
     | 
| 107 | 
         
            +
            ```
         
     | 
| 108 | 
         
            +
            python3 -m apply_delta --base ~/model_weights/llama-13b --target ~/model_weights/Ziya-LLaMA-13B --delta ~/model_weights/Ziya-LLaMA-13B-v1
         
     | 
| 109 | 
         
            +
            ```
         
     | 
| 110 | 
         
            +
             
     | 
| 111 | 
         
            +
            Step 3: 加载step 2得到的模型推理
         
     | 
| 112 | 
         
            +
            ```python3
         
     | 
| 113 | 
         
            +
            from transformers import AutoTokenizer
         
     | 
| 114 | 
         
            +
            from transformers import LlamaForCausalLM
         
     | 
| 115 | 
         
            +
            import torch
         
     | 
| 116 | 
         
            +
             
     | 
| 117 | 
         
            +
            device = torch.device(""cuda"")
         
     | 
| 118 | 
         
            +
            ckpt = '基于delta参数合并后的完整模型权重'
         
     | 
| 119 | 
         
            +
             
     | 
| 120 | 
         
            +
            query=""帮我写一份去西安的旅游计划""
         
     | 
| 121 | 
         
            +
            model = LlamaForCausalLM.from_pretrained(ckpt, torch_dtype=torch.float16, device_map=""auto"")
         
     | 
| 122 | 
         
            +
            tokenizer = AutoTokenizer.from_pretrained(ckpt, use_fast=False)
         
     | 
| 123 | 
         
            +
            inputs = '<human>:' + query.strip() + '\n<bot>:'
         
     | 
| 124 | 
         
            +
                  
         
     | 
| 125 | 
         
            +
            input_ids = tokenizer(inputs, return_tensors=""pt"").input_ids.to(device)
         
     | 
| 126 | 
         
            +
            generate_ids = model.generate(
         
     | 
| 127 | 
         
            +
                        input_ids,
         
     | 
| 128 | 
         
            +
                        max_new_tokens=1024, 
         
     | 
| 129 | 
         
            +
                        do_sample = True, 
         
     | 
| 130 | 
         
            +
                        top_p = 0.85, 
         
     | 
| 131 | 
         
            +
                        temperature = 1.0, 
         
     | 
| 132 | 
         
            +
                        repetition_penalty=1., 
         
     | 
| 133 | 
         
            +
                        eos_token_id=2, 
         
     | 
| 134 | 
         
            +
                        bos_token_id=1, 
         
     | 
| 135 | 
         
            +
                        pad_token_id=0)
         
     | 
| 136 | 
         
            +
            output = tokenizer.batch_decode(generate_ids)[0]
         
     | 
| 137 | 
         
            +
            print(output)
         
     | 
| 138 | 
         
            +
             
     | 
| 139 | 
         
            +
            ```
         
     | 
| 140 | 
         
            +
NOTE: Due to the licensing restrictions on the LLaMA weights, this model cannot be used for commercial purposes. Please strictly respect LLaMA's usage policy. Because of these licensing limitations, we are unable to release the complete model weights directly. Instead, we built on [the open-source FastChat tool](https://github.com/lm-sys/FastChat/blob/main/fastchat/model/apply_delta.py), further optimized it, and released the differences between the Ziya-LLaMA-13B-v1 weights and the original LLaMA weights. Users can follow the steps below to obtain the complete Ziya-LLaMA-13B-v1 weights:
         
     | 
| 141 | 
         
            +
             
     | 
| 142 | 
         
            +
            Step 1: Obtain the [LLaMA](https://huggingface.co/docs/transformers/main/en/model_doc/llama#overview) weights and convert them into the Hugging Face Transformers format. You can refer to the [script](https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/convert_llama_weights_to_hf.py) (skip this step if you already have the Hugging Face weights).
         
     | 
| 143 | 
         
            +
            ```
         
     | 
| 144 | 
         
            +
            python src/transformers/models/llama/convert_llama_weights_to_hf.py \
         
     | 
| 145 | 
         
            +
                --input_dir /path/to/downloaded/llama/weights --model_size 13B --output_dir /output/path
         
     | 
| 146 | 
         
            +
            ```
         
     | 
| 147 | 
         
            +
             
     | 
| 148 | 
         
            +
            Step 2: Download the delta weights for Ziya-LLaMA-13B-v1 and the pre-converted original LLaMA weights from step 1. Use the following script for conversion: https://github.com/IDEA-CCNL/Fengshenbang-LM/blob/main/fengshen/utils/apply_delta.py
         
     | 
| 149 | 
         
            +
            ```
         
     | 
| 150 | 
         
            +
python3 -m apply_delta --base ~/model_weights/llama-13b --target ~/model_weights/Ziya-LLaMA-13B --delta ~/model_weights/Ziya-LLaMA-13B-v1  # delta weights downloaded from Hugging Face
         
     | 
| 151 | 
         
            +
            ```
         
     | 
| 152 | 
         
            +
            Step 3: Load the model obtained in Step 2 for inference.
         
     | 
| 153 | 
         
            +
            ```python3
         
     | 
| 154 | 
         
            +
            from transformers import AutoTokenizer
         
     | 
| 155 | 
         
            +
            from transformers import LlamaForCausalLM
         
     | 
| 156 | 
         
            +
            import torch
         
     | 
| 157 | 
         
            +
             
     | 
| 158 | 
         
            +
             
     | 
| 159 | 
         
            +
            device = torch.device(""cuda"")
         
     | 
| 160 | 
         
            +
ckpt = 'path/to/Ziya-LLaMA-13B-merged'  # the complete model weights merged from the delta in Step 2
         
     | 
| 161 | 
         
            +
             
     | 
| 162 | 
         
            +
            query=""帮我写一份去西安的旅游计划""
         
     | 
| 163 | 
         
            +
            model = LlamaForCausalLM.from_pretrained(ckpt, torch_dtype=torch.float16, device_map=""auto"")
         
     | 
| 164 | 
         
            +
            tokenizer = AutoTokenizer.from_pretrained(ckpt, use_fast=False)
         
     | 
| 165 | 
         
            +
            inputs = '<human>:' + query.strip() + '\n<bot>:'
         
     | 
| 166 | 
         
            +
                  
         
     | 
| 167 | 
         
            +
            input_ids = tokenizer(inputs, return_tensors=""pt"").input_ids.to(device)
         
     | 
| 168 | 
         
            +
            generate_ids = model.generate(
         
     | 
| 169 | 
         
            +
                        input_ids,
         
     | 
| 170 | 
         
            +
                        max_new_tokens=1024, 
         
     | 
| 171 | 
         
            +
                        do_sample = True, 
         
     | 
| 172 | 
         
            +
                        top_p = 0.85, 
         
     | 
| 173 | 
         
            +
                        temperature = 1.0, 
         
     | 
| 174 | 
         
            +
                        repetition_penalty=1., 
         
     | 
| 175 | 
         
            +
                        eos_token_id=2, 
         
     | 
| 176 | 
         
            +
                        bos_token_id=1, 
         
     | 
| 177 | 
         
            +
                        pad_token_id=0)
         
     | 
| 178 | 
         
            +
            output = tokenizer.batch_decode(generate_ids)[0]
         
     | 
| 179 | 
         
            +
            print(output)
         
     | 
| 180 | 
         
            +
             
     | 
| 181 | 
         
            +
            ```
         
     | 
| 182 | 
         
            +
             
     | 
| 183 | 
         
            +
            ## 微调示例 Finetune Example
         
     | 
| 184 | 
         
            +
             
     | 
| 185 | 
         
            +
            Refer to [ziya_finetune](https://github.com/IDEA-CCNL/Fengshenbang-LM/tree/main/fengshen/examples/ziya_llama)
         
     | 
| 186 | 
         
            +
             
     | 
| 187 | 
         
            +
            ## 推理量化示例 Inference & Quantization Example
         
     | 
| 188 | 
         
            +
             
     | 
| 189 | 
         
            +
            Refer to [ziya_inference](https://github.com/IDEA-CCNL/Fengshenbang-LM/tree/main/fengshen/examples/ziya_inference)
         
     | 
| 190 | 
         
            +
             
     | 
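For a quick try without the full `ziya_inference` recipe, the hedged sketch below loads the merged weights from Step 2 in 8-bit via `transformers`. It assumes the `bitsandbytes` package and a CUDA GPU are available; the checkpoint path is a placeholder, and the generation settings simply mirror the Usage section above rather than the official example:

```python3
from transformers import AutoTokenizer, LlamaForCausalLM

# placeholder: the complete weights merged from the delta in Step 2
ckpt = 'path/to/Ziya-LLaMA-13B-merged'

# 8-bit loading (assumption: bitsandbytes is installed and a CUDA GPU is available)
model = LlamaForCausalLM.from_pretrained(ckpt, load_in_8bit=True, device_map='auto')
tokenizer = AutoTokenizer.from_pretrained(ckpt, use_fast=False)

query = '帮我写一份去西安的旅游计划'  # Help me write a travel plan for Xi'an
inputs = '<human>:' + query.strip() + '\n<bot>:'
input_ids = tokenizer(inputs, return_tensors='pt').input_ids.to(model.device)

generate_ids = model.generate(
    input_ids,
    max_new_tokens=1024,
    do_sample=True,
    top_p=0.85,
    temperature=1.0,
    eos_token_id=2,
    bos_token_id=1,
    pad_token_id=0)
print(tokenizer.batch_decode(generate_ids)[0])
```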
| 191 | 
         
            +
            ## 引用 Citation
         
     | 
| 192 | 
         
            +
             
     | 
| 193 | 
         
            +
            如果您在您的工作中使用了我们的模型,可以引用我们的[论文](https://arxiv.org/abs/2210.08590):
         
     | 
| 194 | 
         
            +
             
     | 
| 195 | 
         
            +
If you use this resource in your work, please cite our [paper](https://arxiv.org/abs/2210.08590):
         
     | 
| 196 | 
         
            +
             
     | 
| 197 | 
         
            +
            ```text
         
     | 
| 198 | 
         
            +
            @article{fengshenbang,
         
     | 
| 199 | 
         
            +
              author    = {Jiaxing Zhang and Ruyi Gan and Junjie Wang and Yuxiang Zhang and Lin Zhang and Ping Yang and Xinyu Gao and Ziwei Wu and Xiaoqun Dong and Junqing He and Jianheng Zhuo and Qi Yang and Yongfeng Huang and Xiayu Li and Yanghan Wu and Junyu Lu and Xinyu Zhu and Weifeng Chen and Ting Han and Kunhao Pan and Rui Wang and Hao Wang and Xiaojun Wu and Zhongshen Zeng and Chongpei Chen},
         
     | 
| 200 | 
         
            +
              title     = {Fengshenbang 1.0: Being the Foundation of Chinese Cognitive Intelligence},
         
     | 
| 201 | 
         
            +
              journal   = {CoRR},
         
     | 
| 202 | 
         
            +
              volume    = {abs/2209.02970},
         
     | 
| 203 | 
         
            +
              year      = {2022}
         
     | 
| 204 | 
         
            +
            }
         
     | 
| 205 | 
         
            +
            ```
         
     | 
| 206 | 
         
            +
             
     | 
| 207 | 
         
            +
            You can also cite our [website](https://github.com/IDEA-CCNL/Fengshenbang-LM/):
         
     | 
| 208 | 
         
            +
             
     | 
| 209 | 
         
            +
            欢迎引用我们的[网站](https://github.com/IDEA-CCNL/Fengshenbang-LM/):
         
     | 
| 210 | 
         
            +
            ```text
         
     | 
| 211 | 
         
            +
            @misc{Fengshenbang-LM,
         
     | 
| 212 | 
         
            +
              title={Fengshenbang-LM},
         
     | 
| 213 | 
         
            +
              author={IDEA-CCNL},
         
     | 
| 214 | 
         
            +
              year={2021},
         
     | 
| 215 | 
         
            +
              howpublished={\url{https://github.com/IDEA-CCNL/Fengshenbang-LM}},
         
     | 
| 216 | 
         
            +
            }
         
     | 
| 217 | 
         
            +
            ```
         
     | 
| 218 | 
         
            +
            ","{""id"": ""IDEA-CCNL/Ziya-LLaMA-13B-v1"", ""author"": ""IDEA-CCNL"", ""sha"": ""64d931f346e1a49ea3bbca07a83137075bab1c66"", ""last_modified"": ""2023-09-13 08:50:47+00:00"", ""created_at"": ""2023-05-16 10:32:58+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 371, ""downloads_all_time"": null, ""likes"": 274, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""llama"", ""text-generation"", ""en"", ""zh"", ""arxiv:2210.08590"", ""license:gpl-3.0"", ""autotrain_compatible"", ""text-generation-inference"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\n- zh\nlicense: gpl-3.0\ninference: false"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": {""__type"": ""AddedToken"", ""content"": ""<s>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""eos_token"": {""__type"": ""AddedToken"", ""content"": ""</s>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""pad_token"": null, ""unk_token"": {""__type"": ""AddedToken"", ""content"": ""<unk>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pk.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00028.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00028.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00003-of-00028.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00004-of-00028.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00005-of-00028.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00006-of-00028.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00007-of-00028.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00008-of-00028.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00009-of-00028.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00010-of-00028.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00011-of-00028.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00012-of-00028.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00013-of-00028.bin', 
size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00014-of-00028.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00015-of-00028.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00016-of-00028.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00017-of-00028.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00018-of-00028.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00019-of-00028.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00020-of-00028.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00021-of-00028.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00022-of-00028.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00023-of-00028.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00024-of-00028.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00025-of-00028.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00026-of-00028.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00027-of-00028.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00028-of-00028.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Intel/low_bit_open_llm_leaderboard"", ""BAAI/open_cn_llm_leaderboard"", ""gsaivinay/open_llm_leaderboard"", ""IDEA-CCNL/Ziya-BLIP2-14B-Visual-v1-Demo"", ""GTBench/GTBench"", ""Vikhrmodels/small-shlepa-lb"", ""IDEA-CCNL/Ziya-v1"", ""kz-transformers/kaz-llm-lb"", ""felixz/open_llm_leaderboard"", ""OPTML-Group/UnlearnCanvas-Benchmark"", ""BAAI/open_flageval_vlm_leaderboard"", ""b1sheng/kg_llm_leaderboard_test"", ""neubla/neubla-llm-evaluation-board"", ""rodrigomasini/data_only_open_llm_leaderboard"", ""Docfile/open_llm_leaderboard"", ""gogger/IDEA-CCNL-Ziya-LLaMA-13B-v1"", ""smothiki/open_llm_leaderboard"", ""blackwingedkite/gutalk"", ""pngwn/open_llm_leaderboard"", ""pngwn/open_llm_leaderboard_two"", ""blackwingedkite/alpaca2_clas"", ""0x1668/open_llm_leaderboard"", ""pngwn/open_llm_leaderboard-check"", ""asir0z/open_llm_leaderboard"", ""kbmlcoding/open_llm_leaderboard_free"", ""aichampions/open_llm_leaderboard"", ""xiwang444/ZCQA"", ""Adeco/open_llm_leaderboard"", ""xiwang444/ZJQA"", ""anirudh937/open_llm_leaderboard"", ""smothiki/open_llm_leaderboard2"", ""mjalg/IFEvalTR""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-09-13 08:50:47+00:00"", ""cardData"": ""language:\n- en\n- zh\nlicense: gpl-3.0\ninference: false"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""64635bda12814d7541785e68"", ""modelId"": ""IDEA-CCNL/Ziya-LLaMA-13B-v1"", ""usedStorage"": 
86076558304}",0,,0,,0,,0,,0,"BAAI/open_cn_llm_leaderboard, BAAI/open_flageval_vlm_leaderboard, GTBench/GTBench, IDEA-CCNL/Ziya-BLIP2-14B-Visual-v1-Demo, IDEA-CCNL/Ziya-v1, Intel/low_bit_open_llm_leaderboard, OPTML-Group/UnlearnCanvas-Benchmark, Vikhrmodels/small-shlepa-lb, blackwingedkite/gutalk, felixz/open_llm_leaderboard, gsaivinay/open_llm_leaderboard, huggingface/InferenceSupport/discussions/new?title=IDEA-CCNL/Ziya-LLaMA-13B-v1&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BIDEA-CCNL%2FZiya-LLaMA-13B-v1%5D(%2FIDEA-CCNL%2FZiya-LLaMA-13B-v1)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kz-transformers/kaz-llm-lb",13
         
     | 
    	
        bce-embedding-base_v1_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv
    ADDED
    
    | 
         @@ -0,0 +1,630 @@ 
     | 
|
| 1 | 
         
            +
            model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
         
     | 
| 2 | 
         
            +
            maidalun1020/bce-embedding-base_v1,"---
         
     | 
| 3 | 
         
            +
            license: apache-2.0
         
     | 
| 4 | 
         
            +
            pipeline_tag: feature-extraction
         
     | 
| 5 | 
         
            +
            tags:
         
     | 
| 6 | 
         
            +
            - sentence-transformers
         
     | 
| 7 | 
         
            +
            - feature-extraction
         
     | 
| 8 | 
         
            +
            - sentence-similarity
         
     | 
| 9 | 
         
            +
            - transformers
         
     | 
| 10 | 
         
            +
            language:
         
     | 
| 11 | 
         
            +
            - en
         
     | 
| 12 | 
         
            +
            - zh
         
     | 
| 13 | 
         
            +
            ---
         
     | 
| 14 | 
         
            +
            <!--
         
     | 
| 15 | 
         
            +
             * @Description: 
         
     | 
| 16 | 
         
            +
             * @Author: shenlei
         
     | 
| 17 | 
         
            +
             * @Date: 2023-12-19 10:31:41
         
     | 
| 18 | 
         
            +
             * @LastEditTime: 2024-01-09 23:52:00
         
     | 
| 19 | 
         
            +
             * @LastEditors: shenlei
         
     | 
| 20 | 
         
            +
            -->
         
     | 
| 21 | 
         
            +
            <h1 align=""center"">BCEmbedding: Bilingual and Crosslingual Embedding for RAG</h1>
         
     | 
| 22 | 
         
            +
             
     | 
| 23 | 
         
            +
            <p align=""center"">
         
     | 
| 24 | 
         
            +
              <a href=""https://github.com/netease-youdao/BCEmbedding/blob/master/LICENSE"">
         
     | 
| 25 | 
         
            +
                <img src=""https://img.shields.io/badge/license-Apache--2.0-yellow"">
         
     | 
| 26 | 
         
            +
              </a>
         
     | 
| 27 | 
         
            +
              <a href=""https://twitter.com/YDopensource"">
         
     | 
| 28 | 
         
            +
                <img src=""https://img.shields.io/badge/follow-%40YDOpenSource-1DA1F2?logo=twitter&style={style}"">
         
     | 
| 29 | 
         
            +
              </a>
         
     | 
| 30 | 
         
            +
            </p>
         
     | 
| 31 | 
         
            +
             
     | 
| 32 | 
         
            +
            最新、最详细的bce-embedding-base_v1相关信息,请移步(The latest ""Updates"" should be checked in):
         
     | 
| 33 | 
         
            +
              
         
     | 
| 34 | 
         
            +
            <p align=""left"">
         
     | 
| 35 | 
         
            +
              <a href=""https://github.com/netease-youdao/BCEmbedding"">GitHub</a>
         
     | 
| 36 | 
         
            +
            </p>
         
     | 
| 37 | 
         
            +
             
     | 
| 38 | 
         
            +
            ## 主要特点(Key Features):
         
     | 
| 39 | 
         
            +
            - 中英双语,以及中英跨语种能力(Bilingual and Crosslingual capability in English and Chinese);
         
     | 
| 40 | 
         
            +
            - RAG优化,适配更多真实业务场景(RAG adaptation for more domains, including Education, Law, Finance, Medical, Literature, FAQ, Textbook, Wikipedia, etc.);
         
     | 
| 41 | 
         
            +
            - 方便集成进langchain和llamaindex(Easy integrations for langchain and llamaindex in <a href=""https://github.com/netease-youdao/BCEmbedding"">BCEmbedding</a>)。
         
     | 
| 42 | 
         
            +
            - `EmbeddingModel`不需要“精心设计”instruction,尽可能召回有用片段。 (No need for ""instruction"")
         
     | 
| 43 | 
         
            +
- **最佳实践(Best practice)** :embedding召回top50-100片段,reranker对这50-100片段精排,最后取top5-10片段。(1. Get the top 50-100 passages with [bce-embedding-base_v1](https://huggingface.co/maidalun1020/bce-embedding-base_v1) for ""`recall`""; 2. Rerank those passages with [bce-reranker-base_v1](https://huggingface.co/maidalun1020/bce-reranker-base_v1) and keep the top 5-10 for ""`precision`"".) A minimal sketch of this pipeline is shown below.
         
     | 
| 44 | 
         
            +
             
     | 
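A minimal sketch of the two-stage best practice above is shown here. It is illustrative only: the query and passages are placeholders, it assumes `EmbeddingModel.encode` returns L2-normalized numpy vectors (as the `transformers` snippet later in this card suggests), and the exact structure of `rerank_results` should be checked against the `BCEmbedding` documentation.

```python
import numpy as np
from BCEmbedding import EmbeddingModel, RerankerModel

# placeholder query and candidate passages
query = 'what is semantic search?'
passages = [f'passage_{i}' for i in range(1000)]

embedding_model = EmbeddingModel(model_name_or_path='maidalun1020/bce-embedding-base_v1')
reranker_model = RerankerModel(model_name_or_path='maidalun1020/bce-reranker-base_v1')

# stage 1 (recall): embed everything and keep the top 50-100 passages by cosine similarity
# (assuming the embeddings are L2-normalized, the dot product equals cosine similarity)
query_emb = embedding_model.encode([query])
passage_emb = embedding_model.encode(passages)
similarities = (passage_emb @ query_emb.T).squeeze(-1)
top_indices = np.argsort(-similarities)[:100]
candidates = [passages[i] for i in top_indices]

# stage 2 (precision): rerank the recalled passages and keep the top 5-10
rerank_results = reranker_model.rerank(query, candidates)
```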
| 45 | 
         
            +
            ## News:
         
     | 
| 46 | 
         
            +
            - `BCEmbedding`技术博客( **Technical Blog** ): [为RAG而生-BCEmbedding技术报告](https://zhuanlan.zhihu.com/p/681370855)
         
     | 
| 47 | 
         
            +
            - Related link for **RerankerModel** : [bce-reranker-base_v1](https://huggingface.co/maidalun1020/bce-reranker-base_v1)
         
     | 
| 48 | 
         
            +
             
     | 
| 49 | 
         
            +
            ## Third-party Examples:
         
     | 
| 50 | 
         
            +
            - RAG applications: [QAnything](https://github.com/netease-youdao/qanything), [HuixiangDou](https://github.com/InternLM/HuixiangDou), [ChatPDF](https://github.com/shibing624/ChatPDF).
         
     | 
| 51 | 
         
            +
            - Efficient inference framework: [ChatLLM.cpp](https://github.com/foldl/chatllm.cpp), [Xinference](https://github.com/xorbitsai/inference), [mindnlp (Huawei GPU, 华为GPU)](https://github.com/mindspore-lab/mindnlp/tree/master/llm/inference/bce).
         
     | 
| 52 | 
         
            +
             
     | 
| 53 | 
         
            +
            
         
     | 
| 54 | 
         
            +
            
         
     | 
| 55 | 
         
            +
             
     | 
| 56 | 
         
            +
            -----------------------------------------
         
     | 
| 57 | 
         
            +
            <details open=""open"">
         
     | 
| 58 | 
         
            +
            <summary>Click to Open Contents</summary>
         
     | 
| 59 | 
         
            +
             
     | 
| 60 | 
         
            +
            - <a href=""#-bilingual-and-crosslingual-superiority"" target=""_Self"">🌐 Bilingual and Crosslingual Superiority</a>
         
     | 
| 61 | 
         
            +
            - <a href=""#-key-features"" target=""_Self"">💡 Key Features</a>
         
     | 
| 62 | 
         
            +
            - <a href=""#-latest-updates"" target=""_Self"">🚀 Latest Updates</a>
         
     | 
| 63 | 
         
            +
            - <a href=""#-model-list"" target=""_Self"">🍎 Model List</a>
         
     | 
| 64 | 
         
            +
            - <a href=""#-manual"" target=""_Self"">📖 Manual</a>
         
     | 
| 65 | 
         
            +
              - <a href=""#installation"" target=""_Self"">Installation</a>
         
     | 
| 66 | 
         
            +
              - <a href=""#quick-start"" target=""_Self"">Quick Start (`transformers`, `sentence-transformers`)</a>
         
     | 
| 67 | 
         
            +
              - <a href=""#integrations-for-rag-frameworks"" target=""_Self"">Integrations for RAG Frameworks (`langchain`, `llama_index`)</a>
         
     | 
| 68 | 
         
            +
            - <a href=""#%EF%B8%8F-evaluation"" target=""_Self"">⚙️ Evaluation</a>
         
     | 
| 69 | 
         
            +
              - <a href=""#evaluate-semantic-representation-by-mteb"" target=""_Self"">Evaluate Semantic Representation by MTEB</a>
         
     | 
| 70 | 
         
            +
              - <a href=""#evaluate-rag-by-llamaindex"" target=""_Self"">Evaluate RAG by LlamaIndex</a>
         
     | 
| 71 | 
         
            +
            - <a href=""#-leaderboard"" target=""_Self"">📈 Leaderboard</a>
         
     | 
| 72 | 
         
            +
              - <a href=""#semantic-representation-evaluations-in-mteb"" target=""_Self"">Semantic Representation Evaluations in MTEB</a>
         
     | 
| 73 | 
         
            +
              - <a href=""#rag-evaluations-in-llamaindex"" target=""_Self"">RAG Evaluations in LlamaIndex</a>
         
     | 
| 74 | 
         
            +
            - <a href=""#-youdaos-bcembedding-api"" target=""_Self"">🛠 Youdao's BCEmbedding API</a>
         
     | 
| 75 | 
         
            +
            - <a href=""#-wechat-group"" target=""_Self"">🧲 WeChat Group</a>
         
     | 
| 76 | 
         
            +
            - <a href=""#%EF%B8%8F-citation"" target=""_Self"">✏️ Citation</a>
         
     | 
| 77 | 
         
            +
            - <a href=""#-license"" target=""_Self"">🔐 License</a>
         
     | 
| 78 | 
         
            +
            - <a href=""#-related-links"" target=""_Self"">🔗 Related Links</a>
         
     | 
| 79 | 
         
            +
             
     | 
| 80 | 
         
            +
            </details>
         
     | 
| 81 | 
         
            +
            <br>
         
     | 
| 82 | 
         
            +
             
     | 
| 83 | 
         
            +
            **B**ilingual and **C**rosslingual **Embedding** (`BCEmbedding`), developed by NetEase Youdao, encompasses `EmbeddingModel` and `RerankerModel`. The `EmbeddingModel` specializes in generating semantic vectors, playing a crucial role in semantic search and question-answering, and the `RerankerModel` excels at refining search results and ranking tasks. 
         
     | 
| 84 | 
         
            +
             
     | 
| 85 | 
         
            +
`BCEmbedding` serves as the cornerstone of Youdao's Retrieval Augmented Generation (RAG) implementation, notably [QAnything](http://qanything.ai) [[github](https://github.com/netease-youdao/qanything)], an open-source implementation widely integrated in various Youdao products like [Youdao Speed Reading](https://read.youdao.com/#/home) and [Youdao Translation](https://fanyi.youdao.com/download-Mac?keyfrom=fanyiweb_navigation).
         
     | 
| 86 | 
         
            +
             
     | 
| 87 | 
         
            +
Distinguished by its bilingual and crosslingual proficiency, `BCEmbedding` excels at bridging the linguistic gap between Chinese and English, achieving:
         
     | 
| 88 | 
         
            +
- **High performance on <a href=""#semantic-representation-evaluations-in-mteb"">Semantic Representation Evaluations in MTEB</a>**;
         
     | 
| 89 | 
         
            +
            - **A new benchmark in the realm of <a href=""#rag-evaluations-in-llamaindex"">RAG Evaluations in LlamaIndex</a>**.
         
     | 
| 90 | 
         
            +
             
     | 
| 91 | 
         
            +
              `BCEmbedding`是由网易有道开发的双语和跨语种语义表征算法模型库,其中包含`EmbeddingModel`和`RerankerModel`两类基础模型。`EmbeddingModel`专门用于生成语义向量,在语义搜索和问答中起着关键作用,而`RerankerModel`擅长优化语义搜索结果和语义相关顺序精排。
         
     | 
| 92 | 
         
            +
              
         
     | 
| 93 | 
         
            +
              `BCEmbedding`作为有道的检索增强生成式应用(RAG)的基石,特别是在[QAnything](http://qanything.ai) [[github](https://github.com/netease-youdao/qanything)]中发挥着重要作用。QAnything作为一个网易有道开源项目,在有道许多产品中有很好的应用实践,比如[有道速读](https://read.youdao.com/#/home)和[有道翻译](https://fanyi.youdao.com/download-Mac?keyfrom=fanyiweb_navigation)
         
     | 
| 94 | 
         
            +
              
         
     | 
| 95 | 
         
            +
              `BCEmbedding`以其出色的双语和跨语种能力而著称,在语义检索中消除中英语言之间的差异,从而实现:
         
     | 
| 96 | 
         
            +
              - **强大的双语和跨语种语义表征能力【<a href=""#semantic-representation-evaluations-in-mteb"">基于MTEB的语义表征评测指标</a>】。**
         
     | 
| 97 | 
         
            +
              - **基于LlamaIndex的RAG评测,表现SOTA【<a href=""#rag-evaluations-in-llamaindex"">基于LlamaIndex的RAG评测指标</a>】。**
         
     | 
| 98 | 
         
            +
             
     | 
| 99 | 
         
            +
            ## 🌐 Bilingual and Crosslingual Superiority
         
     | 
| 100 | 
         
            +
             
     | 
| 101 | 
         
            +
            Existing embedding models often encounter performance challenges in bilingual and crosslingual scenarios, particularly in Chinese, English and their crosslingual tasks. `BCEmbedding`, leveraging the strength of Youdao's translation engine, excels in delivering superior performance across monolingual, bilingual, and crosslingual settings.
         
     | 
| 102 | 
         
            +
             
     | 
| 103 | 
         
            +
            `EmbeddingModel` supports ***Chinese (ch) and English (en)*** (more languages support will come soon), while `RerankerModel` supports ***Chinese (ch), English (en), Japanese (ja) and Korean (ko)***.
         
     | 
| 104 | 
         
            +
             
     | 
| 105 | 
         
            +
              现有的单个语义表征模型在双语和跨语种场景中常常表现不佳,特别是在中文、英文及其跨语种任务中。`BCEmbedding`充分利用有道翻译引擎的优势,实现只需一个模型就可以在单语、双语和跨语种场景中表现出卓越的性能。
         
     | 
| 106 | 
         
            +
              
         
     | 
| 107 | 
         
            +
              `EmbeddingModel`支持***中文和英文***(之后会支持更多语种);`RerankerModel`支持***中文,英文,日文和韩文***。
         
     | 
| 108 | 
         
            +
             
     | 
| 109 | 
         
            +
            ## 💡 Key Features
         
     | 
| 110 | 
         
            +
             
     | 
| 111 | 
         
            +
            - **Bilingual and Crosslingual Proficiency**: Powered by Youdao's translation engine, excelling in Chinese, English and their crosslingual retrieval task, with upcoming support for additional languages.
         
     | 
| 112 | 
         
            +
             
     | 
| 113 | 
         
            +
            - **RAG-Optimized**: Tailored for diverse RAG tasks including **translation, summarization, and question answering**, ensuring accurate **query understanding**. See <a href=#rag-evaluations-in-llamaindex>RAG Evaluations in LlamaIndex</a>.
         
     | 
| 114 | 
         
            +
             
     | 
| 115 | 
         
            +
- **Efficient and Precise Retrieval**: `EmbeddingModel` uses a dual-encoder for efficient first-stage retrieval, and `RerankerModel` uses a cross-encoder for higher-precision semantic ranking in the second stage.
         
     | 
| 116 | 
         
            +
             
     | 
| 117 | 
         
            +
            - **Broad Domain Adaptability**: Trained on diverse datasets for superior performance across various fields.
         
     | 
| 118 | 
         
            +
             
     | 
| 119 | 
         
            +
            - **User-Friendly Design**: Instruction-free, versatile use for multiple tasks without specifying query instruction for each task.
         
     | 
| 120 | 
         
            +
             
     | 
| 121 | 
         
            +
- **Meaningful Reranking Scores**: `RerankerModel` provides meaningful relevance scores, not just a ranking, to improve result quality and optimize large language model performance.
         
     | 
| 122 | 
         
            +
             
     | 
| 123 | 
         
            +
            - **Proven in Production**: Successfully implemented and validated in Youdao's products.
         
     | 
| 124 | 
         
            +
             
     | 
| 125 | 
         
            +
              - **双语和跨语种能力**:基于有道翻译引擎的强大能力,我们的`BCEmbedding`具备强大的中英双语和跨语种语义表征能力。
         
     | 
| 126 | 
         
            +
              
         
     | 
| 127 | 
         
            +
              - **RAG适配**:面向RAG做了针对性优化,可以适配大多数相关任务,比如**翻译,摘要,问答**等。此外,针对**问题理解**(query understanding)也做了针对优化,详见 <a href=""#rag-evaluations-in-llamaindex"">基于LlamaIndex的RAG评测指标</a>。
         
     | 
| 128 | 
         
            +
              
         
     | 
| 129 | 
         
            +
              - **高效且精确的语义检索**:`EmbeddingModel`采用双编码器,可以在第一阶段实现高效的语义检索。`RerankerModel`采用交叉编码器,可以在第二阶段实现更高精度的语义顺序精排。
         
     | 
| 130 | 
         
            +
              
         
     | 
| 131 | 
         
            +
- **更好的领域泛化性**:为了在更多场景实现更好的效果,我们收集了多种多样的领域数据。
         
     | 
| 132 | 
         
            +
              
         
     | 
| 133 | 
         
            +
              - **用户友好**:语义检索时不需要特殊指令前缀。也就是,你不需要为各种任务绞尽脑汁设计指令前缀。
         
     | 
| 134 | 
         
            +
              
         
     | 
| 135 | 
         
            +
              - **有意义的重排序分数**:`RerankerModel`可以提供有意义的语义相关性分数(不仅仅是排序),可以用于过滤无意义文本片段,提高大模型生成效果。
         
     | 
| 136 | 
         
            +
              
         
     | 
| 137 | 
         
            +
              - **产品化检验**:`BCEmbedding`已经被有道众多真实产品检验。
         
     | 
| 138 | 
         
            +
             
     | 
| 139 | 
         
            +
            ## 🚀 Latest Updates
         
     | 
| 140 | 
         
            +
             
     | 
| 141 | 
         
            +
            - ***2024-01-03***: **Model Releases** - [bce-embedding-base_v1](https://huggingface.co/maidalun1020/bce-embedding-base_v1) and [bce-reranker-base_v1](https://huggingface.co/maidalun1020/bce-reranker-base_v1) are available.
         
     | 
| 142 | 
         
            +
- ***2024-01-03***: **Eval Datasets** [[CrosslingualMultiDomainsDataset](https://huggingface.co/datasets/maidalun1020/CrosslingualMultiDomainsDataset)] - Evaluate the performance of RAG using [LlamaIndex](https://github.com/run-llama/llama_index).
         
     | 
| 143 | 
         
            +
- ***2024-01-03***: **Eval Datasets** [[Details](https://github.com/netease-youdao/BCEmbedding/blob/master/BCEmbedding/evaluation/c_mteb/Retrieval.py)] - Evaluate the performance of crosslingual semantic representation using [MTEB](https://github.com/embeddings-benchmark/mteb).
         
     | 
| 144 | 
         
            +
             
     | 
| 145 | 
         
            +
              - ***2024-01-03***: **模型发布** - [bce-embedding-base_v1](https://huggingface.co/maidalun1020/bce-embedding-base_v1)和[bce-reranker-base_v1](https://huggingface.co/maidalun1020/bce-reranker-base_v1)已发布.
         
     | 
| 146 | 
         
            +
              - ***2024-01-03***: **RAG评测数据** [[CrosslingualMultiDomainsDataset](https://huggingface.co/datasets/maidalun1020/CrosslingualMultiDomainsDataset)] - 基于[LlamaIndex](https://github.com/run-llama/llama_index)的RAG评测数据已发布。
         
     | 
| 147 | 
         
            +
              - ***2024-01-03***: **跨语种语义表征评测数据** [[详情](https://github.com/netease-youdao/BCEmbedding/blob/master/BCEmbedding/evaluation/c_mteb/Retrieval.py)] - 基于[MTEB](https://github.com/embeddings-benchmark/mteb)的跨语种评测数据已发布.
         
     | 
| 148 | 
         
            +
             
     | 
| 149 | 
         
            +
            ## 🍎 Model List
         
     | 
| 150 | 
         
            +
             
     | 
| 151 | 
         
            +
            | Model Name | Model Type | Languages | Parameters | Weights |  
         
     | 
| 152 | 
         
            +
            |:-------------------------------|:--------:|:--------:|:--------:|:--------:|  
         
     | 
| 153 | 
         
            +
            | bce-embedding-base_v1 | `EmbeddingModel` | ch, en | 279M | [download](https://huggingface.co/maidalun1020/bce-embedding-base_v1) |  
         
     | 
| 154 | 
         
            +
            | bce-reranker-base_v1 | `RerankerModel` | ch, en, ja, ko | 279M | [download](https://huggingface.co/maidalun1020/bce-reranker-base_v1) |  
         
     | 
| 155 | 
         
            +
             
     | 
| 156 | 
         
            +
            ## 📖 Manual
         
     | 
| 157 | 
         
            +
             
     | 
| 158 | 
         
            +
            ### Installation
         
     | 
| 159 | 
         
            +
             
     | 
| 160 | 
         
            +
            First, create a conda environment and activate it.
         
     | 
| 161 | 
         
            +
             
     | 
| 162 | 
         
            +
            ```bash
         
     | 
| 163 | 
         
            +
            conda create --name bce python=3.10 -y
         
     | 
| 164 | 
         
            +
            conda activate bce
         
     | 
| 165 | 
         
            +
            ```
         
     | 
| 166 | 
         
            +
             
     | 
| 167 | 
         
            +
Then install `BCEmbedding` (minimal installation):
         
     | 
| 168 | 
         
            +
             
     | 
| 169 | 
         
            +
            ```bash
         
     | 
| 170 | 
         
            +
            pip install BCEmbedding==0.1.1
         
     | 
| 171 | 
         
            +
            ```
         
     | 
| 172 | 
         
            +
             
     | 
| 173 | 
         
            +
            Or install from source:
         
     | 
| 174 | 
         
            +
             
     | 
| 175 | 
         
            +
            ```bash
         
     | 
| 176 | 
         
            +
            git clone git@github.com:netease-youdao/BCEmbedding.git
         
     | 
| 177 | 
         
            +
            cd BCEmbedding
         
     | 
| 178 | 
         
            +
            pip install -v -e .
         
     | 
| 179 | 
         
            +
            ```
         
     | 
| 180 | 
         
            +
             
     | 
| 181 | 
         
            +
            ### Quick Start
         
     | 
| 182 | 
         
            +
             
     | 
| 183 | 
         
            +
            #### 1. Based on `BCEmbedding`
         
     | 
| 184 | 
         
            +
             
     | 
| 185 | 
         
            +
Use `EmbeddingModel`; the `cls` [pooler](./BCEmbedding/models/embedding.py#L24) is the default.
         
     | 
| 186 | 
         
            +
             
     | 
| 187 | 
         
            +
            ```python
         
     | 
| 188 | 
         
            +
            from BCEmbedding import EmbeddingModel
         
     | 
| 189 | 
         
            +
             
     | 
| 190 | 
         
            +
            # list of sentences
         
     | 
| 191 | 
         
            +
            sentences = ['sentence_0', 'sentence_1', ...]
         
     | 
| 192 | 
         
            +
             
     | 
| 193 | 
         
            +
            # init embedding model
         
     | 
| 194 | 
         
            +
            model = EmbeddingModel(model_name_or_path=""maidalun1020/bce-embedding-base_v1"")
         
     | 
| 195 | 
         
            +
             
     | 
| 196 | 
         
            +
            # extract embeddings
         
     | 
| 197 | 
         
            +
            embeddings = model.encode(sentences)
         
     | 
| 198 | 
         
            +
            ```
         
     | 
| 199 | 
         
            +
             
     | 
| 200 | 
         
            +
Use `RerankerModel` to calculate relevance scores and rerank:
         
     | 
| 201 | 
         
            +
             
     | 
| 202 | 
         
            +
            ```python
         
     | 
| 203 | 
         
            +
            from BCEmbedding import RerankerModel
         
     | 
| 204 | 
         
            +
             
     | 
| 205 | 
         
            +
            # your query and corresponding passages
         
     | 
| 206 | 
         
            +
            query = 'input_query'
         
     | 
| 207 | 
         
            +
            passages = ['passage_0', 'passage_1', ...]
         
     | 
| 208 | 
         
            +
             
     | 
| 209 | 
         
            +
            # construct sentence pairs
         
     | 
| 210 | 
         
            +
            sentence_pairs = [[query, passage] for passage in passages]
         
     | 
| 211 | 
         
            +
             
     | 
| 212 | 
         
            +
            # init reranker model
         
     | 
| 213 | 
         
            +
            model = RerankerModel(model_name_or_path=""maidalun1020/bce-reranker-base_v1"")
         
     | 
| 214 | 
         
            +
             
     | 
| 215 | 
         
            +
            # method 0: calculate scores of sentence pairs
         
     | 
| 216 | 
         
            +
            scores = model.compute_score(sentence_pairs)
         
     | 
| 217 | 
         
            +
             
     | 
| 218 | 
         
            +
            # method 1: rerank passages
         
     | 
| 219 | 
         
            +
            rerank_results = model.rerank(query, passages)
         
     | 
| 220 | 
         
            +
            ```
         
     | 
| 221 | 
         
            +
             
     | 
| 222 | 
         
            +
            NOTE:
         
     | 
| 223 | 
         
            +
             
     | 
| 224 | 
         
            +
- The [`RerankerModel.rerank`](./BCEmbedding/models/reranker.py#L137) method provides the advanced preprocessing we use in production to construct `sentence_pairs` when the ""passages"" are very long.
         
     | 
| 225 | 
         
            +
             
     | 
| 226 | 
         
            +
            #### 2. Based on `transformers`
         
     | 
| 227 | 
         
            +
             
     | 
| 228 | 
         
            +
            For `EmbeddingModel`:
         
     | 
| 229 | 
         
            +
             
     | 
| 230 | 
         
            +
            ```python
         
     | 
| 231 | 
         
            +
            from transformers import AutoModel, AutoTokenizer
         
     | 
| 232 | 
         
            +
             
     | 
| 233 | 
         
            +
            # list of sentences
         
     | 
| 234 | 
         
            +
            sentences = ['sentence_0', 'sentence_1', ...]
         
     | 
| 235 | 
         
            +
             
     | 
| 236 | 
         
            +
            # init model and tokenizer
         
     | 
| 237 | 
         
            +
            tokenizer = AutoTokenizer.from_pretrained('maidalun1020/bce-embedding-base_v1')
         
     | 
| 238 | 
         
            +
            model = AutoModel.from_pretrained('maidalun1020/bce-embedding-base_v1')
         
     | 
| 239 | 
         
            +
             
     | 
| 240 | 
         
            +
            device = 'cuda'  # if no GPU, set ""cpu""
         
     | 
| 241 | 
         
            +
            model.to(device)
         
     | 
| 242 | 
         
            +
             
     | 
| 243 | 
         
            +
            # get inputs
         
     | 
| 244 | 
         
            +
            inputs = tokenizer(sentences, padding=True, truncation=True, max_length=512, return_tensors=""pt"")
         
     | 
| 245 | 
         
            +
inputs_on_device = {k: v.to(device) for k, v in inputs.items()}
         
     | 
| 246 | 
         
            +
             
     | 
| 247 | 
         
            +
            # get embeddings
         
     | 
| 248 | 
         
            +
            outputs = model(**inputs_on_device, return_dict=True)
         
     | 
| 249 | 
         
            +
            embeddings = outputs.last_hidden_state[:, 0]  # cls pooler
         
     | 
| 250 | 
         
            +
            embeddings = embeddings / embeddings.norm(dim=1, keepdim=True)  # normalize
         
     | 
| 251 | 
         
            +
            ```
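Because the embeddings above are L2-normalized, cosine similarity reduces to a plain dot product. A small follow-up sketch (it reuses the `embeddings` tensor from the snippet above and simply treats the first sentence as the query):

```python
# embeddings: (num_sentences, hidden_size), already L2-normalized above
query_embedding = embeddings[0]      # treat sentence_0 as the query
passage_embeddings = embeddings[1:]  # the remaining sentences as passages

# for normalized vectors, dot product == cosine similarity
similarities = passage_embeddings @ query_embedding

# indices of passages sorted from most to least similar
ranking = similarities.argsort(descending=True)
print(similarities.tolist(), ranking.tolist())
```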
         
     | 
| 252 | 
         
            +
             
     | 
| 253 | 
         
            +
            For `RerankerModel`:
         
     | 
| 254 | 
         
            +
             
     | 
| 255 | 
         
            +
            ```python
         
     | 
| 256 | 
         
            +
            import torch
         
     | 
| 257 | 
         
            +
            from transformers import AutoTokenizer, AutoModelForSequenceClassification
         
     | 
| 258 | 
         
            +
             
     | 
| 259 | 
         
            +
            # init model and tokenizer
         
     | 
| 260 | 
         
            +
            tokenizer = AutoTokenizer.from_pretrained('maidalun1020/bce-reranker-base_v1')
         
     | 
| 261 | 
         
            +
            model = AutoModelForSequenceClassification.from_pretrained('maidalun1020/bce-reranker-base_v1')
         
     | 
| 262 | 
         
            +
             
     | 
| 263 | 
         
            +
device = 'cuda'  # if no GPU is available, set 'cpu'
         
     | 
| 264 | 
         
            +
            model.to(device)
         
     | 
| 265 | 
         
            +
             
     | 
| 266 | 
         
            +
            # get inputs
         
     | 
| 267 | 
         
            +
inputs = tokenizer(sentence_pairs, padding=True, truncation=True, max_length=512, return_tensors="pt")
         
     | 
| 268 | 
         
            +
            inputs_on_device = {k: v.to(device) for k, v in inputs.items()}
         
     | 
| 269 | 
         
            +
             
     | 
| 270 | 
         
            +
            # calculate scores
         
     | 
| 271 | 
         
            +
            scores = model(**inputs_on_device, return_dict=True).logits.view(-1,).float()
         
     | 
| 272 | 
         
            +
            scores = torch.sigmoid(scores)
         
     | 
| 273 | 
         
            +
            ```
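The sigmoid scores above are per-pair relevance scores in `[0, 1]`. To turn them into a ranking (assuming, as elsewhere in this README, that each entry of `sentence_pairs` is a `[query, passage]` pair for the same query), you can simply sort:

```python
# pair each passage with its score and sort by score, highest first
ranked = sorted(
    zip([pair[1] for pair in sentence_pairs], scores.tolist()),
    key=lambda item: item[1],
    reverse=True,
)
for passage, score in ranked:
    print(f"{score:.4f}\t{passage}")
```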
         
     | 
| 274 | 
         
            +
             
     | 
| 275 | 
         
            +
            #### 3. Based on `sentence_transformers`
         
     | 
| 276 | 
         
            +
             
     | 
| 277 | 
         
            +
            For `EmbeddingModel`:
         
     | 
| 278 | 
         
            +
             
     | 
| 279 | 
         
            +
            ```python
         
     | 
| 280 | 
         
            +
            from sentence_transformers import SentenceTransformer
         
     | 
| 281 | 
         
            +
             
     | 
| 282 | 
         
            +
            # list of sentences
         
     | 
| 283 | 
         
            +
            sentences = ['sentence_0', 'sentence_1', ...]
         
     | 
| 284 | 
         
            +
             
     | 
| 285 | 
         
            +
            # init embedding model
         
     | 
| 286 | 
         
            +
## New update for sentence-transformers: first clean up "`SENTENCE_TRANSFORMERS_HOME`/maidalun1020_bce-embedding-base_v1" or "~/.cache/torch/sentence_transformers/maidalun1020_bce-embedding-base_v1" so the new version is downloaded.
         
     | 
| 287 | 
         
            +
model = SentenceTransformer("maidalun1020/bce-embedding-base_v1")
         
     | 
| 288 | 
         
            +
             
     | 
| 289 | 
         
            +
            # extract embeddings
         
     | 
| 290 | 
         
            +
            embeddings = model.encode(sentences, normalize_embeddings=True)
         
     | 
| 291 | 
         
            +
            ```
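For a quick relevance check with `sentence_transformers`, `util.cos_sim` compares a query embedding against the passage embeddings. A small sketch reusing the model above (the query string is just a placeholder):

```python
from sentence_transformers import util

query_embedding = model.encode('your query here', normalize_embeddings=True)
passage_embeddings = model.encode(sentences, normalize_embeddings=True)

# cosine similarity matrix of shape (1, num_sentences)
cos_scores = util.cos_sim(query_embedding, passage_embeddings)
print(cos_scores)
```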
         
     | 
| 292 | 
         
            +
             
     | 
| 293 | 
         
            +
            For `RerankerModel`:
         
     | 
| 294 | 
         
            +
             
     | 
| 295 | 
         
            +
            ```python
         
     | 
| 296 | 
         
            +
            from sentence_transformers import CrossEncoder
         
     | 
| 297 | 
         
            +
             
     | 
| 298 | 
         
            +
            # init reranker model
         
     | 
| 299 | 
         
            +
            model = CrossEncoder('maidalun1020/bce-reranker-base_v1', max_length=512)
         
     | 
| 300 | 
         
            +
             
     | 
| 301 | 
         
            +
            # calculate scores of sentence pairs
         
     | 
| 302 | 
         
            +
            scores = model.predict(sentence_pairs)
         
     | 
| 303 | 
         
            +
            ```
         
     | 
| 304 | 
         
            +
             
     | 
| 305 | 
         
            +
            ### Integrations for RAG Frameworks
         
     | 
| 306 | 
         
            +
             
     | 
| 307 | 
         
            +
            #### 1. Used in `langchain`
         
     | 
| 308 | 
         
            +
             
     | 
| 309 | 
         
            +
            ```python
         
     | 
| 310 | 
         
            +
            from langchain.embeddings import HuggingFaceEmbeddings
         
     | 
| 311 | 
         
            +
            from langchain_community.vectorstores import FAISS
         
     | 
| 312 | 
         
            +
            from langchain_community.vectorstores.utils import DistanceStrategy
         
     | 
| 313 | 
         
            +
             
     | 
| 314 | 
         
            +
            query = 'apples'
         
     | 
| 315 | 
         
            +
            passages = [
         
     | 
| 316 | 
         
            +
                    'I like apples', 
         
     | 
| 317 | 
         
            +
                    'I like oranges', 
         
     | 
| 318 | 
         
            +
                    'Apples and oranges are fruits'
         
     | 
| 319 | 
         
            +
                ]
         
     | 
| 320 | 
         
            +
              
         
     | 
| 321 | 
         
            +
            # init embedding model
         
     | 
| 322 | 
         
            +
            model_name = 'maidalun1020/bce-embedding-base_v1'
         
     | 
| 323 | 
         
            +
            model_kwargs = {'device': 'cuda'}
         
     | 
| 324 | 
         
            +
            encode_kwargs = {'batch_size': 64, 'normalize_embeddings': True, 'show_progress_bar': False}
         
     | 
| 325 | 
         
            +
             
     | 
| 326 | 
         
            +
            embed_model = HuggingFaceEmbeddings(
         
     | 
| 327 | 
         
            +
                model_name=model_name,
         
     | 
| 328 | 
         
            +
                model_kwargs=model_kwargs,
         
     | 
| 329 | 
         
            +
                encode_kwargs=encode_kwargs
         
     | 
| 330 | 
         
            +
              )
         
     | 
| 331 | 
         
            +
             
     | 
| 332 | 
         
            +
            # example #1. extract embeddings
         
     | 
| 333 | 
         
            +
            query_embedding = embed_model.embed_query(query)
         
     | 
| 334 | 
         
            +
            passages_embeddings = embed_model.embed_documents(passages)
         
     | 
| 335 | 
         
            +
             
     | 
| 336 | 
         
            +
            # example #2. langchain retriever example
         
     | 
| 337 | 
         
            +
            faiss_vectorstore = FAISS.from_texts(passages, embed_model, distance_strategy=DistanceStrategy.MAX_INNER_PRODUCT)
         
     | 
| 338 | 
         
            +
             
     | 
| 339 | 
         
            +
retriever = faiss_vectorstore.as_retriever(search_type="similarity", search_kwargs={"score_threshold": 0.5, "k": 3})
         
     | 
| 340 | 
         
            +
             
     | 
| 341 | 
         
            +
            related_passages = retriever.get_relevant_documents(query)
         
     | 
| 342 | 
         
            +
            ```
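If you also want the scores back from the FAISS store rather than just the documents, `similarity_search_with_score` returns `(Document, score)` pairs. A brief sketch reusing `query` and `faiss_vectorstore` from the snippet above (note that with `MAX_INNER_PRODUCT` the score is an inner product, not a distance):

```python
# top-k matches together with their scores
docs_and_scores = faiss_vectorstore.similarity_search_with_score(query, k=3)
for doc, score in docs_and_scores:
    print(f"{score:.4f}  {doc.page_content}")
```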
         
     | 
| 343 | 
         
            +
             
     | 
| 344 | 
         
            +
            #### 2. Used in `llama_index`
         
     | 
| 345 | 
         
            +
             
     | 
| 346 | 
         
            +
            ```python
         
     | 
| 347 | 
         
            +
import os

from llama_index.embeddings import HuggingFaceEmbedding
         
     | 
| 348 | 
         
            +
            from llama_index import VectorStoreIndex, ServiceContext, SimpleDirectoryReader
         
     | 
| 349 | 
         
            +
            from llama_index.node_parser import SimpleNodeParser
         
     | 
| 350 | 
         
            +
            from llama_index.llms import OpenAI
         
     | 
| 351 | 
         
            +
             
     | 
| 352 | 
         
            +
            query = 'apples'
         
     | 
| 353 | 
         
            +
            passages = [
         
     | 
| 354 | 
         
            +
                    'I like apples', 
         
     | 
| 355 | 
         
            +
                    'I like oranges', 
         
     | 
| 356 | 
         
            +
                    'Apples and oranges are fruits'
         
     | 
| 357 | 
         
            +
                ]
         
     | 
| 358 | 
         
            +
             
     | 
| 359 | 
         
            +
            # init embedding model
         
     | 
| 360 | 
         
            +
            model_args = {'model_name': 'maidalun1020/bce-embedding-base_v1', 'max_length': 512, 'embed_batch_size': 64, 'device': 'cuda'}
         
     | 
| 361 | 
         
            +
            embed_model = HuggingFaceEmbedding(**model_args)
         
     | 
| 362 | 
         
            +
             
     | 
| 363 | 
         
            +
            # example #1. extract embeddings
         
     | 
| 364 | 
         
            +
            query_embedding = embed_model.get_query_embedding(query)
         
     | 
| 365 | 
         
            +
            passages_embeddings = embed_model.get_text_embedding_batch(passages)
         
     | 
| 366 | 
         
            +
             
     | 
| 367 | 
         
            +
            # example #2. rag example
         
     | 
| 368 | 
         
            +
            llm = OpenAI(model='gpt-3.5-turbo-0613', api_key=os.environ.get('OPENAI_API_KEY'), api_base=os.environ.get('OPENAI_BASE_URL'))
         
     | 
| 369 | 
         
            +
            service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)
         
     | 
| 370 | 
         
            +
             
     | 
| 371 | 
         
            +
documents = SimpleDirectoryReader(input_files=["BCEmbedding/tools/eval_rag/eval_pdfs/Comp_en_llama2.pdf"]).load_data()
         
     | 
| 372 | 
         
            +
            node_parser = SimpleNodeParser.from_defaults(chunk_size=512)
         
     | 
| 373 | 
         
            +
            nodes = node_parser.get_nodes_from_documents(documents[0:36])
         
     | 
| 374 | 
         
            +
            index = VectorStoreIndex(nodes, service_context=service_context)
         
     | 
| 375 | 
         
            +
            query_engine = index.as_query_engine()
         
     | 
| 376 | 
         
            +
response = query_engine.query("What is llama?")
         
     | 
| 377 | 
         
            +
            ```
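The `response` object also exposes the retrieved source nodes, which is handy for checking what the retriever actually handed to the LLM. A small follow-up to the snippet above (API as in `llama-index` 0.9.x):

```python
print(response)  # the synthesized answer

# inspect the retrieved chunks and their similarity scores
for node_with_score in response.source_nodes:
    print(node_with_score.score, node_with_score.node.get_content()[:100])
```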
         
     | 
| 378 | 
         
            +
             
     | 
| 379 | 
         
            +
             
     | 
| 380 | 
         
            +
            ## ⚙️ Evaluation
         
     | 
| 381 | 
         
            +
             
     | 
| 382 | 
         
            +
            ### Evaluate Semantic Representation by MTEB
         
     | 
| 383 | 
         
            +
             
     | 
| 384 | 
         
            +
We provide evaluation tools for `embedding` and `reranker` models, based on [MTEB](https://github.com/embeddings-benchmark/mteb) and [C_MTEB](https://github.com/FlagOpen/FlagEmbedding/tree/master/C_MTEB).
         
     | 
| 385 | 
         
            +
             
     | 
| 386 | 
         
            +
              我们基于[MTEB](https://github.com/embeddings-benchmark/mteb)和[C_MTEB](https://github.com/FlagOpen/FlagEmbedding/tree/master/C_MTEB),提供`embedding`和`reranker`模型的语义表征评测工具。
         
     | 
| 387 | 
         
            +
             
     | 
| 388 | 
         
            +
            #### 1. Embedding Models
         
     | 
| 389 | 
         
            +
             
     | 
| 390 | 
         
            +
Just run the following command to evaluate `your_embedding_model` (e.g. `maidalun1020/bce-embedding-base_v1`) in **bilingual and crosslingual settings** (e.g. `["en", "zh", "en-zh", "zh-en"]`).
         
     | 
| 391 | 
         
            +
             
     | 
| 392 | 
         
            +
  运行下面命令评测`your_embedding_model`(比如,`maidalun1020/bce-embedding-base_v1`)。评测任务将会在**双语和跨语种**(比如,`["en", "zh", "en-zh", "zh-en"]`)模式下评测:
         
     | 
| 393 | 
         
            +
             
     | 
| 394 | 
         
            +
            ```bash
         
     | 
| 395 | 
         
            +
            python BCEmbedding/tools/eval_mteb/eval_embedding_mteb.py --model_name_or_path maidalun1020/bce-embedding-base_v1 --pooler cls
         
     | 
| 396 | 
         
            +
            ```
         
     | 
| 397 | 
         
            +
             
     | 
| 398 | 
         
            +
The evaluation covers ***114 datasets*** across the **"Retrieval", "STS", "PairClassification", "Classification", "Reranking" and "Clustering"** tasks.
         
     | 
| 399 | 
         
            +
             
     | 
| 400 | 
         
            +
  评测包含 **"Retrieval", "STS", "PairClassification", "Classification", "Reranking"和"Clustering"** 这六大类任务的 ***114个数据集***。
         
     | 
| 401 | 
         
            +
             
     | 
| 402 | 
         
            +
            ***NOTE:***
         
     | 
| 403 | 
         
            +
- **All models are evaluated with their recommended pooling method (`pooler`)**.
         
     | 
| 404 | 
         
            +
  - `mean` pooler: "jina-embeddings-v2-base-en", "m3e-base", "m3e-large", "e5-large-v2", "multilingual-e5-base", "multilingual-e5-large" and "gte-large".
         
     | 
| 405 | 
         
            +
              - `cls` pooler: Other models.
         
     | 
| 406 | 
         
            +
- The "jina-embeddings-v2-base-en" model should be loaded with `trust_remote_code`.
         
     | 
| 407 | 
         
            +
             
     | 
| 408 | 
         
            +
            ```bash
         
     | 
| 409 | 
         
            +
            python BCEmbedding/tools/eval_mteb/eval_embedding_mteb.py --model_name_or_path {moka-ai/m3e-base | moka-ai/m3e-large} --pooler mean
         
     | 
| 410 | 
         
            +
             
     | 
| 411 | 
         
            +
            python BCEmbedding/tools/eval_mteb/eval_embedding_mteb.py --model_name_or_path jinaai/jina-embeddings-v2-base-en --pooler mean --trust_remote_code
         
     | 
| 412 | 
         
            +
            ```
         
     | 
| 413 | 
         
            +
             
     | 
| 414 | 
         
            +
              ***注意:***
         
     | 
| 415 | 
         
            +
  - 所有模型的评测采用各自推荐的`pooler`。"jina-embeddings-v2-base-en", "m3e-base", "m3e-large", "e5-large-v2", "multilingual-e5-base", "multilingual-e5-large"和"gte-large"的 `pooler`采用`mean`,其他模型的`pooler`采用`cls`。
         
     | 
| 416 | 
         
            +
  - "jina-embeddings-v2-base-en"模型在载入时需要`trust_remote_code`。
         
     | 
| 417 | 
         
            +
             
     | 
| 418 | 
         
            +
            #### 2. Reranker Models
         
     | 
| 419 | 
         
            +
             
     | 
| 420 | 
         
            +
Run the following command to evaluate `your_reranker_model` (e.g. "maidalun1020/bce-reranker-base_v1") in **bilingual and crosslingual settings** (e.g. `["en", "zh", "en-zh", "zh-en"]`).
         
     | 
| 421 | 
         
            +
             
     | 
| 422 | 
         
            +
  运行下面命令评测`your_reranker_model`(比如,`maidalun1020/bce-reranker-base_v1`)。评测任务将会在 **双语种和跨语种**(比如,`["en", "zh", "en-zh", "zh-en"]`)模式下评测:
         
     | 
| 423 | 
         
            +
             
     | 
| 424 | 
         
            +
            ```bash
         
     | 
| 425 | 
         
            +
            python BCEmbedding/tools/eval_mteb/eval_reranker_mteb.py --model_name_or_path maidalun1020/bce-reranker-base_v1
         
     | 
| 426 | 
         
            +
            ```
         
     | 
| 427 | 
         
            +
             
     | 
| 428 | 
         
            +
The evaluation covers ***12 datasets*** of the **"Reranking"** task.
         
     | 
| 429 | 
         
            +
             
     | 
| 430 | 
         
            +
  评测包含 **"Reranking"** 任务的 ***12个数据集***。
         
     | 
| 431 | 
         
            +
             
     | 
| 432 | 
         
            +
            #### 3. Metrics Visualization Tool
         
     | 
| 433 | 
         
            +
             
     | 
| 434 | 
         
            +
We provide a one-click script to summarize the evaluation results of `embedding` and `reranker` models, as in [Embedding Models Evaluation Summary](https://github.com/netease-youdao/BCEmbedding/blob/master/Docs/EvaluationSummary/embedding_eval_summary.md) and [Reranker Models Evaluation Summary](https://github.com/netease-youdao/BCEmbedding/blob/master/Docs/EvaluationSummary/reranker_eval_summary.md).
         
     | 
| 435 | 
         
            +
             
     | 
| 436 | 
         
            +
              我们提供了`embedding`和`reranker`模型的指标可视化一键脚本,输出一个markdown文件,详见[Embedding模型指标汇总](https://github.com/netease-youdao/BCEmbedding/blob/master/Docs/EvaluationSummary/embedding_eval_summary.md)和[Reranker模型指标汇总](https://github.com/netease-youdao/BCEmbedding/blob/master/Docs/EvaluationSummary/reranker_eval_summary.md)。
         
     | 
| 437 | 
         
            +
             
     | 
| 438 | 
         
            +
            ```bash
         
     | 
| 439 | 
         
            +
            python BCEmbedding/evaluation/mteb/summarize_eval_results.py --results_dir {your_embedding_results_dir | your_reranker_results_dir}
         
     | 
| 440 | 
         
            +
            ```
         
     | 
| 441 | 
         
            +
             
     | 
| 442 | 
         
            +
            ### Evaluate RAG by LlamaIndex
         
     | 
| 443 | 
         
            +
             
     | 
| 444 | 
         
            +
[LlamaIndex](https://github.com/run-llama/llama_index) is a well-known data framework for LLM-based applications, particularly in RAG. Recently, the [LlamaIndex Blog](https://blog.llamaindex.ai/boosting-rag-picking-the-best-embedding-reranker-models-42d079022e83) evaluated popular embedding and reranker models in a RAG pipeline and attracted wide attention. Here, we follow its pipeline to evaluate our `BCEmbedding`.
         
     | 
| 445 | 
         
            +
             
     | 
| 446 | 
         
            +
              [LlamaIndex](https://github.com/run-llama/llama_index)是一个著名的大模型应用的开源工具,在RAG中很受欢迎。最近,[LlamaIndex博客](https://blog.llamaindex.ai/boosting-rag-picking-the-best-embedding-reranker-models-42d079022e83)对市面上常用的embedding和reranker模型进行RAG流程的评测,吸引广泛关注。下面我们按照该评测流程验证`BCEmbedding`在RAG中的效果。
         
     | 
| 447 | 
         
            +
             
     | 
| 448 | 
         
            +
            First, install LlamaIndex:
         
     | 
| 449 | 
         
            +
            ```bash
         
     | 
| 450 | 
         
            +
            pip install llama-index==0.9.22
         
     | 
| 451 | 
         
            +
            ```
         
     | 
| 452 | 
         
            +
             
     | 
| 453 | 
         
            +
            #### 1. Metrics Definition
         
     | 
| 454 | 
         
            +
             
     | 
| 455 | 
         
            +
            - Hit Rate:
         
     | 
| 456 | 
         
            +
             
     | 
| 457 | 
         
            +
              Hit rate calculates the fraction of queries where the correct answer is found within the top-k retrieved documents. In simpler terms, it's about how often our system gets it right within the top few guesses. ***The larger, the better.***
         
     | 
| 458 | 
         
            +
             
     | 
| 459 | 
         
            +
            - Mean Reciprocal Rank (MRR):
         
     | 
| 460 | 
         
            +
              
         
     | 
| 461 | 
         
            +
              For each query, MRR evaluates the system's accuracy by looking at the rank of the highest-placed relevant document. Specifically, it's the average of the reciprocals of these ranks across all the queries. So, if the first relevant document is the top result, the reciprocal rank is 1; if it's second, the reciprocal rank is 1/2, and so on. ***The larger, the better.***
         
     | 
| 462 | 
         
            +
             
     | 
| 463 | 
         
            +
              - 命中率(Hit Rate)
         
     | 
| 464 | 
         
            +
              
         
     | 
| 465 | 
         
            +
                命中率计算的是在检索的前k个文档中找到正确答案的查询所占的比例。简单来说,它反映了我们的系统在前几次猜测中答对的频率。***该指标越大越好。***
         
     | 
| 466 | 
         
            +
              
         
     | 
| 467 | 
         
            +
              - 平均倒数排名(Mean Reciprocal Rank,MRR)
         
     | 
| 468 | 
         
            +
                
         
     | 
| 469 | 
         
            +
                对于每个查询,MRR通过查看最高排名的相关文档的排名来评估系统的准确性。具体来说,它是在所有查询中这些排名的倒数的平均值。因此,如果第一个相关文档是排名最靠前的结果,倒数排名就是1;如果是第二个,倒数排名就是1/2,依此类推。***该指标越大越好。***
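Both metrics are simple to compute once you have, for each query, the ranked list of retrieved document ids and the set of relevant ids. A minimal sketch (our own illustration, not the evaluation script used in this repository):

```python
def hit_rate_and_mrr(ranked_ids_per_query, relevant_ids_per_query, k=10):
    """Compute Hit Rate@k and MRR over a batch of queries.

    ranked_ids_per_query:   list of ranked document-id lists (best first)
    relevant_ids_per_query: list of sets of relevant document ids
    """
    hits, reciprocal_ranks = 0, []
    for ranked_ids, relevant_ids in zip(ranked_ids_per_query, relevant_ids_per_query):
        top_k = ranked_ids[:k]
        # Hit Rate: does any relevant document appear in the top-k?
        if any(doc_id in relevant_ids for doc_id in top_k):
            hits += 1
        # MRR: reciprocal rank of the first relevant document (0 if none retrieved)
        rr = 0.0
        for rank, doc_id in enumerate(top_k, start=1):
            if doc_id in relevant_ids:
                rr = 1.0 / rank
                break
        reciprocal_ranks.append(rr)
    n = len(ranked_ids_per_query)
    return hits / n, sum(reciprocal_ranks) / n

# two queries: relevant doc found at rank 1 and at rank 3
print(hit_rate_and_mrr([["d1", "d2"], ["d5", "d6", "d4"]], [{"d1"}, {"d4"}]))
# -> (1.0, 0.666...)  i.e. hit rate 1.0, MRR = (1 + 1/3) / 2
```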
         
     | 
| 470 | 
         
            +
             
     | 
| 471 | 
         
            +
            #### 2. Reproduce [LlamaIndex Blog](https://blog.llamaindex.ai/boosting-rag-picking-the-best-embedding-reranker-models-42d079022e83)
         
     | 
| 472 | 
         
            +
             
     | 
| 473 | 
         
            +
            In order to compare our `BCEmbedding` with other embedding and reranker models fairly, we provide a one-click script to reproduce results of the LlamaIndex Blog, including our `BCEmbedding`:
         
     | 
| 474 | 
         
            +
             
     | 
| 475 | 
         
            +
              为了公平起见,运行下面脚本,复现LlamaIndex博客的结果,将`BCEmbedding`与其他embedding和reranker模型进行对比分析:
         
     | 
| 476 | 
         
            +
             
     | 
| 477 | 
         
            +
            ```bash
         
     | 
| 478 | 
         
            +
# At least two GPUs should be available.
         
     | 
| 479 | 
         
            +
            CUDA_VISIBLE_DEVICES=0,1 python BCEmbedding/tools/eval_rag/eval_llamaindex_reproduce.py
         
     | 
| 480 | 
         
            +
            ```
         
     | 
| 481 | 
         
            +
             
     | 
| 482 | 
         
            +
Then, summarize the evaluation results by:
         
     | 
| 483 | 
         
            +
            ```bash
         
     | 
| 484 | 
         
            +
            python BCEmbedding/tools/eval_rag/summarize_eval_results.py --results_dir results/rag_reproduce_results
         
     | 
| 485 | 
         
            +
            ```
         
     | 
| 486 | 
         
            +
             
     | 
| 487 | 
         
            +
The results reproduced from the LlamaIndex Blog can be checked in ***[Reproduced Summary of RAG Evaluation](https://github.com/netease-youdao/BCEmbedding/blob/master/Docs/EvaluationSummary/rag_eval_reproduced_summary.md)***, with some clear ***conclusions***:
         
     | 
| 488 | 
         
            +
- In the `WithoutReranker` setting, our `bce-embedding-base_v1` outperforms all the other embedding models.
         
     | 
| 489 | 
         
            +
- With the embedding model fixed, our `bce-reranker-base_v1` achieves the best performance.
         
     | 
| 490 | 
         
            +
            - ***The combination of `bce-embedding-base_v1` and `bce-reranker-base_v1` is SOTA.***
         
     | 
| 491 | 
         
            +
             
     | 
| 492 | 
         
            +
              输出的指标汇总详见 ***[LlamaIndex RAG评测结果复现](https://github.com/netease-youdao/BCEmbedding/blob/master/Docs/EvaluationSummary/rag_eval_reproduced_summary.md)***。从该复现结果中,可以看出:
         
     | 
| 493 | 
         
            +
              - 在`WithoutReranker`设置下(**竖排对比**),`bce-embedding-base_v1`比其他embedding模型效果都要好。
         
     | 
| 494 | 
         
            +
              - 在固定embedding模型设置下,对比不同reranker效果(**横排对比**),`bce-reranker-base_v1`比其他reranker模型效果都要好。
         
     | 
| 495 | 
         
            +
              - ***`bce-embedding-base_v1`和`bce-reranker-base_v1`组合,表现SOTA。***
         
     | 
| 496 | 
         
            +
             
     | 
| 497 | 
         
            +
            #### 3. Broad Domain Adaptability
         
     | 
| 498 | 
         
            +
             
     | 
| 499 | 
         
            +
The evaluation in the [LlamaIndex Blog](https://blog.llamaindex.ai/boosting-rag-picking-the-best-embedding-reranker-models-42d079022e83) is **monolingual, small in data volume, and domain-specific** (covering only the "llama2" paper). To evaluate **broad domain adaptability and bilingual and crosslingual capability**, we follow the blog's methodology to build a multi-domain evaluation dataset (including "Computer Science", "Physics", "Biology", "Economics", "Math", and "Quantitative Finance"), named [CrosslingualMultiDomainsDataset](https://huggingface.co/datasets/maidalun1020/CrosslingualMultiDomainsDataset), **generated with OpenAI's `gpt-4-1106-preview` for high quality**.
         
     | 
| 500 | 
         
            +
             
     | 
| 501 | 
         
            +
              在上述的[LlamaIndex博客](https://blog.llamaindex.ai/boosting-rag-picking-the-best-embedding-reranker-models-42d079022e83)的评测数据只用了“llama2”这一篇文章,该评测是 **单语种,小数据量,特定领域** 的。为了兼容更真实更广的用户使用场景,评测算法模型的 **领域泛化性,双语和跨语种能力**,我们按照该博客的方法构建了一个多领域(计算机科学,物理学,生物学,经济学,数学,量化金融等)的双语种、跨语种评测数据,[CrosslingualMultiDomainsDataset](https://huggingface.co/datasets/maidalun1020/CrosslingualMultiDomainsDataset)。**为了保证构建数据的高质量,我们采用OpenAI的`gpt-4-1106-preview`。**
         
     | 
| 502 | 
         
            +
             
     | 
| 503 | 
         
            +
First, run the following command to evaluate the most popular and powerful embedding and reranker models:
         
     | 
| 504 | 
         
            +
             
     | 
| 505 | 
         
            +
            ```bash
         
     | 
| 506 | 
         
            +
# At least two GPUs should be available.
         
     | 
| 507 | 
         
            +
            CUDA_VISIBLE_DEVICES=0,1 python BCEmbedding/tools/eval_rag/eval_llamaindex_multiple_domains.py
         
     | 
| 508 | 
         
            +
            ```
         
     | 
| 509 | 
         
            +
             
     | 
| 510 | 
         
            +
Then, run the following script to summarize the evaluation results:
         
     | 
| 511 | 
         
            +
            ```bash
         
     | 
| 512 | 
         
            +
            python BCEmbedding/tools/eval_rag/summarize_eval_results.py --results_dir results/rag_results
         
     | 
| 513 | 
         
            +
            ```
         
     | 
| 514 | 
         
            +
             
     | 
| 515 | 
         
            +
The summary of the multiple-domain evaluations can be seen in <a href="#1-multiple-domains-scenarios">Multiple Domains Scenarios</a>.
         
     | 
| 516 | 
         
            +
             
     | 
| 517 | 
         
            +
            ## 📈 Leaderboard
         
     | 
| 518 | 
         
            +
             
     | 
| 519 | 
         
            +
            ### Semantic Representation Evaluations in MTEB
         
     | 
| 520 | 
         
            +
             
     | 
| 521 | 
         
            +
            #### 1. Embedding Models
         
     | 
| 522 | 
         
            +
             
     | 
| 523 | 
         
            +
            | Model | Dimensions | Pooler | Instructions | Retrieval (47) | STS (19) | PairClassification (5) | Classification (21) | Reranking (12) | Clustering (15) | ***AVG*** (119) |  
         
     | 
| 524 | 
         
            +
            |:--------|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|  
         
     | 
| 525 | 
         
            +
            | bge-base-en-v1.5 | 768 | `cls` | Need | 37.14 | 55.06 | 75.45 | 59.73 | 43.00 | 37.74 | 47.19 |  
         
     | 
| 526 | 
         
            +
            | bge-base-zh-v1.5 | 768 | `cls` | Need | 47.63 | 63.72 | 77.40 | 63.38 | 54.95 | 32.56 | 53.62 |  
         
     | 
| 527 | 
         
            +
            | bge-large-en-v1.5 | 1024 | `cls` | Need | 37.18 | 54.09 | 75.00 | 59.24 | 42.47 | 37.32 | 46.80 |  
         
     | 
| 528 | 
         
            +
            | bge-large-zh-v1.5 | 1024 | `cls` | Need | 47.58 | 64.73 | 79.14 | 64.19 | 55.98 | 33.26 | 54.23 |  
         
     | 
| 529 | 
         
            +
            | e5-large-v2 | 1024 | `mean` | Need | 35.98 | 55.23 | 75.28 | 59.53 | 42.12 | 36.51 | 46.52 |  
         
     | 
| 530 | 
         
            +
            | gte-large | 1024 | `mean` | Free | 36.68 | 55.22 | 74.29 | 57.73 | 42.44 | 38.51 | 46.67 |  
         
     | 
| 531 | 
         
            +
            | gte-large-zh | 1024 | `cls` | Free | 41.15 | 64.62 | 77.58 | 62.04 | 55.62 | 33.03 | 51.51 |  
         
     | 
| 532 | 
         
            +
            | jina-embeddings-v2-base-en | 768 | `mean` | Free | 31.58 | 54.28 | 74.84 | 58.42 | 41.16 | 34.67 | 44.29 |  
         
     | 
| 533 | 
         
            +
            | m3e-base | 768 | `mean` | Free | 46.29 | 63.93 | 71.84 | 64.08 | 52.38 | 37.84 | 53.54 |  
         
     | 
| 534 | 
         
            +
            | m3e-large | 1024 | `mean` | Free | 34.85 | 59.74 | 67.69 | 60.07 | 48.99 | 31.62 | 46.78 |  
         
     | 
| 535 | 
         
            +
            | multilingual-e5-base | 768 | `mean` | Need | 54.73 | 65.49 | 76.97 | 69.72 | 55.01 | 38.44 | 58.34 |  
         
     | 
| 536 | 
         
            +
            | multilingual-e5-large | 1024 | `mean` | Need | 56.76 | 66.79 | 78.80 | 71.61 | 56.49 | 43.09 | 60.50 |  
         
     | 
| 537 | 
         
            +
            | ***bce-embedding-base_v1*** | 768 | `cls` | Free | 57.60 | 65.73 | 74.96 | 69.00 | 57.29 | 38.95 | 59.43 |  
         
     | 
| 538 | 
         
            +
             
     | 
| 539 | 
         
            +
            ***NOTE:***
         
     | 
| 540 | 
         
            +
- Our ***bce-embedding-base_v1*** outperforms other open-source embedding models of comparable model size.
         
     | 
| 541 | 
         
            +
- Evaluated on ***114 datasets*** across **"Retrieval", "STS", "PairClassification", "Classification", "Reranking" and "Clustering"** in the `["en", "zh", "en-zh", "zh-en"]` setting.
         
     | 
| 542 | 
         
            +
- The [crosslingual evaluation datasets](https://github.com/netease-youdao/BCEmbedding/blob/master/BCEmbedding/evaluation/c_mteb/Retrieval.py) we released belong to the `Retrieval` task.
         
     | 
| 543 | 
         
            +
- For more evaluation details, please check the [Embedding Models Evaluation Summary](https://github.com/netease-youdao/BCEmbedding/blob/master/Docs/EvaluationSummary/embedding_eval_summary.md).
         
     | 
| 544 | 
         
            +
             
     | 
| 545 | 
         
            +
              ***要点:***
         
     | 
| 546 | 
         
            +
              - 对比其他开源的相同规模的embedding模型,***bce-embedding-base_v1*** 表现最好,效果比最好的large模型稍差。
         
     | 
| 547 | 
         
            +
  - 评测包含 **"Retrieval", "STS", "PairClassification", "Classification", "Reranking"和"Clustering"** 这六大类任务的共 ***114个数据集***。
         
     | 
| 548 | 
         
            +
              - 我们开源的[跨语种语义表征评测数据](https://github.com/netease-youdao/BCEmbedding/blob/master/BCEmbedding/evaluation/c_mteb/Retrieval.py)属于`Retrieval`任务。
         
     | 
| 549 | 
         
            +
              - 更详细的评测结果详见[Embedding模型指标汇总](https://github.com/netease-youdao/BCEmbedding/blob/master/Docs/EvaluationSummary/embedding_eval_summary.md)。
         
     | 
| 550 | 
         
            +
             
     | 
| 551 | 
         
            +
            #### 2. Reranker Models
         
     | 
| 552 | 
         
            +
             
     | 
| 553 | 
         
            +
            | Model                              | Reranking (12) | ***AVG*** (12) |
         
     | 
| 554 | 
         
            +
            | :--------------------------------- | :-------------: | :--------------------: |
         
     | 
| 555 | 
         
            +
            | bge-reranker-base                  |      59.04      |         59.04         |
         
     | 
| 556 | 
         
            +
            | bge-reranker-large                 |      60.86      |         60.86         |
         
     | 
| 557 | 
         
            +
            | ***bce-reranker-base_v1*** | **61.29** |  ***61.29***  |
         
     | 
| 558 | 
         
            +
             
     | 
| 559 | 
         
            +
            ***NOTE:***
         
     | 
| 560 | 
         
            +
- Our ***bce-reranker-base_v1*** outperforms other open-source reranker models.
         
     | 
| 561 | 
         
            +
- Evaluated on ***12 datasets*** of **"Reranking"** in the `["en", "zh", "en-zh", "zh-en"]` setting.
         
     | 
| 562 | 
         
            +
- For more evaluation details, please check the [Reranker Models Evaluation Summary](https://github.com/netease-youdao/BCEmbedding/blob/master/Docs/EvaluationSummary/reranker_eval_summary.md).
         
     | 
| 563 | 
         
            +
             
     | 
| 564 | 
         
            +
              ***要点:***
         
     | 
| 565 | 
         
            +
              - ***bce-reranker-base_v1*** 优于其他开源reranker模型。
         
     | 
| 566 | 
         
            +
  - 评测包含 **"Reranking"** 任务的 ***12个数据集***。
         
     | 
| 567 | 
         
            +
              - 更详细的评测结果详见[Reranker模型指标汇总](https://github.com/netease-youdao/BCEmbedding/blob/master/Docs/EvaluationSummary/reranker_eval_summary.md)
         
     | 
| 568 | 
         
            +
             
     | 
| 569 | 
         
            +
            ### RAG Evaluations in LlamaIndex
         
     | 
| 570 | 
         
            +
             
     | 
| 571 | 
         
            +
            #### 1. Multiple Domains Scenarios
         
     | 
| 572 | 
         
            +
             
     | 
| 573 | 
         
            +
            
         
     | 
| 574 | 
         
            +
             
     | 
| 575 | 
         
            +
            ***NOTE:***
         
     | 
| 576 | 
         
            +
- Evaluated in the **`["en", "zh", "en-zh", "zh-en"]` setting**.
         
     | 
| 577 | 
         
            +
- In the `WithoutReranker` setting, our `bce-embedding-base_v1` outperforms all the other embedding models.
         
     | 
| 578 | 
         
            +
- With the embedding model fixed, our `bce-reranker-base_v1` achieves the best performance.
         
     | 
| 579 | 
         
            +
            - **The combination of `bce-embedding-base_v1` and `bce-reranker-base_v1` is SOTA**.
         
     | 
| 580 | 
         
            +
             
     | 
| 581 | 
         
            +
              ***要点:***
         
     | 
| 582 | 
         
            +
  - 评测是在`["en", "zh", "en-zh", "zh-en"]`设置下。
         
     | 
| 583 | 
         
            +
              - 在`WithoutReranker`设置下(**竖排对比**),`bce-embedding-base_v1`优于其他Embedding模型,包括开源和闭源。
         
     | 
| 584 | 
         
            +
              - 在固定Embedding模型设置下,对比不同reranker效果(**横排对比**),`bce-reranker-base_v1`比其他reranker模型效果都要好,包括开源和闭源。
         
     | 
| 585 | 
         
            +
              - ***`bce-embedding-base_v1`和`bce-reranker-base_v1`组合,表现SOTA。***
         
     | 
| 586 | 
         
            +
             
     | 
| 587 | 
         
            +
            ## 🛠 Youdao's BCEmbedding API
         
     | 
| 588 | 
         
            +
             
     | 
| 589 | 
         
            +
            For users who prefer a hassle-free experience without the need to download and configure the model on their own systems, `BCEmbedding` is readily accessible through Youdao's API. This option offers a streamlined and efficient way to integrate BCEmbedding into your projects, bypassing the complexities of manual setup and maintenance. Detailed instructions and comprehensive API documentation are available at [Youdao BCEmbedding API](https://ai.youdao.com/DOCSIRMA/html/aigc/api/embedding/index.html). Here, you'll find all the necessary guidance to easily implement `BCEmbedding` across a variety of use cases, ensuring a smooth and effective integration for optimal results.
         
     | 
| 590 | 
         
            +
             
     | 
| 591 | 
         
            +
              对于那些更喜欢直接调用api的用户,有道提供方便的`BCEmbedding`调用api。该方式是一种简化和高效的方式,将`BCEmbedding`集成到您的项目中,避开了手动设置和系统维护的复杂性。更详细的api调用接口说明详见[有道BCEmbedding API](https://ai.youdao.com/DOCSIRMA/html/aigc/api/embedding/index.html)。
         
     | 
| 592 | 
         
            +
             
     | 
| 593 | 
         
            +
            ## 🧲 WeChat Group
         
     | 
| 594 | 
         
            +
             
     | 
| 595 | 
         
            +
            Welcome to scan the QR code below and join the WeChat group.
         
     | 
| 596 | 
         
            +
             
     | 
| 597 | 
         
            +
              欢迎大家扫码加入官方微信交流群。
         
     | 
| 598 | 
         
            +
             
     | 
| 599 | 
         
            +
            
         
     | 
| 600 | 
         
            +
             
     | 
| 601 | 
         
            +
            ## ✏️ Citation
         
     | 
| 602 | 
         
            +
             
     | 
| 603 | 
         
            +
            If you use `BCEmbedding` in your research or project, please feel free to cite and star it:
         
     | 
| 604 | 
         
            +
             
     | 
| 605 | 
         
            +
              如果在您的研究或任何项目中使用本工作,烦请按照下方进行引用,并打个小星星~
         
     | 
| 606 | 
         
            +
             
     | 
| 607 | 
         
            +
            ```
         
     | 
| 608 | 
         
            +
            @misc{youdao_bcembedding_2023,
         
     | 
| 609 | 
         
            +
                title={BCEmbedding: Bilingual and Crosslingual Embedding for RAG},
         
     | 
| 610 | 
         
            +
                author={NetEase Youdao, Inc.},
         
     | 
| 611 | 
         
            +
                year={2023},
         
     | 
| 612 | 
         
            +
                howpublished={\url{https://github.com/netease-youdao/BCEmbedding}}
         
     | 
| 613 | 
         
            +
            }
         
     | 
| 614 | 
         
            +
            ```
         
     | 
| 615 | 
         
            +
             
     | 
| 616 | 
         
            +
            ## 🔐 License
         
     | 
| 617 | 
         
            +
             
     | 
| 618 | 
         
            +
`BCEmbedding` is licensed under the [Apache 2.0 License](https://github.com/netease-youdao/BCEmbedding/blob/master/LICENSE).
         
     | 
| 619 | 
         
            +
             
     | 
| 620 | 
         
            +
            ## 🔗 Related Links
         
     | 
| 621 | 
         
            +
             
     | 
| 622 | 
         
            +
            [Netease Youdao - QAnything](https://github.com/netease-youdao/qanything)
         
     | 
| 623 | 
         
            +
             
     | 
| 624 | 
         
            +
            [FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding)
         
     | 
| 625 | 
         
            +
             
     | 
| 626 | 
         
            +
            [MTEB](https://github.com/embeddings-benchmark/mteb)
         
     | 
| 627 | 
         
            +
             
     | 
| 628 | 
         
            +
            [C_MTEB](https://github.com/FlagOpen/FlagEmbedding/tree/master/C_MTEB)
         
     | 
| 629 | 
         
            +
             
     | 
| 630 | 
         
            +
            [LLama Index](https://github.com/run-llama/llama_index) | [LlamaIndex Blog](https://blog.llamaindex.ai/boosting-rag-picking-the-best-embedding-reranker-models-42d079022e83)","{""id"": ""maidalun1020/bce-embedding-base_v1"", ""author"": ""maidalun1020"", ""sha"": ""f542e557e78bd8c5feed08573f183d87bc3d5535"", ""last_modified"": ""2024-11-08 05:02:41+00:00"", ""created_at"": ""2023-12-29 07:38:08+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 79426, ""downloads_all_time"": null, ""likes"": 375, ""library_name"": ""sentence-transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""sentence-transformers"", ""pytorch"", ""xlm-roberta"", ""feature-extraction"", ""sentence-similarity"", ""transformers"", ""en"", ""zh"", ""license:apache-2.0"", ""autotrain_compatible"", ""text-embeddings-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""feature-extraction"", ""mask_token"": ""<mask>"", ""trending_score"": null, ""card_data"": ""language:\n- en\n- zh\nlicense: apache-2.0\npipeline_tag: feature-extraction\ntags:\n- sentence-transformers\n- feature-extraction\n- sentence-similarity\n- transformers"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""XLMRobertaModel""], ""model_type"": ""xlm-roberta"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""cls_token"": ""<s>"", ""eos_token"": ""</s>"", ""mask_token"": ""<mask>"", ""pad_token"": ""<pad>"", ""sep_token"": ""</s>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": ""feature-extraction"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='1_Pooling/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/Wechat.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/rag_eval_multiple_domains_summary.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config_sentence_transformers.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modules.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='sentence_bert_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='sentencepiece.bpe.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""silk-road/Zero-Haruhi-50_Novels-Playground-API"", ""xzyun2011/wulewule"", ""fakeym/travel"", ""LISA-Kadi/LISA-demo"", ""jameszzg/maidalun1020-bce-embedding-base_v1"", ""silk-road/Zero-Haruhi-50_Novels-Playground"", ""AK2022/maidalun1020-bce-embedding-base_v1"", ""nus-cs5242-team-3/project"", ""BYY5410/maidalun1020-bce-embedding-base_v1"", ""rocky4dx3906/maidalun1020-bce-embedding-base_v1"", ""Elvis666/maidalun1020-bce-embedding-base_v1"", ""horry135/maidalun1020-bce-embedding-base_v1"", ""coachMorta/maidalun1020-bce-embedding-base_v1"", ""Yijun-Yang/ReadReview"", 
""Darions/maidalun1020-bce-embedding-base_v1"", ""retopara/ragflow"", ""codywu1996/maidalun1020-bce-embedding-base_v1"", ""Starowo/ragflow""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-08 05:02:41+00:00"", ""cardData"": ""language:\n- en\n- zh\nlicense: apache-2.0\npipeline_tag: feature-extraction\ntags:\n- sentence-transformers\n- feature-extraction\n- sentence-similarity\n- transformers"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": ""feature-extraction"", ""processor"": ""AutoTokenizer""}, ""_id"": ""658e776016227c7a2ddaeb50"", ""modelId"": ""maidalun1020/bce-embedding-base_v1"", ""usedStorage"": 3368166798}",0,,0,,0,https://huggingface.co/opengf/bce-embedding-base_v1-Q4_K_M-GGUF,1,,0,"AK2022/maidalun1020-bce-embedding-base_v1, BYY5410/maidalun1020-bce-embedding-base_v1, Elvis666/maidalun1020-bce-embedding-base_v1, LISA-Kadi/LISA-demo, fakeym/travel, horry135/maidalun1020-bce-embedding-base_v1, huggingface/InferenceSupport/discussions/new?title=maidalun1020/bce-embedding-base_v1&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bmaidalun1020%2Fbce-embedding-base_v1%5D(%2Fmaidalun1020%2Fbce-embedding-base_v1)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, jameszzg/maidalun1020-bce-embedding-base_v1, nus-cs5242-team-3/project, rocky4dx3906/maidalun1020-bce-embedding-base_v1, silk-road/Zero-Haruhi-50_Novels-Playground, silk-road/Zero-Haruhi-50_Novels-Playground-API, xzyun2011/wulewule",13
         
     | 
    	
        bert-base-chinese_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv
    ADDED
    
    | 
         The diff for this file is too large to render. 
		See raw diff 
     | 
| 
         | 
    	
        bert-base-portuguese-cased_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv
    ADDED
    
    | 
         The diff for this file is too large to render. 
		See raw diff 
     | 
| 
         | 
    	
        cde-small-v1_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv
    ADDED
    
    | 
         The diff for this file is too large to render. 
		See raw diff 
     | 
| 
         | 
    	
        control_v1p_sd15_qrcode_monster_finetunes_20250424_193500.csv_finetunes_20250424_193500.csv
    ADDED
    
    | 
         @@ -0,0 +1,59 @@ 
     | 
|
| 1 | 
         
            +
            model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
         
     | 
| 2 | 
         
            +
            monster-labs/control_v1p_sd15_qrcode_monster,"---
         
     | 
| 3 | 
         
            +
            tags:
         
     | 
| 4 | 
         
            +
            - stable-diffusion
         
     | 
| 5 | 
         
            +
            - controlnet
         
     | 
| 6 | 
         
            +
            - qrcode
         
     | 
| 7 | 
         
            +
            license: openrail++
         
     | 
| 8 | 
         
            +
            language:
         
     | 
| 9 | 
         
            +
            - en
         
     | 
| 10 | 
         
            +
            ---
         
     | 
| 11 | 
         
            +
            # Controlnet QR Code Monster v2 For SD-1.5
         
     | 
| 12 | 
         
            +
             
     | 
| 13 | 
         
            +
            
         
     | 
| 14 | 
         
            +
             
     | 
| 15 | 
         
            +
            ##  Model Description
         
     | 
| 16 | 
         
            +
             
     | 
| 17 | 
         
            +
            This model is made to generate creative QR codes that still scan.
         
     | 
| 18 | 
         
            +
            Keep in mind that not all generated codes might be readable, but you can try different parameters and prompts to get the desired results.
         
     | 
| 19 | 
         
            +
             
     | 
| 20 | 
         
            +
            **NEW VERSION**
         
     | 
| 21 | 
         
            +
             
     | 
| 22 | 
         
            +
            Introducing the upgraded version of our model - Controlnet QR code Monster v2.
         
     | 
| 23 | 
         
            +
            V2 is a huge upgrade over v1, for scannability AND creativity.
         
     | 
| 24 | 
         
            +
             
     | 
| 25 | 
         
            +
QR codes can now seamlessly blend into the image by using a gray-colored background (#808080).
         
     | 
| 26 | 
         
            +
             
     | 
| 27 | 
         
            +
As with the former version, the readability of some generated codes may vary; however, playing around with parameters and prompts can yield better results.
         
     | 
| 28 | 
         
            +
             
     | 
| 29 | 
         
            +
You can find it in the `v2/` subfolder.
         
     | 
| 30 | 
         
            +
             
     | 
| 31 | 
         
            +
            ## How to Use
         
     | 
| 32 | 
         
            +
             
     | 
| 33 | 
         
            +
            - **Condition**: QR codes are passed as condition images with a module size of 16px. Use a higher error correction level to make it easier to read (sometimes a lower level can be easier to read if smaller in size). Use a gray background for the rest of the image to make the code integrate better.
         
     | 
| 34 | 
         
            +
             
     | 
| 35 | 
         
            +
- **Prompts**: Use a prompt to guide the QR code generation. The output depends heavily on the given prompt: some prompts are readily accepted by the QR code process, while others require careful tweaking to get good results.
         
     | 
| 36 | 
         
            +
             
     | 
| 37 | 
         
            +
- **Controlnet guidance scale**: Set the controlnet guidance scale value (see the sketch after this list):
         
     | 
| 38 | 
         
            +
               - High values: The generated QR code will be more readable.
         
     | 
| 39 | 
         
            +
               - Low values: The generated QR code will be more creative.
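Putting these settings together, here is a rough `diffusers` sketch (not an official snippet from this card: the base model, prompt, file paths, and parameter values are illustrative assumptions, and `controlnet_conditioning_scale` plays the role of the ControlNet guidance scale described above):

```python
# Minimal, illustrative diffusers sketch for this ControlNet
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline, UniPCMultistepScheduler
from diffusers.utils import load_image

controlnet = ControlNetModel.from_pretrained(
    "monster-labs/control_v1p_sd15_qrcode_monster",
    subfolder="v2",              # drop this line to use the v1 weights in the repo root
    torch_dtype=torch.float16,
)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
).to("cuda")
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)

# QR code rendered at ~16 px per module on a gray (#808080) background, as described above
condition = load_image("qr_condition.png")

image = pipe(
    "a cozy mountain village in winter, golden hour, highly detailed",
    image=condition,
    num_inference_steps=30,
    # higher -> more readable code, lower -> more creative
    controlnet_conditioning_scale=1.2,
).images[0]
image.save("qr_art.png")
```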
         
     | 
| 40 | 
         
            +
             
     | 
| 41 | 
         
            +
            ### Tips
         
     | 
| 42 | 
         
            +
             
     | 
| 43 | 
         
            +
            - For an optimally readable output, try generating multiple QR codes with similar parameters, then choose the best ones.
         
     | 
| 44 | 
         
            +
             
     | 
| 45 | 
         
            +
            - Use the Image-to-Image feature to improve the readability of a generated QR code:
         
     | 
| 46 | 
         
            +
              - Decrease the denoising strength to retain more of the original image.
         
     | 
| 47 | 
         
            +
              - Increase the controlnet guidance scale value for better readability.
         
     | 
| 48 | 
         
            +
  A typical workflow for "saving" a code would be (see the sketch after this list):
         
     | 
| 49 | 
         
            +
              Max out the guidance scale and minimize the denoising strength, then bump the strength until the code scans.
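A similarly rough sketch of that image-to-image rescue step (it reuses `controlnet`, `condition`, and `image` from the sketch above; `strength` corresponds to the denoising strength and is the knob you gradually bump up until the code scans):

```python
import torch
from diffusers import StableDiffusionControlNetImg2ImgPipeline

img2img = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
).to("cuda")

fixed = img2img(
    "a cozy mountain village in winter, golden hour, highly detailed",
    image=image,                        # the generation you want to "save"
    control_image=condition,            # the original QR condition image
    strength=0.35,                      # low denoising strength keeps most of the image
    controlnet_conditioning_scale=1.6,  # high guidance pushes readability back up
    num_inference_steps=30,
).images[0]
fixed.save("qr_art_fixed.png")
```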
         
     | 
| 50 | 
         
            +
             
     | 
| 51 | 
         
            +
            ## Example Outputs
         
     | 
| 52 | 
         
            +
             
     | 
| 53 | 
         
            +
            Here are some examples of creative, yet scannable QR codes produced by our model:
         
     | 
| 54 | 
         
            +
             
     | 
| 55 | 
         
            +
            
         
     | 
| 56 | 
         
            +
            
         
     | 
| 57 | 
         
            +
            
         
     | 
| 58 | 
         
            +
             
     | 
| 59 | 
         
            +
            Feel free to experiment with prompts, parameters, and the Image-to-Image feature to achieve the desired QR code output. Good luck and have fun!","{""id"": ""monster-labs/control_v1p_sd15_qrcode_monster"", ""author"": ""monster-labs"", ""sha"": ""560fb7b15d0badb409f8cd578a2bfe63bd4b8046"", ""last_modified"": ""2023-07-21 11:35:31+00:00"", ""created_at"": ""2023-06-24 15:07:20+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 96778, ""downloads_all_time"": null, ""likes"": 1400, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""safetensors"", ""stable-diffusion"", ""controlnet"", ""qrcode"", ""en"", ""license:openrail++"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\nlicense: openrail++\ntags:\n- stable-diffusion\n- controlnet\n- qrcode"", ""widget_data"": null, ""model_index"": null, ""config"": {}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v1p_sd15_qrcode_monster.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v1p_sd15_qrcode_monster.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v1p_sd15_qrcode_monster.yaml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/architecture.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/monster.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/skulls.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/tree.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='v2/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='v2/control_v1p_sd15_qrcode_monster_v2.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='v2/control_v1p_sd15_qrcode_monster_v2.yaml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='v2/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [""AP123/IllusionDiffusion"", ""radames/Real-Time-Latent-Consistency-Model"", ""r3gm/DiffuseCraft"", ""John6666/DiffuseCraftMod"", ""radames/real-time-pix2pix-turbo"", ""John6666/votepurchase-multiple-model"", ""monster-labs/Controlnet-QRCode-Monster-V1"", ""kinsung/qraf"", ""andyaii/IllusionDiffusion"", ""HusseinHE/psis"", ""EnigmaOfTheWorld/Power_AI_Point"", ""Menyu/DiffuseCraftMod"", ""measmonysuon/IllusionDiffusion"", ""44brabal/runwayml-stable-diffusion-v1-5"", ""bobber/DiffuseCraft"", ""PirateHFH/IllusionDiffusion"", ""TheNetherWatcher/Vid2Vid-using-Text-prompt"", ""Neurify/IllusionDiffusion"", ""brainstone/qr"", ""Jeremygrd/IllusionDiffusion"", ""dhanilka/illusion-image-ai"", ""Joeythemonster/IllusionDiffusionhq"", ""vittore/pattern-into-image"", ""KienPongPoh/pareidolia-but-cute"", ""cbensimon/IllusionDiffusion3"", ""John6666/testvp"", ""astronauti/QR-code-AI-art-generator"", ""johnsmi876545678/Controlnet-QRCode-Monster-V1"", ""DabHero/IllusionDiffusion"", 
""migandhi/IllusionDiffusion"", ""migandhi/createillusion"", ""pseudotheos/scrolldiffusion"", ""singularity7/IllusionDiffusion"", ""SteveRyherd/IllusionDiffusion"", ""emma321/IllusionDiffusion"", ""LayekAhmed/I"", ""novoloom/IllusionDiffusion"", ""geero/IllusionDiffusion"", ""Smmmama/IllusionDiffusion"", ""awqwqwq/1IllusionDiffusion"", ""nipun08/qraf"", ""ijsbeerdev/IllusionDiffusion"", ""Omnibus/Controlnet-QRCode-Monster-V1"", ""yaffd/IllusionDiffusion"", ""Abinghosh/IllusionDiffusion"", ""hachoumm/IllusionDiffusion"", ""abidlabs/IllusionDiffusion2"", ""K00B404/ModelmasterUI"", ""imjunaidafzal/Power_AI_Point"", ""Omnibus-archive/IllusionDiffusion"", ""JCTN/IllusionDiffusion"", ""sumanthtelapuri/IllusionDiffusion"", ""BulatF/pics"", ""Girisgc/IllusionDiffusion"", ""RallyHere/RallyTest"", ""sally123321/IllusionDiffusionWeb"", ""dogwtz/IllusionDiffusion"", ""Stokewell/IllusionDiffusion"", ""Prince2004z/IllusionDiffusion"", ""44brabal/mods"", ""Joeythemonster/IllusionDiffusionhdjejdn"", ""imjunaidafzal/IllusionDiffusion"", ""ADLONX234/IllusionDiffusionXXX"", ""mesmitpatel/IllusionDiffusion"", ""JayaHarsha/IllusionDiffusion"", ""Supunabro/IllusionDiffusionHQsupuna"", ""supernatman/illusion-diffusion-hq"", ""MustafaaS/nob"", ""yusufxvn/IllusionDiffusionn"", ""spzla/IllusionDiffusion"", ""pseudotheos/IllusionDiffusionFastAPI"", ""badpenisok/IllusionDiffusion"", ""iykoou/IllusionDiffusion"", ""Sofia123456/fabtabulizer"", ""DeathR/IllusionDiffusionClone"", ""ghostblade/IllusionDiffusion"", ""sub314xxl/IllusionDiffusion"", ""thesamgfx/IllusionDiffusion"", ""flowp/IllusionDiffusion"", ""mikefish/fish-illusion-diffusion"", ""dkarwa/IllusionDiffusion"", ""jimr1603/IllusionDiffusion"", ""maher32/IllusionDiffusion"", ""Zombro01/IllusionDiffusion"", ""Waqasanwar94/IllusionDiffusion"", ""MWare101/IllusionDiffusion"", ""ItsSapde/runwayml-stable-diffusion-v1-5"", ""kabax/IllusionDiffusionBroken"", ""Kalmu/my-personal-space-for-doing-thing-with-stable-diffusion"", ""Antropophobe/QR_Code"", ""Nadhem0101/ChatGPT4"", ""LxFasT/WierdcoppyofIllusionDiffusion"", ""cfLiam/greenballsHaHaHa"", ""WildSp/IllusionDiffusion1"", ""YanzBotz/Illustration_diffusion"", ""Nymbo/real-time-pix2pix-turbo"", ""FaronMentink/IllusionDiffusion"", ""dorkai/IllusionDiffusion"", ""c-w/IllusionDiffusion"", ""Fede1/IllusionDiffusionAI""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-07-21 11:35:31+00:00"", ""cardData"": ""language:\n- en\nlicense: openrail++\ntags:\n- stable-diffusion\n- controlnet\n- qrcode"", ""transformersInfo"": null, ""_id"": ""649706a85e85f281061ab3e3"", ""modelId"": ""monster-labs/control_v1p_sd15_qrcode_monster"", ""usedStorage"": 8689186346}",0,,0,,0,,0,,0,"AP123/IllusionDiffusion, John6666/DiffuseCraftMod, John6666/votepurchase-multiple-model, Menyu/DiffuseCraftMod, TheNetherWatcher/Vid2Vid-using-Text-prompt, andyaii/IllusionDiffusion, bobber/DiffuseCraft, huggingface/InferenceSupport/discussions/new?title=monster-labs/control_v1p_sd15_qrcode_monster&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bmonster-labs%2Fcontrol_v1p_sd15_qrcode_monster%5D(%2Fmonster-labs%2Fcontrol_v1p_sd15_qrcode_monster)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kinsung/qraf, r3gm/DiffuseCraft, radames/Real-Time-Latent-Consistency-Model, radames/real-time-pix2pix-turbo, vittore/pattern-into-image",13
         
detr-resnet-50_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv
ADDED
The diff for this file is too large to render. See raw diff