Upload folder using huggingface_hub
- .gitattributes +1 -0
- added_tokens.json +28 -0
- config.json +33 -0
- generation_config.json +13 -0
- merges.txt +0 -0
- model-00001-of-00002.safetensors +3 -0
- model-00002-of-00002.safetensors +3 -0
- model.safetensors.index.json +405 -0
- modeling_zeranker.py +188 -0
- special_tokens_map.json +31 -0
- tokenizer.json +3 -0
- tokenizer_config.json +241 -0
- vocab.json +0 -0
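
For context, a commit with this message is what `huggingface_hub`'s `upload_folder` produces. A minimal sketch of the call; the repo id and local path below are placeholders, not taken from this page:

```python
# Minimal sketch of the call that produces an "Upload folder using
# huggingface_hub" commit. Repo id and folder path are hypothetical.
from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login`
api.upload_folder(
    folder_path="./zeranker",    # local directory holding the files listed above
    repo_id="org/zeranker",      # placeholder repo id
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
```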
    	
.gitattributes
CHANGED

@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
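
The added rule routes `tokenizer.json` through Git LFS. LFS is transparent to downstream consumers, since the Hub resolves pointers server-side; a small sketch, assuming a placeholder repo id:

```python
# hf_hub_download resolves LFS pointers and returns the real file, not the
# pointer. "org/zeranker" is a placeholder repo id.
from huggingface_hub import hf_hub_download

path = hf_hub_download(repo_id="org/zeranker", filename="tokenizer.json")
print(path)  # local cache path of the full tokenizer.json
```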
    	
added_tokens.json
ADDED

@@ -0,0 +1,28 @@
+{
+  "</think>": 151668,
+  "</tool_call>": 151658,
+  "</tool_response>": 151666,
+  "<think>": 151667,
+  "<tool_call>": 151657,
+  "<tool_response>": 151665,
+  "<|box_end|>": 151649,
+  "<|box_start|>": 151648,
+  "<|endoftext|>": 151643,
+  "<|file_sep|>": 151664,
+  "<|fim_middle|>": 151660,
+  "<|fim_pad|>": 151662,
+  "<|fim_prefix|>": 151659,
+  "<|fim_suffix|>": 151661,
+  "<|im_end|>": 151645,
+  "<|im_start|>": 151644,
+  "<|image_pad|>": 151655,
+  "<|object_ref_end|>": 151647,
+  "<|object_ref_start|>": 151646,
+  "<|quad_end|>": 151651,
+  "<|quad_start|>": 151650,
+  "<|repo_name|>": 151663,
+  "<|video_pad|>": 151656,
+  "<|vision_end|>": 151653,
+  "<|vision_pad|>": 151654,
+  "<|vision_start|>": 151652
+}
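
These entries extend the base vocabulary with chat, tool-use, thinking, FIM, and vision placeholder tokens. A quick sanity check that the tokenizer resolves them to the ids declared above (placeholder repo id):

```python
# Check that the special tokens map to the ids in added_tokens.json.
# "org/zeranker" is a placeholder repo id.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("org/zeranker")
assert tok.convert_tokens_to_ids("<|im_end|>") == 151645
assert tok.convert_tokens_to_ids("<think>") == 151667
assert tok.convert_tokens_to_ids("</think>") == 151668
```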
    	
config.json
ADDED

@@ -0,0 +1,33 @@
+{
+    "architectures": [
+        "Qwen3ForCausalLM"
+    ],
+    "attention_bias": false,
+    "attention_dropout": 0.0,
+    "bos_token_id": 151643,
+    "eos_token_id": 151645,
+    "head_dim": 128,
+    "hidden_act": "silu",
+    "hidden_size": 2560,
+    "initializer_range": 0.02,
+    "intermediate_size": 9728,
+    "max_position_embeddings": 40960,
+    "max_window_layers": 36,
+    "model_type": "qwen3",
+    "num_attention_heads": 32,
+    "num_hidden_layers": 36,
+    "num_key_value_heads": 8,
+    "rms_norm_eps": 1e-06,
+    "rope_scaling": null,
+    "rope_theta": 1000000,
+    "sliding_window": null,
+    "tie_word_embeddings": true,
+    "torch_dtype": "bfloat16",
+    "transformers_version": "4.51.3",
+    "use_cache": true,
+    "use_sliding_window": false,
+    "vocab_size": 151936,
+    "auto_map": {
+        "AutoConfig": "modeling_zeranker.ZEConfig"
+    }
+}
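
The `auto_map` entry points `AutoConfig` at the custom `ZEConfig` class in `modeling_zeranker.py`, so Auto-class loading needs `trust_remote_code=True`. A sketch under that assumption (placeholder repo id; exact Auto-class resolution depends on how `ZEConfig` is defined in the shipped module):

```python
# config.json maps AutoConfig to modeling_zeranker.ZEConfig, i.e. custom code
# shipped in the repo, so trust_remote_code=True is required.
import torch
from transformers import AutoConfig, AutoModelForCausalLM

cfg = AutoConfig.from_pretrained("org/zeranker", trust_remote_code=True)
print(type(cfg).__name__)  # ZEConfig, loaded from modeling_zeranker.py

model = AutoModelForCausalLM.from_pretrained(
    "org/zeranker",
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16"
    trust_remote_code=True,
)
```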
    	
generation_config.json
ADDED

@@ -0,0 +1,13 @@
+{
+  "bos_token_id": 151643,
+  "do_sample": true,
+  "eos_token_id": [
+    151645,
+    151643
+  ],
+  "pad_token_id": 151643,
+  "temperature": 0.6,
+  "top_k": 20,
+  "top_p": 0.95,
+  "transformers_version": "4.51.3"
+}
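
These are the sampling defaults that `generate()` falls back to; they can be inspected or overridden through `GenerationConfig` (placeholder repo id):

```python
# Inspect the defaults model.generate() will use for this checkpoint.
# "org/zeranker" is a placeholder repo id.
from transformers import GenerationConfig

gen = GenerationConfig.from_pretrained("org/zeranker")
print(gen.temperature, gen.top_p, gen.top_k)  # 0.6 0.95 20
print(gen.eos_token_id)                       # [151645, 151643]
```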
    	
merges.txt
ADDED

The diff for this file is too large to render. See raw diff.
    	
model-00001-of-00002.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f37773ac017dadc7e0745838258f3d18633f9140fed9ada7bda84d9efc7791d7
+size 4967215360
    	
model-00002-of-00002.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0f1296b2f346ad7915a015e94a6af5389c02b26a8056febe82fe69f000f4100b
+size 3077766632
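
Both shards are committed as Git LFS pointers: only the sha256 oid and byte size live in git, and the Hub serves the real files. Once a shard is downloaded, both fields can be verified locally; a sketch using the values from the first pointer above (assumes the resolved shard sits in the working directory):

```python
# Verify a downloaded shard against its LFS pointer (oid + size). The path
# assumes the resolved model-00001 shard is in the current directory.
import hashlib
import os

def sha256_of(path: str, chunk: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

path = "model-00001-of-00002.safetensors"
assert os.path.getsize(path) == 4967215360
assert sha256_of(path) == "f37773ac017dadc7e0745838258f3d18633f9140fed9ada7bda84d9efc7791d7"
```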
    	
model.safetensors.index.json
ADDED

@@ -0,0 +1,405 @@
+{
+  "metadata": {
+    "total_size": 8044936192
+  },
+  "weight_map": {
+    "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+    "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.0.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.0.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.1.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.1.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.10.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.10.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.11.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.11.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.12.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.12.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.13.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.13.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.14.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.14.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.15.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.15.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.16.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.16.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.17.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.17.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.18.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.18.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.19.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.19.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.2.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.2.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.20.input_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.20.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.20.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.20.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.21.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.21.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.21.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.22.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.22.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.23.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.23.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.24.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.24.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.25.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.25.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.26.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.26.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.27.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.27.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.28.input_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.28.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.28.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.28.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.28.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.28.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.28.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.28.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.28.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.28.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.28.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.29.input_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.29.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.29.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.29.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.29.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.29.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.29.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.29.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.29.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.29.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.29.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.3.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.3.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.30.input_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.30.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.30.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.30.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.30.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.30.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.30.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.30.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.30.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.30.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.30.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.31.input_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.31.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.31.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.31.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.31.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.31.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.31.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.31.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.31.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.31.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.31.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.32.input_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.32.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.32.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.32.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.32.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.32.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.32.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.32.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.32.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.32.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.32.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.33.input_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.33.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.33.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.33.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.33.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.33.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.33.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.33.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.33.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.33.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.33.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.34.input_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.34.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.34.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.34.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.34.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.34.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.34.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.34.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.34.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.34.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.34.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.35.input_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.35.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.35.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.35.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.35.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.35.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.35.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.35.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.35.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.35.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.35.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.4.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.4.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.5.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.5.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.6.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.6.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.7.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.7.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.8.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 388 | 
            +
                "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 389 | 
            +
                "model.layers.8.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
         | 
| 390 | 
            +
                "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 391 | 
            +
                "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 392 | 
            +
                "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
         | 
| 393 | 
            +
                "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 394 | 
            +
                "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 395 | 
            +
                "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 396 | 
            +
                "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
         | 
| 397 | 
            +
                "model.layers.9.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
         | 
| 398 | 
            +
                "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 399 | 
            +
                "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 400 | 
            +
                "model.layers.9.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
         | 
| 401 | 
            +
                "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 402 | 
            +
                "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 403 | 
            +
                "model.norm.weight": "model-00002-of-00002.safetensors"
         | 
| 404 | 
            +
              }
         | 
| 405 | 
            +
            }
         | 
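
The block above closes the weight_map of model.safetensors.index.json: every tensor name resolves to one of the two shard files, so a loader only needs to open the shards it actually uses. A minimal sketch of that lookup, assuming the index file has been downloaded locally (the local path is the only assumption here):

import json
from collections import defaultdict

# Read the shard index that ships alongside the two .safetensors files.
with open("model.safetensors.index.json") as f:
    index = json.load(f)

weight_map: dict[str, str] = index["weight_map"]

# Which shard holds a given tensor?
print(weight_map["model.layers.4.self_attn.q_proj.weight"])
# -> model-00001-of-00002.safetensors

# Group tensor names by shard so each file only needs to be opened once.
by_shard: dict[str, list[str]] = defaultdict(list)
for name, shard in weight_map.items():
    by_shard[shard].append(name)
for shard, names in sorted(by_shard.items()):
    print(f"{shard}: {len(names)} tensors")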
    	
        modeling_zeranker.py
    ADDED
    
@@ -0,0 +1,188 @@
from sentence_transformers import CrossEncoder as _CE

import math
from typing import cast

import torch
from transformers.configuration_utils import PretrainedConfig
from transformers.models.auto.configuration_auto import AutoConfig
from transformers.models.auto.modeling_auto import AutoModelForCausalLM
from transformers.models.auto.tokenization_auto import AutoTokenizer
from transformers.models.gemma3.modeling_gemma3 import (
    Gemma3ForCausalLM,
    Gemma3ForConditionalGeneration,
)
from transformers.models.llama.modeling_llama import LlamaForCausalLM
from transformers.models.qwen3.modeling_qwen3 import Qwen3ForCausalLM
from transformers.tokenization_utils_base import BatchEncoding
from transformers.tokenization_utils_fast import PreTrainedTokenizerFast

# pyright: reportUnknownMemberType=false
# pyright: reportUnknownVariableType=false

MODEL_PATH = "zeroentropy/ze-rerank-large-v0.3.0"
PER_DEVICE_BATCH_SIZE_TOKENS = 15_000


def format_pointwise_datapoints(
    tokenizer: PreTrainedTokenizerFast,
    query_documents: list[tuple[str, str]],
) -> BatchEncoding:
    # Render each (query, document) pair through the chat template:
    # the query becomes the system turn, the document the user turn.
    input_texts: list[str] = []
    for query, document in query_documents:
        system_prompt = f"""
{query}
""".strip()
        user_message = f"""
{document}
""".strip()
        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_message},
        ]
        input_text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True,
        )
        assert isinstance(input_text, str)
        input_texts.append(input_text)

    batch_inputs = tokenizer(
        input_texts,
        padding=True,
        return_tensors="pt",
    )
    return batch_inputs


def load_model(
    device: torch.device | None = None,
) -> tuple[
    PreTrainedTokenizerFast,
    LlamaForCausalLM
    | Gemma3ForConditionalGeneration
    | Gemma3ForCausalLM
    | Qwen3ForCausalLM,
]:
    if device is None:
        device = torch.device("cpu")

    config = AutoConfig.from_pretrained(MODEL_PATH)
    assert isinstance(config, PretrainedConfig)

    model = AutoModelForCausalLM.from_pretrained(
        MODEL_PATH,
        torch_dtype="auto",
        quantization_config=None,
        device_map={"": device},
    )
    if config.model_type == "llama":
        model.config.attn_implementation = "flash_attention_2"
    print(f"Model Type: {config.model_type}")
    assert isinstance(
        model,
        LlamaForCausalLM
        | Gemma3ForConditionalGeneration
        | Gemma3ForCausalLM
        | Qwen3ForCausalLM,
    )

    tokenizer = cast(
        AutoTokenizer,
        AutoTokenizer.from_pretrained(
            MODEL_PATH,
            padding_side="right",
        ),
    )
    assert isinstance(tokenizer, PreTrainedTokenizerFast)

    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    return tokenizer, model


def predict(self, query_documents: list[tuple[str, str]]) -> list[float]:
    # Lazily load the model and tokenizer on first use, caching them on the
    # CrossEncoder instance.
    if not hasattr(self, "inner_model"):
        self.inner_tokenizer, self.inner_model = load_model(torch.device("cuda"))
        self.inner_model.gradient_checkpointing_enable()
        self.inner_model.eval()
        # Token id whose logit is read out as the relevance signal.
        self.inner_yes_token_id = self.inner_tokenizer.encode("Yes", add_special_tokens=False)[0]
        print("patched")

    model = self.inner_model
    tokenizer = self.inner_tokenizer

    # Truncate overly long queries and documents (character-based).
    query_documents = [
        (query[:2_000], document[:10_000]) for query, document in query_documents
    ]
    # Sort pairs by descending total length so each batch holds similar-length
    # inputs and padding waste stays low; the permutation is undone at the end.
    permutation = list(range(len(query_documents)))
    permutation.sort(key=lambda i: -len(query_documents[i][0]) - len(query_documents[i][1]))
    query_documents = [query_documents[i] for i in permutation]

    device = torch.device("cuda")

    # Pack the (query, document) pairs into batches that fit the per-device
    # token budget, using character length as a proxy for token count.
    max_length = 0
    batches: list[list[tuple[str, str]]] = []
    for query, document in query_documents:
        if (
            len(batches) == 0
            or (len(batches[-1]) + 1) * max(max_length, len(query) + len(document))
            > PER_DEVICE_BATCH_SIZE_TOKENS
        ):
            batches.append([])
            max_length = 0

        batches[-1].append((query, document))
        max_length = max(max_length, 20 + len(query) + len(document))

    # Run inference on all of the document batches.
    all_logits: list[float] = []
    for batch in batches:
        batch_inputs = format_pointwise_datapoints(
            tokenizer,
            batch,
        )

        batch_inputs = batch_inputs.to(device)

        try:
            outputs = model(**batch_inputs, use_cache=False)
        except torch.OutOfMemoryError:
            print(f"GPU OOM! {torch.cuda.memory_reserved()}")
            torch.cuda.empty_cache()
            print(f"GPU After OOM Cache Clear: {torch.cuda.memory_reserved()}")
            outputs = model(**batch_inputs, use_cache=False)

        # Extract the logits at the last non-padding position of each row.
        logits = cast(torch.Tensor, outputs.logits)
        attention_mask = cast(torch.Tensor, batch_inputs.attention_mask)
        last_positions = attention_mask.sum(dim=1) - 1

        batch_size = logits.shape[0]
        batch_indices = torch.arange(batch_size, device=device)
        last_logits = logits[batch_indices, last_positions]

        # Scale the "Yes" logit down by 5 before the sigmoid.
        yes_logits = last_logits[:, self.inner_yes_token_id]
        all_logits.extend([float(logit) / 5.0 for logit in yes_logits])

    def sigmoid(x: float) -> float:
        return 1 / (1 + math.exp(-x))

    scores = [sigmoid(logit) for logit in all_logits]

    # Undo the length sort to restore the original input order.
    scores = [score for _, score in sorted(zip(permutation, scores, strict=True))]

    return scores


# Monkey-patch CrossEncoder so sentence-transformers routes scoring through
# the pointwise implementation above.
_CE.predict = predict

from transformers import Qwen3Config

# Alias so code referencing a ZEConfig class resolves to the Qwen3 config.
ZEConfig = Qwen3Config
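
Because the module assigns predict directly onto sentence_transformers.CrossEncoder, importing it is enough to reroute every CrossEncoder scoring call through the pointwise Yes-logit path above. A minimal usage sketch with placeholder query/document strings; a CUDA device is assumed since predict hard-codes torch.device("cuda"), and whether the bare constructor call suffices depends on how the repo's config wires in this module:

import modeling_zeranker  # noqa: F401 -- importing applies the CrossEncoder.predict patch
from sentence_transformers import CrossEncoder

reranker = CrossEncoder("zeroentropy/ze-rerank-large-v0.3.0")
pairs = [
    ("what is the capital of France?", "Paris is the capital and largest city of France."),
    ("what is the capital of France?", "The Amazon rainforest spans nine countries."),
]
# Scores come back in input order as sigmoid(yes_logit / 5), one float per pair.
scores = reranker.predict(pairs)
print(scores)

The length sorting inside predict is purely an efficiency trick: packing similar-length pairs together minimizes per-batch padding, and the permutation is undone before the scores are returned.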
    	
        special_tokens_map.json
    ADDED
    
@@ -0,0 +1,31 @@
{
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>",
    "<|object_ref_start|>",
    "<|object_ref_end|>",
    "<|box_start|>",
    "<|box_end|>",
    "<|quad_start|>",
    "<|quad_end|>",
    "<|vision_start|>",
    "<|vision_end|>",
    "<|vision_pad|>",
    "<|image_pad|>",
    "<|video_pad|>"
  ],
  "eos_token": {
    "content": "<|im_end|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
    	
        tokenizer.json
    ADDED
    
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:aeb13307a71acd8fe81861d94ad54ab689df773318809eed3cbe794b4492dae4
size 11422654
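
tokenizer.json is stored via Git LFS, so the diff records only the pointer file: the spec version, the SHA-256 of the real payload, and its size in bytes. A small sketch for checking a downloaded copy against this pointer; the local path is an assumption:

import hashlib
import os

path = "tokenizer.json"  # assumed local download of the ~11 MB payload

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert os.path.getsize(path) == 11422654, "size differs from the LFS pointer"
assert h.hexdigest() == "aeb13307a71acd8fe81861d94ad54ab689df773318809eed3cbe794b4492dae4"
print("tokenizer.json matches its LFS pointer")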
    	
        tokenizer_config.json
    ADDED
    
@@ -0,0 +1,241 @@
{
  "add_bos_token": false,
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "151643": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151644": {
      "content": "<|im_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151645": {
      "content": "<|im_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151646": {
      "content": "<|object_ref_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151647": {
      "content": "<|object_ref_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151648": {
      "content": "<|box_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151649": {
      "content": "<|box_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151650": {
      "content": "<|quad_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151651": {
      "content": "<|quad_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151652": {
      "content": "<|vision_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151653": {
      "content": "<|vision_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151654": {
      "content": "<|vision_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151655": {
      "content": "<|image_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151656": {
      "content": "<|video_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151657": {
      "content": "<tool_call>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151658": {
      "content": "</tool_call>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151659": {
      "content": "<|fim_prefix|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151660": {
      "content": "<|fim_middle|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151661": {
      "content": "<|fim_suffix|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151662": {
      "content": "<|fim_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151663": {
      "content": "<|repo_name|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151664": {
      "content": "<|file_sep|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151665": {
      "content": "<tool_response>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151666": {
      "content": "</tool_response>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151667": {
      "content": "<think>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151668": {
      "content": "</think>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    }
  },
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>",
    "<|object_ref_start|>",
    "<|object_ref_end|>",
    "<|box_start|>",
    "<|box_end|>",
    "<|quad_start|>",
    "<|quad_end|>",
    "<|vision_start|>",
    "<|vision_end|>",
    "<|vision_pad|>",
    "<|image_pad|>",
    "<|video_pad|>"
  ],
  "bos_token": null,
  "chat_template": "{%- if tools %}\n    {{- '<|im_start|>system\\n' }}\n    {%- if messages[0].role == 'system' %}\n        {{- messages[0].content + '\\n\\n' }}\n    {%- endif %}\n    {{- \"# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n    {%- for tool in tools %}\n        {{- \"\\n\" }}\n        {{- tool | tojson }}\n    {%- endfor %}\n    {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n    {%- if messages[0].role == 'system' %}\n        {{- '<|im_start|>system\\n' + messages[0].content + '<|im_end|>\\n' }}\n    {%- endif %}\n{%- endif %}\n{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}\n{%- for message in messages[::-1] %}\n    {%- set index = (messages|length - 1) - loop.index0 %}\n    {%- if ns.multi_step_tool and message.role == \"user\" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}\n        {%- set ns.multi_step_tool = false %}\n        {%- set ns.last_query_index = index %}\n    {%- endif %}\n{%- endfor %}\n{%- for message in messages %}\n    {%- if message.content is string %}\n        {%- set content = message.content %}\n    {%- else %}\n        {%- set content = '' %}\n    {%- endif %}\n    {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) %}\n        {{- '<|im_start|>' + message.role + '\\n' + content + '<|im_end|>' + '\\n' }}\n    {%- elif message.role == \"assistant\" %}\n        {%- set reasoning_content = '' %}\n        {%- if message.reasoning_content is string %}\n            {%- set reasoning_content = message.reasoning_content %}\n        {%- else %}\n            {%- if '</think>' in content %}\n                {%- set reasoning_content = content.split('</think>')[0].rstrip('\\n').split('<think>')[-1].lstrip('\\n') %}\n                {%- set content = content.split('</think>')[-1].lstrip('\\n') %}\n            {%- endif %}\n        {%- endif %}\n        {%- if loop.index0 > ns.last_query_index %}\n            {%- if loop.last or (not loop.last and reasoning_content) %}\n                {{- '<|im_start|>' + message.role + '\\n<think>\\n' + reasoning_content.strip('\\n') + '\\n</think>\\n\\n' + content.lstrip('\\n') }}\n            {%- else %}\n                {{- '<|im_start|>' + message.role + '\\n' + content }}\n            {%- endif %}\n        {%- else %}\n            {{- '<|im_start|>' + message.role + '\\n' + content }}\n        {%- endif %}\n        {%- if message.tool_calls %}\n            {%- for tool_call in message.tool_calls %}\n                {%- if (loop.first and content) or (not loop.first) %}\n                    {{- '\\n' }}\n                {%- endif %}\n                {%- if tool_call.function %}\n                    {%- set tool_call = tool_call.function %}\n                {%- endif %}\n                {{- '<tool_call>\\n{\"name\": \"' }}\n                {{- tool_call.name }}\n                {{- '\", \"arguments\": ' }}\n                {%- if tool_call.arguments is string %}\n                    {{- tool_call.arguments }}\n                {%- else %}\n                    {{- tool_call.arguments | tojson }}\n                {%- endif %}\n                {{- '}\\n</tool_call>' }}\n            {%- endfor %}\n        {%- endif %}\n        {{- '<|im_end|>\\n' }}\n    {%- elif message.role == \"tool\" %}\n        {%- if loop.first or (messages[loop.index0 - 1].role != \"tool\") %}\n            {{- '<|im_start|>user' }}\n        {%- endif %}\n        {{- '\\n<tool_response>\\n' }}\n        {{- content }}\n        {{- '\\n</tool_response>' }}\n        {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n            {{- '<|im_end|>\\n' }}\n        {%- endif %}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n    {%- if enable_thinking is defined and enable_thinking is false %}\n        {{- '<think>\\n\\n</think>\\n\\n' }}\n    {%- endif %}\n{%- endif %}",
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|im_end|>",
  "errors": "replace",
  "extra_special_tokens": {},
  "model_max_length": 131072,
  "pad_token": "<|endoftext|>",
  "padding_side": "right",
  "split_special_tokens": false,
  "tokenizer_class": "Qwen2Tokenizer",
  "unk_token": null
}
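
This chat_template is what format_pointwise_datapoints in modeling_zeranker.py relies on: the query fills the system turn, the document the user turn, and add_generation_prompt=True appends the <|im_start|>assistant header so the "Yes" logit can be read at the final prompt position. A small sketch of the rendering, with placeholder strings; the enable_thinking switch is the one handled at the end of the template above:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("zeroentropy/ze-rerank-large-v0.3.0")

messages = [
    {"role": "system", "content": "what is the capital of France?"},  # the query
    {"role": "user", "content": "Paris is the capital of France."},   # the document
]

# Render without tokenizing to inspect the exact prompt the reranker scores.
text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(text)

# enable_thinking=False makes the template emit an empty <think></think> block
# right after the assistant header.
text_no_think = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True, enable_thinking=False
)
print(text_no_think)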
    	
        vocab.json
    ADDED
    
The diff for this file is too large to render. See raw diff

