Naphula committed
Commit aa8807d · verified · 1 Parent(s): 8cd4fe7

Upload vocab_resizer_mistral24B.py

Files changed (1)
  1. vocab_resizer_mistral24B.py +117 -0
vocab_resizer_mistral24B.py ADDED
@@ -0,0 +1,117 @@
+ import torch
+ from safetensors.torch import save_file, load_file
+ import os
+ import json
+ import glob
+ import shutil
+ from collections import OrderedDict
+
+ def fix_vocab_size(model_dir, output_dir, new_vocab_size=131072):
+     """
+     Resizes the vocabulary-dependent tensors of a sharded model to a new size.
+
+     Args:
+         model_dir (str): The directory of the model to fix.
+         output_dir (str): The directory where the fixed model will be saved.
+         new_vocab_size (int): The target vocabulary size.
+     """
+     try:
+         if not os.path.exists(output_dir):
+             os.makedirs(output_dir)
+             print(f"Created output directory: {output_dir}")
+
+         # --- Step 1: Find all safetensor shards ---
+         search_pattern = os.path.join(model_dir, '*.safetensors')
+         shard_paths = sorted(glob.glob(search_pattern))
+         if not shard_paths:
+             print(f"Error: No '.safetensors' files found in {model_dir}")
+             return
+
+         print(f"Found {len(shard_paths)} shards to process.")
+
+         # --- Step 2: Identify which shards contain the vocab tensors ---
+         vocab_tensor_keys = ["model.embed_tokens.weight", "lm_head.weight"]
+         shards_to_modify = {}  # {filename: {key: tensor}}
+
+         for shard_path in shard_paths:
+             with open(shard_path, "rb") as f:
+                 data = f.read()
+                 header_size = int.from_bytes(data[:8], 'little')
+                 header_str = data[8:8+header_size].decode('utf-8')
+                 header = json.loads(header_str)
+
+             for key in header.keys():
+                 if key in vocab_tensor_keys:
+                     filename = os.path.basename(shard_path)
+                     if filename not in shards_to_modify:
+                         shards_to_modify[filename] = {}
+                     # Pull the matching tensor out of this shard (load_file reads the whole shard)
+                     shards_to_modify[filename][key] = load_file(shard_path)[key]
+                     print(f"Found '{key}' in shard: {filename}")
+
+         if not shards_to_modify:
+             print("Error: Could not find 'embed_tokens' or 'lm_head' tensors in any shard.")
+             return
+
+         # --- Step 3: Process all shards, modifying the ones with vocab tensors ---
+         for shard_path in shard_paths:
+             filename = os.path.basename(shard_path)
+             output_shard_path = os.path.join(output_dir, filename)
+
+             # Load all tensors from the current shard
+             tensors = load_file(shard_path)
+
+             if filename in shards_to_modify:
+                 print(f"Resizing tensors in {filename}...")
+                 for key, tensor in shards_to_modify[filename].items():
+                     original_size = tensor.shape[0]
+                     print(f"  - Resizing '{key}' from {original_size} to {new_vocab_size}")
+                     # Trim the tensor to the new vocabulary size
+                     resized_tensor = tensor[:new_vocab_size, :]
+                     tensors[key] = resized_tensor  # Replace the tensor in the loaded dict
+
+             # Save the (potentially modified) tensors to the new location
+             save_file(tensors, output_shard_path, metadata={"format": "pt"})  # keep the standard "pt" format tag
+             print(f"Saved new shard: {output_shard_path}")
+
+         # --- Step 4: Modify and save the config.json ---
+         config_path = os.path.join(model_dir, 'config.json')
+         new_config_path = os.path.join(output_dir, 'config.json')
+         if os.path.exists(config_path):
+             with open(config_path, 'r') as f:
+                 config = json.load(f)
+
+             print(f"\nUpdating config.json: 'vocab_size' from {config.get('vocab_size')} to {new_vocab_size}")
+             config['vocab_size'] = new_vocab_size
+
+             with open(new_config_path, 'w') as f:
+                 json.dump(config, f, indent=2)
+             print(f"Saved new config.json to {new_config_path}")
+         else:
+             print("Warning: config.json not found. Please create it manually.")
+
+         # --- Step 5: Copy other essential files ---
+         for filename in os.listdir(model_dir):
+             if filename.endswith(('.json', '.py', '.md', '.txt')) and filename != 'config.json':
+                 if not os.path.exists(os.path.join(output_dir, filename)):
+                     shutil.copy2(os.path.join(model_dir, filename), output_dir)
+                     print(f"Copied {filename} to output directory.")
+
+         print("\nVocabulary resizing complete. The model is now ready for merging.")
+
+     except Exception as e:
+         print(f"An error occurred: {e}")
+
+ if __name__ == "__main__":
+     # --- Configuration ---
+     # Directory of the original DeepHermes-24B model
+     input_model_directory = r"path/to/your/DeepHermes-24B"
+
+     # Directory to save the fixed, merge-ready model
+     output_model_directory = r"path/to/your/DeepHermes-24B-fixed"
+
+     # The standard vocab size you are targeting for the merge
+     target_vocab_size = 131072
+
+     # --- Run the script ---
+     fix_vocab_size(input_model_directory, output_model_directory, target_vocab_size)