from transformers import PretrainedConfig
from transformers.utils import logging


logger = logging.get_logger(__name__)


class CodeFuseCGELargeConfig(PretrainedConfig):
    model_type = "qwen2"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=151936,
        hidden_size=4096,
        intermediate_size=22016,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=32,
        hidden_act="silu",
        max_position_embeddings=32768,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        use_sliding_window=False,
        sliding_window=4096,
        max_window_layers=28,
        attention_dropout=0.0,
        embedding_method="pma",
        inf_seq_length=1024,
        padding_side="right",
        compress_dim=1024,
        keep_max_layer=32,
        pma_num_heads=32,
        pma_ln=True,
        pma_norm=False,
        pma_norm_mode="post_normal",
        **kwargs,
    ):
        # Standard Qwen2 backbone hyperparameters.
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.use_sliding_window = use_sliding_window
        self.sliding_window = sliding_window if use_sliding_window else None
        self.max_window_layers = max_window_layers

        # Fall back to full multi-head attention when no key/value head count is given.
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads

        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.attention_dropout = attention_dropout

        # Embedding-head settings specific to CodeFuse-CGE (PMA pooling and compression).
        self.embedding_method = embedding_method
        self.inf_seq_length = inf_seq_length
        self.padding_side = padding_side
        self.compress_dim = compress_dim
        self.keep_max_layer = keep_max_layer
        self.pma_num_heads = pma_num_heads
        self.pma_ln = pma_ln
        self.pma_norm = pma_norm
        self.pma_norm_mode = pma_norm_mode

        super().__init__(
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
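

# Minimal usage sketch (assumption: this module ships alongside the CodeFuse-CGE-Large
# checkpoint as its configuration file; the modeling code itself is defined elsewhere).
# It only demonstrates the standard PretrainedConfig API on this subclass.
if __name__ == "__main__":
    config = CodeFuseCGELargeConfig(
        embedding_method="pma",  # pooling strategy used by the embedding head
        compress_dim=1024,       # dimensionality of the compressed embedding
    )
    print(config.model_type)                      # -> "qwen2"
    config.save_pretrained("./cge-large-config")  # writes config.json to that directory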