| """ PyTorch Moss model.""" | |
| from typing import Optional, Tuple, Union | |
| import torch | |
| import torch.utils.checkpoint | |
| from torch import nn | |
| from torch.nn import CrossEntropyLoss | |
| from transformers.activations import ACT2FN | |
| from transformers.modeling_utils import PreTrainedModel | |
| from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast | |
| from transformers.utils import ( | |
| add_code_sample_docstrings, | |
| add_start_docstrings, | |
| add_start_docstrings_to_model_forward, | |
| logging | |
| ) | |
| from .configuration_moss import MossConfig | |
| logger = logging.get_logger(__name__) | |
| _CHECKPOINT_FOR_DOC = "fnlp/moss-moon-003-base" | |
| _CONFIG_FOR_DOC = "MossConfig" | |
| MOSS_PRETRAINED_MODEL_ARCHIVE_LIST = [ | |
| "fnlp/moss-moon-003-base", | |
| "fnlp/moss-moon-003-sft", | |
| "fnlp/moss-moon-003-sft-plugin", | |
| ] | |
| # Copied from transformers.models.gptj.modeling_gptj.create_sinusoidal_positions | |
| def create_sinusoidal_positions(num_pos: int, dim: int) -> torch.Tensor: | |
| inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2) / dim)) | |
| sinusoid_inp = torch.einsum("i , j -> i j", torch.arange(num_pos, dtype=torch.float), inv_freq).float() | |
| return torch.cat((torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)), dim=1) | |


# Copied from transformers.models.gptj.modeling_gptj.rotate_every_two
def rotate_every_two(x: torch.Tensor) -> torch.Tensor:
    x1 = x[:, :, :, ::2]
    x2 = x[:, :, :, 1::2]
    x = torch.stack((-x2, x1), dim=-1)
    return x.flatten(-2)  # in einsum notation: rearrange(x, '... d j -> ... (d j)')


# Copied from transformers.models.gptj.modeling_gptj.apply_rotary_pos_emb
def apply_rotary_pos_emb(tensor: torch.Tensor, sin: torch.Tensor, cos: torch.Tensor) -> torch.Tensor:
    sin = torch.repeat_interleave(sin[:, :, None, :], 2, 3)
    cos = torch.repeat_interleave(cos[:, :, None, :], 2, 3)
    return (tensor * cos) + (rotate_every_two(tensor) * sin)
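

# Shape summary for the rotary helpers above:
#   create_sinusoidal_positions(num_pos, dim) -> [num_pos, dim]; the first dim//2 columns hold sin, the last dim//2 cos.
#   rotate_every_two(x) maps each channel pair (x0, x1) to (-x1, x0) while preserving x's shape.
#   apply_rotary_pos_emb(tensor, sin, cos) expects sin/cos of shape [batch, seq, rotary_dim//2]; they are
#   repeat-interleaved back to rotary_dim and combined as tensor * cos + rotate_every_two(tensor) * sin.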


class MossAttention(nn.Module):
    def __init__(self, config):
        super().__init__()

        max_positions = config.max_position_embeddings
        self.register_buffer(
            "causal_mask",
            torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(
                1, 1, max_positions, max_positions
            ),
        )

        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)

        self.embed_dim = config.hidden_size
        self.num_attention_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_attention_heads
        if self.head_dim * self.num_attention_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_attention_heads (got `embed_dim`: {self.embed_dim} and"
                f" `num_attention_heads`: {self.num_attention_heads})."
            )
        self.scale_attn = torch.sqrt(torch.tensor(self.head_dim, dtype=torch.float32)).to(torch.get_default_dtype())
        self.qkv_proj = nn.Linear(self.embed_dim, self.embed_dim * 3, bias=False)

        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
        self.rotary_dim = config.rotary_dim
        pos_embd_dim = self.rotary_dim or self.embed_dim
        self.embed_positions = create_sinusoidal_positions(max_positions, pos_embd_dim)

    def _split_heads(self, x, n_head, dim_head, mp_num):
        reshaped = x.reshape(x.shape[:-1] + (n_head // mp_num, dim_head))
        reshaped = reshaped.reshape(x.shape[:-2] + (-1,) + reshaped.shape[-1:])
        return reshaped

    def _merge_heads(self, tensor, num_attention_heads, attn_head_size):
        """
        Merges attn_head_size dim and num_attn_heads dim into n_ctx
        """
        if len(tensor.shape) == 5:
            tensor = tensor.permute(0, 1, 3, 2, 4).contiguous()
        elif len(tensor.shape) == 4:
            tensor = tensor.permute(0, 2, 1, 3).contiguous()
        else:
            raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}")
        new_shape = tensor.size()[:-2] + (num_attention_heads * attn_head_size,)
        return tensor.view(new_shape)

    def _attn(
        self,
        query,
        key,
        value,
        attention_mask=None,
        head_mask=None,
    ):
        # compute causal mask from causal mask buffer
        query_length, key_length = query.size(-2), key.size(-2)
        causal_mask = self.causal_mask[:, :, key_length - query_length : key_length, :key_length]

        # Keep the attention weights computation in fp32 to avoid overflow issues
        query = query.to(torch.float32)
        key = key.to(torch.float32)

        attn_weights = torch.matmul(query, key.transpose(-1, -2))

        attn_weights = attn_weights / self.scale_attn
        mask_value = torch.finfo(attn_weights.dtype).min
        # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
        # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
        mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
        attn_weights = torch.where(causal_mask, attn_weights, mask_value)

        if attention_mask is not None:
            # Apply the attention mask
            attn_weights = attn_weights + attention_mask

        attn_weights = nn.Softmax(dim=-1)(attn_weights)
        attn_weights = attn_weights.to(value.dtype)
        attn_weights = self.attn_dropout(attn_weights)

        # Mask heads if we want to
        if head_mask is not None:
            attn_weights = attn_weights * head_mask

        attn_output = torch.matmul(attn_weights, value)

        return attn_output, attn_weights

    def forward(
        self,
        hidden_states: Optional[torch.FloatTensor],
        layer_past: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
    ) -> Union[
        Tuple[torch.Tensor, Tuple[torch.Tensor]],
        Optional[Tuple[torch.Tensor, Tuple[torch.Tensor], Tuple[torch.Tensor, ...]]],
    ]:
        qkv = self.qkv_proj(hidden_states)
        # TODO(enijkamp): factor out number of logical TPU-v4 cores or make forward pass agnostic
        mp_num = 4
        qkv_split = qkv.reshape(qkv.shape[:-1] + (mp_num, -1))

        local_dim = self.head_dim * self.num_attention_heads // mp_num
        query, value, key = torch.split(qkv_split, local_dim, dim=-1)
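        # The fused qkv projection is stored in mp_num (=4) interleaved shards, a layout inherited from
        # CodeGen's TPU model parallelism: qkv is [batch, seq, 3 * embed_dim], the reshape above exposes the
        # shards, the split separates query/value/key, and _split_heads below restores the usual
        # [batch, seq, num_attention_heads, head_dim] layout.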
        query = self._split_heads(query, self.num_attention_heads, self.head_dim, mp_num=mp_num)
        key = self._split_heads(key, self.num_attention_heads, self.head_dim, mp_num=mp_num)

        value = self._split_heads(value, self.num_attention_heads, self.head_dim, mp_num=mp_num)
        value = value.permute(0, 2, 1, 3)

        embed_positions = self.embed_positions
        if embed_positions.device != position_ids.device:
            embed_positions = embed_positions.to(position_ids.device)
            self.embed_positions = embed_positions

        sincos = embed_positions[position_ids]
        sin, cos = torch.split(sincos, sincos.shape[-1] // 2, dim=-1)

        if self.rotary_dim is not None:
            k_rot = key[:, :, :, : self.rotary_dim]
            k_pass = key[:, :, :, self.rotary_dim :]

            q_rot = query[:, :, :, : self.rotary_dim]
            q_pass = query[:, :, :, self.rotary_dim :]

            k_rot = apply_rotary_pos_emb(k_rot, sin, cos)
            q_rot = apply_rotary_pos_emb(q_rot, sin, cos)

            key = torch.cat([k_rot, k_pass], dim=-1)
            query = torch.cat([q_rot, q_pass], dim=-1)
        else:
            key = apply_rotary_pos_emb(key, sin, cos)
            query = apply_rotary_pos_emb(query, sin, cos)

        key = key.permute(0, 2, 1, 3)
        query = query.permute(0, 2, 1, 3)

        if layer_past is not None:
            past_key = layer_past[0]
            past_value = layer_past[1]
            key = torch.cat((past_key, key), dim=-2)
            value = torch.cat((past_value, value), dim=-2)

        if use_cache is True:
            present = (key, value)
        else:
            present = None

        # compute self-attention: V x Softmax(QK^T)
        attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)

        attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_dim)
        attn_output = self.out_proj(attn_output)
        attn_output = self.resid_dropout(attn_output)

        outputs = (attn_output, present)
        if output_attentions:
            outputs += (attn_weights,)

        return outputs  # a, present, (attentions)


# Copied from transformers.models.gptj.modeling_gptj.GPTJMLP with GPTJ->Moss
class MossMLP(nn.Module):
    def __init__(self, intermediate_size, config):  # in MLP: intermediate_size = 4 * embed_dim
        super().__init__()
        embed_dim = config.n_embd

        self.fc_in = nn.Linear(embed_dim, intermediate_size)
        self.fc_out = nn.Linear(intermediate_size, embed_dim)

        self.act = ACT2FN[config.activation_function]
        self.dropout = nn.Dropout(config.resid_pdrop)

    def forward(self, hidden_states: Optional[torch.FloatTensor]) -> torch.FloatTensor:
        hidden_states = self.fc_in(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.fc_out(hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states


# Copied from transformers.models.gptj.modeling_gptj.GPTJBlock with GPTJ->Moss
class MossBlock(nn.Module):
    def __init__(self, config):
        super().__init__()
        inner_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd
        self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.attn = MossAttention(config)
        self.mlp = MossMLP(inner_dim, config)

    def forward(
        self,
        hidden_states: Optional[torch.FloatTensor],
        layer_past: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
    ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
        residual = hidden_states
        hidden_states = self.ln_1(hidden_states)
        attn_outputs = self.attn(
            hidden_states=hidden_states,
            layer_past=layer_past,
            attention_mask=attention_mask,
            position_ids=position_ids,
            head_mask=head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
        )
        attn_output = attn_outputs[0]  # output_attn: a, present, (attentions)
        outputs = attn_outputs[1:]

        feed_forward_hidden_states = self.mlp(hidden_states)
        hidden_states = attn_output + feed_forward_hidden_states + residual
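        # Parallel (GPT-J style) residual: the attention and MLP branches both read the same LayerNorm
        # output and are summed with the residual in one step, rather than being applied sequentially.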

        if use_cache:
            outputs = (hidden_states,) + outputs
        else:
            outputs = (hidden_states,) + outputs[1:]

        return outputs  # hidden_states, present, (attentions)


class MossPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = MossConfig
    base_model_prefix = "transformer"
    supports_gradient_checkpointing = True
    _no_split_modules = ["MossBlock"]

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, (nn.Linear,)):
            # Slightly different from Mesh Transformer JAX which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, MossModel):
            module.gradient_checkpointing = value


MOSS_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
    behavior.

    Parameters:
        config ([`MossConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOSS_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_attention_heads,)` or `(n_layer, num_attention_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_dim)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


class MossModel(MossPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.embed_dim = config.n_embd
        self.vocab_size = config.vocab_size
        self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
        self.drop = nn.Dropout(config.embd_pdrop)
        self.h = nn.ModuleList([MossBlock(config) for _ in range(config.n_layer)])
        self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
        self.rotary_dim = min(config.rotary_dim, config.n_ctx // config.num_attention_heads)

        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.wte

    def set_input_embeddings(self, new_embeddings):
        self.wte = new_embeddings

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
            batch_size = input_ids.shape[0]
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size = inputs_embeds.shape[0]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, input_shape[-1])

        if position_ids is not None:
            position_ids = position_ids.view(-1, input_shape[-1]).long()

        if past_key_values is None:
            past_length = 0
            past_key_values = tuple([None] * len(self.h))
        else:
            past_length = past_key_values[0][0].size(-2)

        if position_ids is None:
            position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])

        # Attention mask.
        if attention_mask is not None:
            if batch_size <= 0:
                raise ValueError("batch_size has to be defined and > 0")
            attention_mask = attention_mask.view(batch_size, -1)
            # We create a 3D attention mask from a 2D tensor mask.
            # Sizes are [batch_size, 1, 1, to_seq_length]
            # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
            # this attention mask is more simple than the triangular masking of causal attention
            # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
            attention_mask = attention_mask[:, None, None, :]

            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
            # masked positions, this operation will create a tensor which is 0.0 for
            # positions we want to attend and the dtype's smallest value for masked positions.
            # Since we are adding it to the raw scores before the softmax, this is
            # effectively the same as removing these entirely.
            attention_mask = attention_mask.to(dtype=self.dtype)  # fp16 compatibility
            attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
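            # For example, a mask row [1, 1, 1, 0] becomes [0.0, 0.0, 0.0, finfo.min]; added to the raw
            # attention scores, the padded position is effectively removed from the softmax.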

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x num_attention_heads x N x N
        # head_mask has shape n_layer x batch x num_attention_heads x N x N
        head_mask = self.get_head_mask(head_mask, self.config.n_layer)

        if inputs_embeds is None:
            inputs_embeds = self.wte(input_ids)

        hidden_states = inputs_embeds

        if token_type_ids is not None:
            token_type_embeds = self.wte(token_type_ids)
            hidden_states = hidden_states + token_type_embeds

        hidden_states = self.drop(hidden_states)

        output_shape = input_shape + (hidden_states.size(-1),)

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
                    "`use_cache=False`..."
                )
                use_cache = False

        presents = () if use_cache else None
        all_self_attentions = () if output_attentions else None
        all_hidden_states = () if output_hidden_states else None
        for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            if self.gradient_checkpointing and self.training:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        # None for past_key_value
                        return module(*inputs, use_cache, output_attentions)

                    return custom_forward

                outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(block),
                    hidden_states,
                    None,
                    attention_mask,
                    position_ids,
                    head_mask[i],
                )
            else:
                outputs = block(
                    hidden_states=hidden_states,
                    layer_past=layer_past,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    head_mask=head_mask[i],
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                )

            hidden_states = outputs[0]
            if use_cache is True:
                presents = presents + (outputs[1],)

            if output_attentions:
                all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)

        hidden_states = self.ln_f(hidden_states)

        hidden_states = hidden_states.view(output_shape)
        # Add last hidden state
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=presents,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )


class MossForCausalLM(MossPreTrainedModel):
    _keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.causal_mask"]

    def __init__(self, config):
        super().__init__(config)
        self.transformer = MossModel(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **kwargs):
        token_type_ids = kwargs.get("token_type_ids", None)
        # only last token for inputs_ids if past is defined in kwargs
        if past_key_values:
            input_ids = input_ids[:, -1].unsqueeze(-1)
            if token_type_ids is not None:
                token_type_ids = token_type_ids[:, -1].unsqueeze(-1)

        attention_mask = kwargs.get("attention_mask", None)
        position_ids = kwargs.get("position_ids", None)

        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
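            # e.g. a left-padded mask [[0, 0, 1, 1, 1]] gives cumsum(-1) - 1 = [[-1, -1, 0, 1, 2]], and the
            # padded slots are then overwritten with a dummy position of 1: [[1, 1, 0, 1, 2]].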
            if past_key_values:
                position_ids = position_ids[:, -1].unsqueeze(-1)

        return {
            "input_ids": input_ids,
            "past_key_values": past_key_values,
            "use_cache": kwargs.get("use_cache"),
            "position_ids": position_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
            are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]

        # make sure sampling in fp16 works correctly and
        # compute loss in fp32 to match with mesh-tf version
        # https://github.com/EleutherAI/gpt-neo/blob/89ce74164da2fb16179106f54e2269b5da8db333/models/gpt2/gpt2.py#L179
        lm_logits = self.lm_head(hidden_states).to(torch.float32)

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))

            loss = loss.to(hidden_states.dtype)

        if not return_dict:
            output = (lm_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=lm_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    @staticmethod
    def _reorder_cache(
        past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
    ) -> Tuple[Tuple[torch.Tensor]]:
        """
        This function is used to re-order the `past_key_values` cache if [`~PretrainedModel.beam_search`] or
        [`~PretrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
        beam_idx at every generation step.
        """
        return tuple(
            tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
            for layer_past in past_key_values
        )
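

if __name__ == "__main__":
    # Minimal smoke test (an illustrative sketch added to this listing, not part of the upstream file).
    # It builds a tiny, randomly initialised model and runs a single forward pass so the module can be
    # sanity-checked without downloading a checkpoint. The hyper-parameters below are arbitrary small
    # values and assume MossConfig accepts the CodeGen-style arguments used throughout this file
    # (n_embd, n_layer, n_head, n_ctx, rotary_dim, ...).
    config = MossConfig(
        vocab_size=128,
        n_positions=64,
        n_ctx=64,
        n_embd=32,
        n_layer=2,
        n_head=4,
        rotary_dim=8,
    )
    model = MossForCausalLM(config)
    model.eval()

    input_ids = torch.randint(0, config.vocab_size, (1, 10))
    with torch.no_grad():
        outputs = model(input_ids=input_ids, labels=input_ids)

    print(outputs.logits.shape)  # expected: torch.Size([1, 10, 128])
    print(outputs.loss.item())   # random-weight cross-entropy, roughly ln(128) ≈ 4.85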