from transformers import PreTrainedModel, AutoModel, PretrainedConfig
import torch
from torch import nn


class MultiTaskUnixCoderConfig(PretrainedConfig):
    model_type = "multi_task_unixcoder"

    def __init__(self, num_cwe_classes=106, **kwargs):
        super().__init__(**kwargs)
        self.num_cwe_classes = num_cwe_classes


class MultiTaskUnixCoder(PreTrainedModel):
    config_class = MultiTaskUnixCoderConfig
    base_model_prefix = "base"

    def __init__(self, config):
        super().__init__(config)
        # Shared UniXcoder encoder; its pretrained weights are loaded here and
        # overwritten later if the full model is restored via from_pretrained().
        self.base = AutoModel.from_pretrained("microsoft/unixcoder-base")
        hidden_size = self.base.config.hidden_size  # 768 for unixcoder-base
        # Two task heads: binary vulnerable/not-vulnerable and CWE classification.
        self.vul_head = nn.Linear(hidden_size, 2)
        # +1 output class so that "no CWE" (label -1) can be mapped to class 0.
        self.cwe_head = nn.Linear(hidden_size, config.num_cwe_classes + 1)

    def forward(self, input_ids, attention_mask=None, labels_vul=None, labels_cwe=None):
        outputs = self.base(input_ids=input_ids, attention_mask=attention_mask)
        # Use the hidden state of the first ([CLS]) token as the sequence representation.
        hidden_state = outputs.last_hidden_state[:, 0, :]
        vul_logits = self.vul_head(hidden_state)
        cwe_logits = self.cwe_head(hidden_state)

        loss = None
        if labels_vul is not None and labels_cwe is not None:
            vul_loss = nn.CrossEntropyLoss()(vul_logits, labels_vul)
            # Shift CWE labels by +1 so that -1 ("no CWE") becomes class 0.
            cwe_loss = nn.CrossEntropyLoss()(cwe_logits, labels_cwe + 1)
            loss = vul_loss + cwe_loss

        output = {"vul_logits": vul_logits, "cwe_logits": cwe_logits}
        if loss is not None:
            output["loss"] = loss
        return output
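A minimal usage sketch, not from the original source: it assumes the `microsoft/unixcoder-base` tokenizer, a single dummy code snippet, and the label convention implied by the model above (0/1 for `labels_vul`, -1 meaning "no CWE" for `labels_cwe`).

```python
from transformers import AutoTokenizer
import torch

# Hypothetical example inputs; names and label values are illustrative assumptions.
tokenizer = AutoTokenizer.from_pretrained("microsoft/unixcoder-base")
config = MultiTaskUnixCoderConfig(num_cwe_classes=106)
model = MultiTaskUnixCoder(config)

batch = tokenizer(
    ["int main() { return 0; }"],
    return_tensors="pt", padding=True, truncation=True, max_length=512,
)
labels_vul = torch.tensor([0])   # 0 = not vulnerable (assumed convention)
labels_cwe = torch.tensor([-1])  # -1 = no CWE; shifted to class 0 inside forward()

out = model(
    input_ids=batch["input_ids"],
    attention_mask=batch["attention_mask"],
    labels_vul=labels_vul,
    labels_cwe=labels_cwe,
)
print(out["loss"], out["vul_logits"].shape, out["cwe_logits"].shape)
```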