Commit 812a5fb
Parent(s): b142f60
Upload fincat_utils.py

fincat_utils.py ADDED (+108 -0)
import pandas as pd
import numpy as np
import pickle
import torch
from torch.utils.data import Dataset, DataLoader
from transformers import BertTokenizer, BertModel
from transformers import AutoTokenizer, AutoModel
import nltk

# nltk.word_tokenize (used below) needs the 'punkt' tokenizer data;
# fetch it quietly if it is not already present.
nltk.download('punkt', quiet=True)

# Load the pre-trained BERT tokenizer and model once at import time.
# output_hidden_states=True makes the model return the hidden states
# of every encoder layer in addition to the final output.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased', output_hidden_states=True)
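Once loaded, the tokenizer/model pair can be smoke-tested on any short string. This is a minimal sketch for illustration only, not part of the committed file; the sample sentence is made up:

inputs = tokenizer("profit rose 12 %", return_tensors="pt")
with torch.no_grad():
    out = model(**inputs)
print(out.last_hidden_state.shape)  # torch.Size([1, n_tokens, 768])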
def extract_context_words(x, window=6):
    """Return the full word at the given character span together with up
    to `window` words of context on each side.

    x is a dict-like row with keys 'paragraph', 'offset_start' and
    'offset_end' marking the target word's character offsets.
    """
    paragraph, offset_start, offset_end = x['paragraph'], x['offset_start'], x['offset_end']
    # Pad with spaces so the boundary searches below always find a space,
    # then shift the offsets to match the padded string.
    paragraph = ' ' + paragraph + ' '
    offset_start = offset_start + 1
    offset_end = offset_end + 1
    # Widen the span to the whole whitespace-delimited word containing it.
    prev_space_posn = paragraph[:offset_start].rindex(' ') + 1
    end_space_posn = offset_end + paragraph[offset_end:].index(' ')
    full_word = paragraph[prev_space_posn:end_space_posn]

    prev_words = nltk.word_tokenize(paragraph[0:prev_space_posn])
    next_words = nltk.word_tokenize(paragraph[end_space_posn:])
    words_in_context_window = prev_words[-window:] + [full_word] + next_words[:window]
    context_text = ' '.join(words_in_context_window)
    return context_text
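A minimal usage sketch (the paragraph and offsets below are illustrative, not taken from the repository):

row = {'paragraph': 'Revenue grew 12 % in 2020 compared to the prior year.',
       'offset_start': 13, 'offset_end': 15}
extract_context_words(row)
# -> 'Revenue grew 12 % in 2020 compared to the' (exact tokens depend on nltk)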
"""The following functions were written with inspiration from https://github.com/arushiprakash/MachineLearning/blob/main/BERT%20Word%20Embeddings.ipynb"""

def bert_text_preparation(text, tokenizer):
    """Prepare the input for BERT.

    Takes a string argument and performs pre-processing: adding the
    special [CLS] and [SEP] tokens, tokenization, converting tokens to
    ids, and building segment ids. All tokens are mapped to segment
    id = 1.

    Args:
        text (str): Text to be converted
        tokenizer (obj): Tokenizer object to convert text into
            BERT-readable tokens and ids

    Returns:
        list: List of BERT-readable tokens
        obj: Torch tensor with token ids
        obj: Torch tensor with segment ids
    """
    marked_text = "[CLS] " + text + " [SEP]"
    tokenized_text = tokenizer.tokenize(marked_text)
    indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
    segments_ids = [1] * len(indexed_tokens)

    # Convert inputs to PyTorch tensors with a batch dimension of 1
    tokens_tensor = torch.tensor([indexed_tokens])
    segments_tensors = torch.tensor([segments_ids])

    return tokenized_text, tokens_tensor, segments_tensors
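For a short input, the three return values look roughly like this (a hedged illustration; the exact word pieces depend on the tokenizer's vocabulary):

toks, tt, st = bert_text_preparation('net profit', tokenizer)
# toks -> ['[CLS]', 'net', 'profit', '[SEP]']
# tt.shape == st.shape == torch.Size([1, 4])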
def get_bert_embeddings(tokens_tensor, segments_tensors, model):
    """Get embeddings from an embedding model.

    Args:
        tokens_tensor (obj): Torch tensor of size [1, n_tokens]
            with token ids for each token in the text
        segments_tensors (obj): Torch tensor of size [1, n_tokens]
            with segment ids for each token in the text
        model (obj): Embedding model to generate embeddings
            from token and segment ids

    Returns:
        list: List of lists of floats of size
            [n_tokens, n_embedding_dimensions]
            containing an embedding for each token
    """
    # Gradient calculation is disabled: the model is used in inference mode.
    with torch.no_grad():
        # Pass the segment ids as token_type_ids explicitly; as the second
        # positional argument they would be taken as the attention mask.
        outputs = model(tokens_tensor, token_type_ids=segments_tensors)
        # outputs[2] holds the hidden states of all layers; drop the first
        # entry, which is the embedding-layer (input) state.
        hidden_states = outputs[2][1:]

    # Embeddings from the final BERT encoder layer
    token_embeddings = hidden_states[-1]
    # Collapse the batch dimension
    token_embeddings = torch.squeeze(token_embeddings, dim=0)
    # Convert torch tensors to plain Python lists
    list_token_embeddings = [token_embed.tolist() for token_embed in token_embeddings]

    return list_token_embeddings
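The two helpers compose directly; a short sketch with an illustrative input:

toks, tt, st = bert_text_preparation('Revenue grew 12 %', tokenizer)
embs = get_bert_embeddings(tt, st, model)
# len(embs) == len(toks); each inner list has 768 floats for bert-base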
def bert_embedding_extract(context_text, word):
    """Return the mean final-layer BERT embedding of `word` as it occurs
    in `context_text`."""
    tokenized_text, tokens_tensor, segments_tensors = bert_text_preparation(context_text, tokenizer)
    list_token_embeddings = get_bert_embeddings(tokens_tensor, segments_tensors, model)
    # Tokenize the target word on its own to get its word pieces, dropping
    # the [CLS]/[SEP] markers added by bert_text_preparation so that they
    # do not enter the average.
    word_tokens, tt, st = bert_text_preparation(word, tokenizer)
    word_tokens = [tk for tk in word_tokens if tk not in ('[CLS]', '[SEP]')]
    word_embedding_all = []
    for word_tk in word_tokens:
        # index() finds the first occurrence of the word piece in the context
        word_index = tokenized_text.index(word_tk)
        word_embedding = list_token_embeddings[word_index]
        word_embedding_all.append(word_embedding)
    word_embedding_mean = np.array(word_embedding_all).mean(axis=0)
    return word_embedding_mean
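Putting the file's pieces together, a minimal end-to-end sketch (the row below is illustrative; the actual callers presumably live elsewhere in the Space):

row = {'paragraph': 'Net income increased to 450 million in 2021.',
       'offset_start': 24, 'offset_end': 27}
context = extract_context_words(row)
embedding = bert_embedding_extract(context, '450')
# embedding.shape -> (768,) for bert-base-uncased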