Hariharan Vijayachandran committed · a6af52b
1 parent: 0c44400
fix

Files changed:
- app.py (+13 -13)
- requirements.txt (+0 -1)
app.py CHANGED

@@ -18,7 +18,7 @@ from nltk.data import find
 import nltk
 import gensim
 
-@st.
+@st.cache(suppress_st_warning=True, allow_output_mutation=True)
 def get_embed_model():
     nltk.download("word2vec_sample")
     word2vec_sample = str(find('models/word2vec_sample/pruned.word2vec.txt'))
@@ -26,7 +26,7 @@ def get_embed_model():
     model = gensim.models.KeyedVectors.load_word2vec_format(word2vec_sample, binary=False)
     return model
 
-@st.
+@st.cache(suppress_st_warning=True, allow_output_mutation=True)
 def get_top_n_closest(query_word, candidate, n):
     model = get_embed_model()
     t = time.time()
@@ -44,7 +44,7 @@ def get_top_n_closest(query_word, candidate, n):
     top = [p_c[i] for i in sorted]
     return top
 
-@st.
+@st.cache(suppress_st_warning=True, allow_output_mutation=True)
 def annotate_text(text, words):
     annotated = [text]
     for word in words:
@@ -63,23 +63,23 @@ def annotate_text(text, words):
     return tuple(annotated)
 
 
-@st.
+@st.cache(suppress_st_warning=True, allow_output_mutation=True)
 def preprocess_text(s):
     return list(filter(lambda x: x!= '', (''.join(c if c.isalnum() or c == ' ' else ' ' for c in s)).split(' ')))
 
-@st.
+@st.cache(suppress_st_warning=True, allow_output_mutation=True)
 def get_pairwise_distances(model):
     df = pd.read_csv(f"{ASSETS_PATH}/{model}/pairwise_distances.csv").set_index('index')
     return df
 
-@st.
+@st.cache(suppress_st_warning=True, allow_output_mutation=True)
 def get_pairwise_distances_chunked(model, chunk):
     # for df in pd.read_csv(f"{ASSETS_PATH}/{model}/pairwise_distances.csv", chunksize = 16):
     #     print(df.iloc[0]['queries'])
     #     if chunk == int(df.iloc[0]['queries']):
     #         return df
     return get_pairwise_distances(model)
-@st.
+@st.cache(suppress_st_warning=True, allow_output_mutation=True)
 def get_query_strings():
     df = pd.read_json(f"{ASSETS_PATH}/IUR_Reddit_test_queries_english.jsonl", lines = True)
     df['index'] = df.reset_index().index
@@ -88,7 +88,7 @@ def get_query_strings():
     # df.to_parquet(f"{ASSETS_PATH}/IUR_Reddit_test_queries_english.parquet", index = 'index', partition_cols = 'partition')
 
     # return pd.read_parquet(f"{ASSETS_PATH}/IUR_Reddit_test_queries_english.parquet", columns=['fullText', 'index', 'authorIDs'])
-@st.
+@st.cache(suppress_st_warning=True, allow_output_mutation=True)
 def get_candidate_strings():
     df = pd.read_json(f"{ASSETS_PATH}/IUR_Reddit_test_candidates_english.jsonl", lines = True)
     df['i'] = df['index']
@@ -99,24 +99,24 @@ def get_candidate_strings():
     # df['partition'] = df['index']%100
     # df.to_parquet(f"{ASSETS_PATH}/IUR_Reddit_test_candidates_english.parquet", index = 'index', partition_cols = 'partition')
     # return pd.read_parquet(f"{ASSETS_PATH}/IUR_Reddit_test_candidates_english.parquet", columns=['fullText', 'index', 'authorIDs'])
-@st.
+@st.cache(suppress_st_warning=True, allow_output_mutation=True)
 def get_embedding_dataset(model):
     data = load_from_disk(f"{ASSETS_PATH}/{model}/embedding")
     return data
-@st.
+@st.cache(suppress_st_warning=True, allow_output_mutation=True)
 def get_bad_queries(model):
     df = get_query_strings().iloc[list(get_pairwise_distances(model)['queries'].unique())][['fullText', 'index', 'authorIDs']]
     return df
-@st.
+@st.cache(suppress_st_warning=True, allow_output_mutation=True)
 def get_gt_candidates(model, author):
     gt_candidates = get_candidate_strings()
     df = gt_candidates[gt_candidates['authorIDs'] == author]
     return df
-@st.
+@st.cache(suppress_st_warning=True, allow_output_mutation=True)
 def get_candidate_text(l):
     return get_candidate_strings().at[l,'fullText']
 
-@st.
+@st.cache(suppress_st_warning=True, allow_output_mutation=True)
 def get_annotated_text(text, word, pos):
     print("here", word, pos)
     start= text.index(word, pos)
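In this view the removed decorator lines appear truncated to "@st."; whatever the previous decorator was, every cached helper now uses Streamlit's legacy st.cache API with suppress_st_warning=True (Streamlit calls inside the cached function no longer trigger warnings) and allow_output_mutation=True (the returned object is not hashed between reruns, which matters for large, unhashable results such as a gensim KeyedVectors model or a pandas DataFrame). A minimal sketch of the pattern, with an illustrative loader name and path that are not taken from this Space:

    import streamlit as st
    import gensim

    # Sketch of the caching pattern applied throughout app.py in this commit.
    # suppress_st_warning=True: st.* calls inside the cached function do not warn.
    # allow_output_mutation=True: the returned object is not hash-checked on rerun,
    # so a large gensim KeyedVectors model can be cached without Streamlit trying
    # to hash it on every script run.
    @st.cache(suppress_st_warning=True, allow_output_mutation=True)
    def load_vectors(path):  # hypothetical helper, not part of app.py
        st.write(f"Loading vectors from {path} ...")  # runs only on a cache miss
        return gensim.models.KeyedVectors.load_word2vec_format(path, binary=False)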
requirements.txt CHANGED

@@ -4,4 +4,3 @@ pandas==1.5.2
 st-annotated-text==3.0.0
 nltk==3.8.1
 gensim==4.3.1
-streamlit==1.20.0
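The only requirements.txt change is dropping the streamlit==1.20.0 pin, so the Space presumably falls back to the Streamlit version configured for it (on Hugging Face Spaces this is typically the sdk_version in the README metadata) instead of installing 1.20.0 from requirements. Since st.cache is a legacy API that newer Streamlit releases deprecate in favor of st.cache_data and st.cache_resource, a small diagnostic like the following (not part of this commit) can confirm what the Space actually resolves to:

    import streamlit as st

    # Diagnostic sketch only: report the Streamlit version resolved at runtime
    # and whether the legacy st.cache API is still present.
    st.write("Streamlit version:", st.__version__)
    st.write("Legacy st.cache available:", hasattr(st, "cache"))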