AIEcosystem committed
Commit e342cce · verified · 1 Parent(s): 99f6853

Update src/streamlit_app.py

Files changed (1)
  1. src/streamlit_app.py +242 -38
src/streamlit_app.py CHANGED
@@ -1,40 +1,244 @@
- import altair as alt
- import numpy as np
- import pandas as pd
  import streamlit as st

- """
- # Welcome to Streamlit!
-
- Edit `/streamlit_app.py` to customize this app to your heart's desire :heart:.
- If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
- forums](https://discuss.streamlit.io).
-
- In the meantime, below is an example of what you can do with just a few lines of code:
- """
-
- num_points = st.slider("Number of points in spiral", 1, 10000, 1100)
- num_turns = st.slider("Number of turns in spiral", 1, 300, 31)
-
- indices = np.linspace(0, 1, num_points)
- theta = 2 * np.pi * num_turns * indices
- radius = indices
-
- x = radius * np.cos(theta)
- y = radius * np.sin(theta)
-
- df = pd.DataFrame({
-     "x": x,
-     "y": y,
-     "idx": indices,
-     "rand": np.random.randn(num_points),
- })
-
- st.altair_chart(alt.Chart(df, height=700, width=700)
-     .mark_point(filled=True)
-     .encode(
-         x=alt.X("x", axis=None),
-         y=alt.Y("y", axis=None),
-         color=alt.Color("idx", legend=None, scale=alt.Scale()),
-         size=alt.Size("rand", legend=None, scale=alt.Scale(range=[1, 150])),
-     ))
+ import os
+ os.environ['HF_HOME'] = '/tmp'
+ import time
  import streamlit as st
+ import pandas as pd
+ import io
+ import plotly.express as px
+ import zipfile
+ import json
+ from cryptography.fernet import Fernet
+ from streamlit_extras.stylable_container import stylable_container
+ from typing import Optional
+ from gliner import GLiNER
+ from comet_ml import Experiment
+
+
+ # --- Page Configuration and UI Elements ---
+ st.set_page_config(layout="wide", page_title="Named Entity Recognition App")
+ st.subheader("DataHarvest", divider="violet")
+ st.link_button("by nlpblogs", "https://nlpblogs.com", type="tertiary")
+ st.markdown(':rainbow[**Supported Languages: English**]')
+
+ expander = st.expander("**Important notes**")
+ expander.write("""**Named Entities:** This DataHarvest web app predicts eight (8) labels: "Location", "Organization", "Product_or_Good", "Date", "Quantity", "Transportation_Mode", "Person", "Document_or_Form_ID"
+
+ Results are presented in easy-to-read tables, visualized in an interactive tree map, pie chart and bar chart, and are available for download along with a Glossary of tags.
+
+ **How to Use:** Type or paste your text into the text area below, then press Ctrl + Enter. Click the 'Results' button to extract and tag entities in your text data.
+
+ **Usage Limits:** You can request results unlimited times for one (1) month.
+
+ **Technical issues:** If your connection times out, please refresh the page or reopen the app's URL.
+
+ For any errors or inquiries, please contact us at info@nlpblogs.com""")
+
+ with st.sidebar:
+     st.write("Use the following code to embed the DataHarvest web app on your website. Feel free to adjust the width and height values to fit your page.")
+     code = '''
+     <iframe
+         src="https://aiecosystem-chainsense.hf.space"
+         frameborder="0"
+         width="850"
+         height="450"
+     ></iframe>
+     '''
+     st.code(code, language="html")
+     st.text("")
+     st.text("")
+     st.divider()
+     st.subheader("🚀 Ready to build your own AI Web App?", divider="violet")
+     st.link_button("AI Web App Builder", "https://nlpblogs.com/custom-web-app-development/", type="primary")
+
+ # --- Comet ML Setup ---
+ COMET_API_KEY = os.environ.get("COMET_API_KEY")
+ COMET_WORKSPACE = os.environ.get("COMET_WORKSPACE")
+ COMET_PROJECT_NAME = os.environ.get("COMET_PROJECT_NAME")
+ comet_initialized = bool(COMET_API_KEY and COMET_WORKSPACE and COMET_PROJECT_NAME)
+
+ if not comet_initialized:
+     st.warning("Comet ML not initialized. Check environment variables.")
+
+ # --- Label Definitions ---
+ labels = ["person", "country", "city", "organization", "date", "seconds", "money", "percent value", "position"]
+
+ # Create a mapping dictionary for labels to categories
+ category_mapping = {
+     "People": ["person", "organization", "position"],
+     "Locations": ["country", "city"],
+     "Time": ["date", "seconds"],
+     "Numbers": ["money", "percent value"]
+ }
+
+ # --- Model Loading ---
+ @st.cache_resource
+ def load_ner_model():
+     """Loads the GLiNER model and caches it."""
+     try:
+         return GLiNER.from_pretrained("knowledgator/gliner-multitask-large-v0.5", nested_ner=True, num_gen_sequences=2, gen_constraints=labels)
+     except Exception as e:
+         st.error(f"Failed to load NER model. Please check your internet connection or model availability: {e}")
+         st.stop()
+
+ model = load_ner_model()
+
+ # Flatten the mapping to a single dictionary
+ reverse_category_mapping = {label: category for category, label_list in category_mapping.items() for label in label_list}
+
+ # --- Text Input and Clear Button ---
+ text = st.text_area("Type or paste your text below, and then press Ctrl + Enter", height=250, key='my_text_area')
+
+ def clear_text():
+     """Clears the text area."""
+     st.session_state['my_text_area'] = ""
+
+ st.button("Clear text", on_click=clear_text)
+
+ # --- Results Section ---
+ if st.button("Results"):
+     start_time = time.time()
+     if not text.strip():
+         st.warning("Please enter some text to extract entities.")
+     else:
+         with st.spinner("Extracting entities...", show_time=True):
+             entities = model.predict_entities(text, labels)
+             df = pd.DataFrame(entities)
+
+             if not df.empty:
+                 df['category'] = df['label'].map(reverse_category_mapping)
+                 if comet_initialized:
+                     experiment = Experiment(
+                         api_key=COMET_API_KEY,
+                         workspace=COMET_WORKSPACE,
+                         project_name=COMET_PROJECT_NAME,
+                     )
+                     experiment.log_parameter("input_text", text)
+                     experiment.log_table("predicted_entities", df)
+
+                 st.subheader("Grouped Entities by Category", divider="violet")
+
+                 # Create tabs for each category
+                 category_names = sorted(list(category_mapping.keys()))
+                 category_tabs = st.tabs(category_names)
+
+                 for i, category_name in enumerate(category_names):
+                     with category_tabs[i]:
+                         df_category_filtered = df[df['category'] == category_name]
+                         if not df_category_filtered.empty:
+                             st.dataframe(df_category_filtered.drop(columns=['category']), use_container_width=True)
+                         else:
+                             st.info(f"No entities found for the '{category_name}' category.")
+
+                 with st.expander("See Glossary of tags"):
+                     st.write('''
+                     - **text**: ['entity extracted from your text data']
+                     - **score**: ['accuracy score; how accurately a tag has been assigned to a given entity']
+                     - **label**: ['label (tag) assigned to a given extracted entity']
+                     - **start**: ['index of the start of the corresponding entity']
+                     - **end**: ['index of the end of the corresponding entity']
+                     ''')
+                 st.divider()
+
+                 # Tree map
+                 st.subheader("Tree map", divider="violet")
+                 fig_treemap = px.treemap(df, path=[px.Constant("all"), 'category', 'label', 'text'], values='score', color='category')
+                 fig_treemap.update_layout(margin=dict(t=50, l=25, r=25, b=25), paper_bgcolor='#E8F5E9', plot_bgcolor='#E8F5E9')
+                 st.plotly_chart(fig_treemap)
+
+                 # Pie and Bar charts
+                 grouped_counts = df['category'].value_counts().reset_index()
+                 grouped_counts.columns = ['category', 'count']
+                 col1, col2 = st.columns(2)
+
+                 with col1:
+                     st.subheader("Pie chart", divider="violet")
+                     fig_pie = px.pie(grouped_counts, values='count', names='category', hover_data=['count'], labels={'count': 'count'}, title='Percentage of predicted categories')
+                     fig_pie.update_traces(textposition='inside', textinfo='percent+label')
+                     fig_pie.update_layout(
+                         paper_bgcolor='#E8F5E9',
+                         plot_bgcolor='#E8F5E9'
+                     )
+                     st.plotly_chart(fig_pie)
+
+                 with col2:
+                     st.subheader("Bar chart", divider="violet")
+                     fig_bar = px.bar(grouped_counts, x="count", y="category", color="category", text_auto=True, title='Occurrences of predicted categories')
+                     fig_bar.update_layout(
+                         paper_bgcolor='#E8F5E9',
+                         plot_bgcolor='#E8F5E9'
+                     )
+                     st.plotly_chart(fig_bar)
+
+                 # Most Frequent Entities
+                 st.subheader("Most Frequent Entities", divider="violet")
+                 word_counts = df['text'].value_counts().reset_index()
+                 word_counts.columns = ['Entity', 'Count']
+                 repeating_entities = word_counts[word_counts['Count'] > 1]
+                 if not repeating_entities.empty:
+                     st.dataframe(repeating_entities, use_container_width=True)
+                     fig_repeating_bar = px.bar(repeating_entities, x='Entity', y='Count', color='Entity')
+                     fig_repeating_bar.update_layout(xaxis={'categoryorder': 'total descending'},
+                                                     paper_bgcolor='#E8F5E9',
+                                                     plot_bgcolor='#E8F5E9')
+                     st.plotly_chart(fig_repeating_bar)
+                 else:
+                     st.warning("No entities were found that occur more than once.")
+
+                 # Download Section
+                 st.divider()
+
+                 dfa = pd.DataFrame(
+                     data={
+                         'Column Name': ['text', 'label', 'score', 'start', 'end'],
+                         'Description': [
+                             'entity extracted from your text data',
+                             'label (tag) assigned to a given extracted entity',
+                             'accuracy score; how accurately a tag has been assigned to a given entity',
+                             'index of the start of the corresponding entity',
+                             'index of the end of the corresponding entity',
+                         ]
+                     }
+                 )
+                 buf = io.BytesIO()
+                 with zipfile.ZipFile(buf, "w") as myzip:
+                     myzip.writestr("Summary of the results.csv", df.to_csv(index=False))
+                     myzip.writestr("Glossary of tags.csv", dfa.to_csv(index=False))
+
+                 with stylable_container(
+                     key="download_button",
+                     css_styles="""button { background-color: red; border: 1px solid black; padding: 5px; color: white; }""",
+                 ):
+                     st.download_button(
+                         label="Download results and glossary (zip)",
+                         data=buf.getvalue(),
+                         file_name="nlpblogs_results.zip",
+                         mime="application/zip",
+                     )
+
+                 if comet_initialized:
+                     experiment.log_figure(figure=fig_treemap, figure_name="entity_treemap_categories")
+                     experiment.end()
+             else:  # If df is empty
+                 st.warning("No entities were found in the provided text.")
+
+     end_time = time.time()
+     elapsed_time = end_time - start_time
+     st.text("")
+     st.text("")
+     st.info(f"Results processed in **{elapsed_time:.2f} seconds**.")