AIEcosystem committed
Commit d830047 · verified · 1 Parent(s): a63bf2b

Update src/streamlit_app.py

Files changed (1)
  1. src/streamlit_app.py +326 -38
src/streamlit_app.py CHANGED
@@ -1,40 +1,328 @@
- import altair as alt
- import numpy as np
- import pandas as pd
  import streamlit as st
 
- """
- # Welcome to Streamlit!
-
- Edit `/streamlit_app.py` to customize this app to your heart's desire :heart:.
- If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
- forums](https://discuss.streamlit.io).
-
- In the meantime, below is an example of what you can do with just a few lines of code:
- """
-
- num_points = st.slider("Number of points in spiral", 1, 10000, 1100)
- num_turns = st.slider("Number of turns in spiral", 1, 300, 31)
-
- indices = np.linspace(0, 1, num_points)
- theta = 2 * np.pi * num_turns * indices
- radius = indices
-
- x = radius * np.cos(theta)
- y = radius * np.sin(theta)
-
- df = pd.DataFrame({
-     "x": x,
-     "y": y,
-     "idx": indices,
-     "rand": np.random.randn(num_points),
- })
-
- st.altair_chart(alt.Chart(df, height=700, width=700)
-     .mark_point(filled=True)
-     .encode(
-         x=alt.X("x", axis=None),
-         y=alt.Y("y", axis=None),
-         color=alt.Color("idx", legend=None, scale=alt.Scale()),
-         size=alt.Size("rand", legend=None, scale=alt.Scale(range=[1, 150])),
-     ))
+ import os
+ os.environ['HF_HOME'] = '/tmp'
+ import time
  import streamlit as st
+ import pandas as pd
+ import io
+ import plotly.express as px
+ import zipfile
+ import json
+ from cryptography.fernet import Fernet
+ from streamlit_extras.stylable_container import stylable_container
+ from typing import Optional
+ from gliner import GLiNER
+ from comet_ml import Experiment
+
+
+ st.markdown(
+     """
+     <style>
+     /* Main app background and text color */
+     .stApp {
+         background-color: #FFF8F0; /* A very light, creamy orange */
+         color: #000000; /* Black for text */
+     }
+     /* Sidebar background color */
+     .css-1d36184 {
+         background-color: #FFC080; /* A soft orange for the sidebar */
+         secondary-background-color: #FFC080;
+     }
+     /* Expander background color */
+     .streamlit-expanderContent {
+         background-color: #FFF8F0;
+     }
+     /* Expander header background color */
+     .streamlit-expanderHeader {
+         background-color: #FFF8F0;
+     }
+     /* Text Area background and text color */
+     .stTextArea textarea {
+         background-color: #FFDDAA; /* A light, soft orange */
+         color: #000000; /* Black for text */
+     }
+     /* Button background and text color */
+     .stButton > button {
+         background-color: #FFDDAA;
+         color: #000000;
+     }
+     /* Warning box background and text color */
+     .stAlert.st-warning {
+         background-color: #FFBB88; /* A slightly darker orange for warnings */
+         color: #000000;
+     }
+     /* Success box background and text color */
+     .stAlert.st-success {
+         background-color: #FFBB88; /* A slightly darker orange for success boxes */
+         color: #000000;
+     }
+     </style>
+     """,
+     unsafe_allow_html=True
+ )
+
+
+ # --- Page Configuration and UI Elements ---
+ st.set_page_config(layout="wide", page_title="Named Entity Recognition App")
+ st.subheader("Multilingual", divider="orange")
+ st.link_button("by nlpblogs", "https://nlpblogs.com", type="tertiary")
+ expander = st.expander("**Important notes**")
+ expander.write("""**Named Entities:** This Business Core predicts twenty-six (26) labels: "Person", "Contact", "Company", "Department", "Vendor", "Client", "Office", "Warehouse", "Address", "City", "State", "Country", "Date", "Time", "Time Period", "Revenue", "Cost", "Budget", "Invoice Number", "Product", "Service", "Task", "Project", "Status", "Asset", "Transaction".
+ Results are presented in easy-to-read tables, visualized in an interactive tree map, pie chart and bar chart, and are available for download along with a Glossary of tags.
+ **How to Use:** Type or paste your text into the text area below, then press Ctrl + Enter. Click the 'Results' button to extract and tag the entities in your text data.
+ **Usage Limits:** You can request results an unlimited number of times for one (1) month.
+ **Supported Languages:** English
+ **Technical issues:** If your connection times out, please refresh the page or reopen the app's URL.
+ For any errors or inquiries, please contact us at info@nlpblogs.com""")
+
+ with st.sidebar:
+     st.write("Use the following code to embed the Business Core web app on your website. Feel free to adjust the width and height values to fit your page.")
+     code = '''
+     <iframe
+         src="https://aiecosystem-business-core.hf.space"
+         frameborder="0"
+         width="850"
+         height="450"
+     ></iframe>
+     '''
+     st.code(code, language="html")
+     st.text("")
+     st.text("")
+     st.divider()
+     st.subheader("🚀 Ready to build your own AI Web App?", divider="orange")
+     st.link_button("AI Web App Builder", "https://nlpblogs.com/custom-web-app-development/", type="primary")
+
+ # --- Comet ML Setup ---
+ COMET_API_KEY = os.environ.get("COMET_API_KEY")
+ COMET_WORKSPACE = os.environ.get("COMET_WORKSPACE")
+ COMET_PROJECT_NAME = os.environ.get("COMET_PROJECT_NAME")
+ comet_initialized = bool(COMET_API_KEY and COMET_WORKSPACE and COMET_PROJECT_NAME)
+
+ if not comet_initialized:
+     st.warning("Comet ML not initialized. Check environment variables.")
+
+ # --- Label Definitions ---
+ labels = [
+     "prefix", "person_name",
+     "job_title", "company_name", "job_area",
+     "email_address",
+     "account_number", "address", "city", "postal_code",
+     "password", "job_type", "state",
+     "county",
+     "phone_number",
+     "personal_identification_number",
+     "gender", "biological_sex",
+     "nationality",
+     "medical_condition", "age",
+ ]
+
+ # Create a mapping dictionary for labels to categories
+ category_mapping = {
+     "Personal Information": [
+         "person_name",
+         "prefix",
+         "gender",
+         "biological_sex",
+         "age"
+     ],
+     "Contact Information": [
+         "email_address",
+         "address",
+         "city",
+         "state",
+         "county",
+         "postal_code",
+         "phone_number"
+     ],
+     "Professional Information": [
+         "job_title",
+         "job_area",
+         "job_type",
+         "company_name"
+     ],
+     "Sensitive Information": [
+         "password",
+         "personal_identification_number",
+         "nationality",
+         "medical_condition",
+         "account_number"
+     ]
+ }
+
+
+ # --- Model Loading ---
+ @st.cache_resource
+ def load_ner_model():
+     """Loads the GLiNER model and caches it."""
+     try:
+         return GLiNER.from_pretrained("thegenerativegeneration/gliner-finetune-small-v2.5", nested_ner=True, num_gen_sequences=2, gen_constraints=labels)
+     except Exception as e:
+         st.error(f"Failed to load NER model. Please check your internet connection or model availability: {e}")
+         st.stop()
+
+ model = load_ner_model()
+
+ # Flatten the mapping to a single dictionary
+ reverse_category_mapping = {label: category for category, label_list in category_mapping.items() for label in label_list}
+
+ # --- Text Input and Clear Button ---
+ text = st.text_area("Type or paste your text below, and then press Ctrl + Enter", height=250, key='my_text_area')
+
+ def clear_text():
+     """Clears the text area."""
+     st.session_state['my_text_area'] = ""
+
+ st.button("Clear text", on_click=clear_text)
+
+ # --- Results Section ---
+ if st.button("Results"):
+     start_time = time.time()
+     if not text.strip():
+         st.warning("Please enter some text to extract entities.")
+     else:
+         with st.spinner("Extracting entities...", show_time=True):
+             entities = model.predict_entities(text, labels)
+             df = pd.DataFrame(entities)
+
+         if not df.empty:
+             df['category'] = df['label'].map(reverse_category_mapping)
+             if comet_initialized:
+                 experiment = Experiment(
+                     api_key=COMET_API_KEY,
+                     workspace=COMET_WORKSPACE,
+                     project_name=COMET_PROJECT_NAME,
+                 )
+                 experiment.log_parameter("input_text", text)
+                 experiment.log_table("predicted_entities", df)
+
+             st.subheader("Grouped Entities by Category", divider="orange")
+
+             # Create tabs for each category
+             category_names = sorted(list(category_mapping.keys()))
+             category_tabs = st.tabs(category_names)
+
+             for i, category_name in enumerate(category_names):
+                 with category_tabs[i]:
+                     df_category_filtered = df[df['category'] == category_name]
+                     if not df_category_filtered.empty:
+                         st.dataframe(df_category_filtered.drop(columns=['category']), use_container_width=True)
+                     else:
+                         st.info(f"No entities found for the '{category_name}' category.")
+
+             with st.expander("See Glossary of tags"):
+                 st.write('''
+                 - **text**: ['entity extracted from your text data']
+                 - **score**: ['accuracy score; how accurately a tag has been assigned to a given entity']
+                 - **label**: ['label (tag) assigned to a given extracted entity']
+                 - **category**: ['the high-level category for the label']
+                 - **start**: ['index of the start of the corresponding entity']
+                 - **end**: ['index of the end of the corresponding entity']
+                 ''')
+             st.divider()
+
+             # Tree map
+             st.subheader("Tree map", divider="orange")
+             fig_treemap = px.treemap(df, path=[px.Constant("all"), 'category', 'label', 'text'], values='score', color='category')
+             fig_treemap.update_layout(margin=dict(t=50, l=25, r=25, b=25), paper_bgcolor='#FFF8F0', plot_bgcolor='#FFF8F0')
+             st.plotly_chart(fig_treemap)
+
+             # Pie and Bar charts
+             grouped_counts = df['category'].value_counts().reset_index()
+             grouped_counts.columns = ['category', 'count']
+             col1, col2 = st.columns(2)
+
+             with col1:
+                 st.subheader("Pie chart", divider="orange")
+                 fig_pie = px.pie(grouped_counts, values='count', names='category', hover_data=['count'], labels={'count': 'count'}, title='Percentage of predicted categories')
+                 fig_pie.update_traces(textposition='inside', textinfo='percent+label')
+                 fig_pie.update_layout(
+                     paper_bgcolor='#FFF8F0',
+                     plot_bgcolor='#FFF8F0'
+                 )
+                 st.plotly_chart(fig_pie)
+
+             with col2:
+                 st.subheader("Bar chart", divider="orange")
+                 fig_bar = px.bar(grouped_counts, x="count", y="category", color="category", text_auto=True, title='Occurrences of predicted categories')
+                 fig_bar.update_layout(
+                     paper_bgcolor='#FFF8F0',
+                     plot_bgcolor='#FFF8F0'
+                 )
+                 st.plotly_chart(fig_bar)
+
+             # Most Frequent Entities
+             st.subheader("Most Frequent Entities", divider="orange")
+             word_counts = df['text'].value_counts().reset_index()
+             word_counts.columns = ['Entity', 'Count']
+             repeating_entities = word_counts[word_counts['Count'] > 1]
+             if not repeating_entities.empty:
+                 st.dataframe(repeating_entities, use_container_width=True)
+                 fig_repeating_bar = px.bar(repeating_entities, x='Entity', y='Count', color='Entity')
+                 fig_repeating_bar.update_layout(xaxis={'categoryorder': 'total descending'},
+                                                 paper_bgcolor='#FFF8F0',
+                                                 plot_bgcolor='#FFF8F0')
+                 st.plotly_chart(fig_repeating_bar)
+             else:
+                 st.warning("No entities were found that occur more than once.")
+
+             # Download Section
+             st.divider()
+
+             dfa = pd.DataFrame(
+                 data={
+                     'Column Name': ['text', 'label', 'score', 'start', 'end', 'category'],
+                     'Description': [
+                         'entity extracted from your text data',
+                         'label (tag) assigned to a given extracted entity',
+                         'accuracy score; how accurately a tag has been assigned to a given entity',
+                         'index of the start of the corresponding entity',
+                         'index of the end of the corresponding entity',
+                         'the broader category the entity belongs to',
+                     ]
+                 }
+             )
+             buf = io.BytesIO()
+             with zipfile.ZipFile(buf, "w") as myzip:
+                 myzip.writestr("Summary of the results.csv", df.to_csv(index=False))
+                 myzip.writestr("Glossary of tags.csv", dfa.to_csv(index=False))
+
+             with stylable_container(
+                 key="download_button",
+                 css_styles="""button { background-color: red; border: 1px solid black; padding: 5px; color: white; }""",
+             ):
+                 st.download_button(
+                     label="Download results and glossary (zip)",
+                     data=buf.getvalue(),
+                     file_name="nlpblogs_results.zip",
+                     mime="application/zip",
+                 )
+
+             if comet_initialized:
+                 experiment.log_figure(figure=fig_treemap, figure_name="entity_treemap_categories")
+                 experiment.end()
+         else:  # If df is empty
+             st.warning("No entities were found in the provided text.")
+
+     end_time = time.time()
+     elapsed_time = end_time - start_time
+     st.text("")
+     st.text("")
+     st.info(f"Results processed in **{elapsed_time:.2f} seconds**.")