Sambit20030731 committed on
Commit
b15fa5a
·
verified ·
1 Parent(s): 87548b2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +350 -131
app.py CHANGED
@@ -1,162 +1,381 @@
1
- import gradio as gr
 
2
  import pandas as pd
 
3
  from fuzzywuzzy import fuzz
4
- import tempfile
5
-
6
-
7
- def process_csv(file):
8
- import pandas as pd
9
- # import pyodbc
10
- from fuzzywuzzy import fuzz
11
- from openpyxl import load_workbook
12
- from openpyxl.styles import PatternFill
13
-
14
- df = pd.read_excel(file)
15
-
16
- df['Address'] = df['Address (street)'].astype(str) + '-' + df['Postal code'].astype(str) + '-' + df['City'].astype(str) + '-' + df['Country'].astype(str) + df['Region'].astype(str)
17
- df['Name'] = df['Vendor Name'].astype(str)
18
-
19
- df['Name']=df['Name'].str.lower()
20
- df['Address']=df['Address'].str.lower()
21
-
22
- df.sort_values(['Name'], inplace=True)
23
- df = df.reset_index(drop=True)
24
-
25
- df['name_fuzzy_ratio']=''
26
- df['address_fuzzy_ratio']=''
27
- df['name_based_group']=''
28
- df['address_based_group']=''
29
-
30
- last_row_index = len(df)-1
31
- df.at[0,'name_fuzzy_ratio']=100
32
- df.at[0,'address_fuzzy_ratio']=100
33
- df.at[last_row_index,'name_fuzzy_ratio']=100
34
- df.at[last_row_index,'address_fuzzy_ratio']=100
35
-
36
- for i in range(1,last_row_index):
37
- current_name = df['Name'].iloc[i]
38
- previous_name = df['Name'].iloc[i-1]
39
- fuzzy_ratio = fuzz.ratio(previous_name,current_name)
40
- df.at[i,'name_fuzzy_ratio'] = fuzzy_ratio
41
-
42
- df['name_fuzzy_ratio'] = pd.to_numeric(df['name_fuzzy_ratio'], errors='coerce')
43
-
44
- group_counter = 1
45
- df.at[0,'name_based_group'] = group_counter
46
-
47
- for i in range (1, len(df)):
48
- if df.at[i,'name_fuzzy_ratio'] > 80:
49
- df.at[i,'name_based_group'] = df.at[i-1,'name_based_group']
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
50
  else:
51
- group_counter += 1
52
- df.at[i,'name_based_group'] = group_counter
53
-
54
- group = df.at[0,'name_based_group']
55
-
56
- df.sort_values(['name_based_group','Address'], inplace=True)
57
- df = df.reset_index(drop=True)
58
-
59
- for i in range(1,last_row_index):
60
- current_address = df['Address'].iloc[i]
61
- previous_address = df['Address'].iloc[i-1]
62
- fuzzy_ratio = fuzz.ratio(previous_address, current_address)
63
- df.at[i,'address_fuzzy_ratio'] = fuzzy_ratio
64
-
65
- df['address_fuzzy_ratio'] = pd.to_numeric(df['address_fuzzy_ratio'], errors='coerce')
66
 
67
- address_group_counter = 1
68
- df.at[0,'address_based_group'] = str(address_group_counter)
69
-
70
- for i in range(1,len(df)):
71
- if df.at[i,'address_fuzzy_ratio'] > 70:
72
- df.at[i,'address_based_group'] = df.at[i-1, 'address_based_group']
73
  else:
74
- if df.at[i,'name_based_group'] != group:
75
- address_group_counter = 1
76
- group = df.at[i,'name_based_group']
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
77
  else:
78
- address_group_counter +=1
79
- df.at[i,'address_based_group'] = str(address_group_counter)
80
-
81
-
82
- #Concatenate for unique group name
83
- df['Group'] = df.apply(lambda row: 'Group_{}_{}'.format(row['name_based_group'], row['address_based_group']), axis = 1)
84
-
85
- columns_to_drop = ['name_fuzzy_ratio','address_fuzzy_ratio','Address','Name']
86
- df.drop(columns=columns_to_drop, inplace=True)
87
-
88
- # Check for duplicates in 'Group' column
89
- duplicate_groups = df['Group'].duplicated(keep=False)
90
-
91
- # Create 'Remarks' column and mark duplicates as 'Duplicate' and unique as 'Unique'
92
- df['Remarks'] = ['Duplicate' if is_duplicate else 'Unique' for is_duplicate in duplicate_groups]
93
-
94
- print(df)
95
-
96
- # excel_writer = pd.ExcelWriter("C://Users//snigd//Downloads//output_vendor_2.xlsx", engine='openpyxl')
97
- # df.to_excel(excel_writer, index=False, sheet_name='Sheet1')
98
-
99
- # # Access the workbook
100
- # workbook = excel_writer.book
101
- # worksheet = workbook['Sheet1']
102
-
103
- # # Apply row coloring based on the value in the 'Remarks' column
104
- # duplicate_fill = PatternFill(start_color="FFFF00", end_color="FFFF00", fill_type="solid")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
105
 
106
- # for idx, row in df.iterrows():
107
- # if row['Remarks'] == 'Duplicate':
108
- # for cell in worksheet[idx + 2]:
109
- # cell.fill = duplicate_fill
110
 
111
- # # Save the changes
112
- # excel_writer.save()
113
 
114
- # print("Excel file saved successfully.")
 
115
 
116
- # # Printing the resulting DataFrame
 
 
 
 
117
 
118
- # # Saving the processed DataFrame to a new CSV file
119
 
120
- # # Create SQLAlchemy engine to connect to SQL Server
121
- # # This part is commented out, as it seems to be not used
122
 
123
- # # Prepare the DataFrame for download as a CSV string
124
- # with tempfile.NamedTemporaryFile(prefix="Outputs", suffix=".csv", delete=False) as temp_file:
125
- # df.to_excel(temp_file.name, index=False)
126
- # return temp_file.name
127
 
128
- with tempfile.NamedTemporaryFile(prefix="Outputs", suffix=".xlsx", delete=False) as temp_file:
129
- df.to_excel(temp_file.name, index=False)
130
 
131
- excel_writer = pd.ExcelWriter(temp_file.name, engine='openpyxl')
132
- df.to_excel(excel_writer, index=False, sheet_name='Sheet1')
 
 
133
 
134
  # Access the workbook
135
- workbook = excel_writer.book
136
- worksheet = workbook['Sheet1']
137
 
138
  # Apply row coloring based on the value in the 'Remarks' column
139
- duplicate_fill = PatternFill(start_color="FFFF00", end_color="FFFF00", fill_type="solid")
 
 
 
 
 
 
 
 
 
 
 
140
 
141
- for idx, row in df.iterrows():
142
- if row['Remarks'] == 'Duplicate':
143
- for cell in worksheet[idx + 2]:
144
- cell.fill = duplicate_fill
145
 
146
  # Save the changes
147
- excel_writer.close()
148
 
149
- print("Excel file saved successfully.")
150
 
151
- return temp_file.name
152
 
153
 
154
  interface = gr.Interface(
155
  fn=process_csv,
156
- inputs=gr.File(label="Upload XLSX File", file_count="single"),
 
 
 
 
 
 
 
157
  outputs=gr.File(label="Download File"),
158
- title="Vendor Master De-Duplication Tool",
159
- description="Upload a XLSX file and download a new one with duplicates removed."
160
  )
161
 
162
- interface.launch(share=True)
 
1
import os
import pathlib
import tempfile
import textwrap

import gradio as gr
import numpy as np
import pandas as pd
import requests
from fuzzywuzzy import fuzz
from openpyxl import load_workbook
from openpyxl.styles import PatternFill
from openpyxl.styles.alignment import Alignment
import google.generativeai as genai
from google.colab import userdata
from IPython.display import display
from IPython.display import Markdown
13
# SECURITY(review): the API key was hard-coded in this file.  Prefer the
# environment (or `userdata.get('GOOGLE_API_KEY')` on Colab); the legacy
# literal is kept only as a backward-compatible fallback.  This key has
# been committed publicly and must be rotated.
GOOGLE_API_KEY = os.environ.get('GOOGLE_API_KEY', 'AIzaSyCtACPu9EOnEa1_iAWsv_u__PQRpaCT564')
genai.configure(api_key=GOOGLE_API_KEY)
# Single shared Gemini model instance, used by gemini_analysis() below.
model = genai.GenerativeModel('gemini-1.0-pro')
16
def to_markdown(text):
    """Format a Gemini response string as quoted IPython Markdown.

    Bullet characters are rewritten as Markdown list markers and every
    line is prefixed with '> ' so the whole response renders as a
    blockquote.
    """
    # Convert bullet glyphs into Markdown list syntax.
    converted = text.replace('•', ' *')
    # predicate=lambda _: True forces the '> ' prefix onto blank lines too.
    quoted = textwrap.indent(converted, '> ', predicate=lambda _: True)
    return Markdown(quoted)
19
def process_fuzzy_ratios(rows_dict):
    """Split the six ``*_fuzzy_ratio`` entries out of a list of row dicts.

    Mutates ``rows_dict`` in place: every row loses its fuzzy-ratio keys.
    For every row after the first, the removed values are collected into a
    separate mapping keyed ``row_<n>`` (1-based position in the list).

    Returns a tuple ``(fuzz_data, rows_dict)`` where ``fuzz_data`` maps
    row labels to their extracted ratio dicts.
    """
    ratio_keys = (
        "address_fuzzy_ratio",
        "bank_fuzzy_ratio",
        "name_fuzzy_ratio",
        "accgrp_fuzzy_ratio",
        "tax_fuzzy_ratio",
        "postal_fuzzy_ratio",
    )
    fuzz_data = {}
    for position, row in enumerate(rows_dict):
        if position == 0:
            # First row: just discard the ratio columns.
            for ratio_key in ratio_keys:
                del row[ratio_key]
        else:
            # Later rows: pull the ratios out into fuzz_data.
            fuzz_data["row_" + str(position + 1)] = {
                ratio_key: row.pop(ratio_key) for ratio_key in ratio_keys
            }
    return fuzz_data, rows_dict
42
def gemini_analysis(dataframe):
    """Annotate consecutive duplicate rows with a Gemini-generated explanation.

    Scans ``dataframe`` top to bottom.  Whenever two adjacent rows are both
    marked ``'Duplicate'`` in the ``Remarks`` column, the pair (with their
    fuzzy-ratio columns split out via ``process_fuzzy_ratios``) is sent to
    the shared ``model`` and the response text is written into the
    ``Explanation`` column of the first row of the pair.  Model failures are
    logged and the row is skipped.

    Mutates ``dataframe`` in place; returns None.
    """
    prev_row_duplicate = False

    for index, row in dataframe.iterrows():
        if row['Remarks'] == 'Duplicate':
            if prev_row_duplicate:
                duplicate_pairs = []
                row1 = dataframe.loc[index - 1].to_dict()
                row2 = row.to_dict()
                duplicate_pairs.append(row1)
                duplicate_pairs.append(row2)
                fuzzy_ratios, duplicate_pairs = process_fuzzy_ratios(duplicate_pairs)
                # Trim the 12 trailing helper entries (group-label columns
                # appended by process_csv) from each row dict.
                for dictionary in duplicate_pairs:
                    for _ in range(12):
                        if dictionary:
                            dictionary.popitem()
                main_data_str = "[{}]".format(', '.join([str(d) for d in duplicate_pairs]))
                fuzzy_data_str = "{}".format(fuzzy_ratios)
                # Prompt is a tuple of strings, as accepted by generate_content.
                qs = ("I have the data", main_data_str,
                      "The corresponding fuzzy ratios are here: ", fuzzy_data_str,
                      "Give a concise explanation why these two rows are duplicate based on analyzing the main data and explaining which column values are same and which column values are different?")
                try:
                    response = model.generate_content(qs)
                    dataframe.at[index - 1, 'Explanation'] = response.text
                except Exception as ex:
                    # BUG FIX: the original caught `requests.HTTPError` with
                    # `requests` never imported (NameError while matching), and
                    # then unconditionally re-read `response.text` after the
                    # handlers, raising NameError when the call had failed.
                    print(f"An error occurred: {ex}")
            prev_row_duplicate = True
        else:
            prev_row_duplicate = False
77
+
78
def process_csv(file, remove_null_columns):
    """De-duplicate the vendor master workbook and return a highlighted XLSX.

    Reads the 'General Data ' and 'Contact Person' sheets of the uploaded
    workbook, builds composite Tax/Bank/Address/Name/Postal/AccountGroup
    keys, then runs six successive sort-and-fuzzy-match passes (each nested
    inside the groups of the previous pass) to assign a hierarchical group
    label per row.  Rows sharing the final label are marked 'Duplicate';
    Gemini explanations are attached; duplicates are highlighted yellow in
    the output file.

    :param file: path-like / file object of the uploaded .xlsx workbook.
    :param remove_null_columns: 'Yes' to drop columns that are >70% empty.
    :returns: path of the generated temporary .xlsx file.
    """
    # NOTE: the trailing space in 'General Data ' matches the sheet name
    # in the expected workbook — do not "fix" it.
    sheet_name1 = 'General Data '
    sheet_name2 = 'Contact Person'
    df = pd.read_excel(file, sheet_name=sheet_name1,engine='openpyxl')
    # Replace null values with a blank space
    df=df.fillna(" ")
    df1 = pd.read_excel(file, sheet_name=sheet_name2)
    # Replace null values with a blank space
    df1 = df1.fillna(" ")
    # Creating new columns by concatenating original columns
    df['Address'] = df['STREET'].astype(str) +'-'+ df['CITY1'].astype(str) +'-'+ df['COUNTRY'].astype(str) + '-' + df['REGION'].astype(str)
    df['Name'] = df['NAMEFIRST'].astype(str)+'-'+ df['NAMELAST'].astype(str) +'-'+ df['NAME3'].astype(str) + '-' + df['NAME4'].astype(str)
    df['Bank'] = df['BANKL'].astype(str)+'-'+df['BANKN'].astype(str)
    df['Tax'] = df['TAXTYPE'].astype(str)+'-'+df['TAXNUM'].astype(str)
    df1['cont_person_name'] = df1['PARNR'].astype(str)+'-'+ df1['VNAME'].astype(str) +'-'+ df1['LNAME'].astype(str)
    df1['cont_person_address'] = df1['COUNTRY'].astype(str) +'-'+ df1['REGION'].astype(str) +'-'+ df1['POSTLCD'].astype(str) +'-'+ df1['CITY'].astype(str) + '-' + df1['STREET'].astype(str)

    # Converting all concatenated columns to lowercase so fuzzy matching
    # is case-insensitive.
    df['Name']=df['Name'].str.lower()
    df['Address']=df['Address'].str.lower()
    df['Bank']=df['Bank'].str.lower()
    df['Tax']=df['Tax'].str.lower()
    df1['cont_person_name']=df1['cont_person_name'].str.lower()
    df1['cont_person_address']=df1['cont_person_address'].str.lower()
    # Adding contact_person_name and address to sheet1 (General Data).

    # Grouping names in df1 based on LIFNR (vendor ID).
    grouped_names = df1.groupby("LIFNR")["cont_person_name"].agg(lambda x: ', '.join(x)).reset_index()

    # Create a dictionary mapping LIFNR to concatenated contact names.
    name_map = dict(zip(grouped_names["LIFNR"], grouped_names["cont_person_name"]))
    def create_cont_person_name(row):
        # Look up the vendor's concatenated contact names; blank if none.
        if row["LIFNR"] in name_map:
            return name_map[row["LIFNR"]]
        else:
            return ""

    # Same aggregation for contact addresses.
    grouped_names = df1.groupby("LIFNR")["cont_person_address"].agg(lambda x: ', '.join(x)).reset_index()
    add_map = dict(zip(grouped_names["LIFNR"], grouped_names["cont_person_address"]))
    def create_cont_person_add(row):
        # Look up the vendor's concatenated contact addresses; blank if none.
        if row["LIFNR"] in add_map:
            return add_map[row["LIFNR"]]
        else:
            return ""

    # Apply the lookups to create the contact-person columns on sheet 1.
    df["cont_person_name"] = df.apply(create_cont_person_name, axis=1)
    df["cont_person_address"] = df.apply(create_cont_person_add, axis=1)
    # Placeholder columns for the six fuzzy-ratio passes below.
    df['name_fuzzy_ratio']=''
    df['accgrp_fuzzy_ratio']=''
    df['address_fuzzy_ratio']=''
    df['bank_fuzzy_ratio']=''
    df['tax_fuzzy_ratio']=''
    df['postal_fuzzy_ratio']=''
    # NOTE(review): the df1 ratio/group columns below are initialized but
    # never filled in this function — presumably leftovers; confirm.
    df1['cont_person_name_fuzzy_ratio']=''
    df1['cont_person_address_fuzzy_ratio']=''

    df['name_based_group']=''
    df['accgrp_based_group']=''
    df['address_based_group']=''
    df['bank_based_group']=''
    df['tax_based_group']=''
    df['postal_based_group']=''
    df1['cont_person_name_based_group']=''
    df1['cont_person_address_based_group']=''

    last_row_index = len(df)-1
    last_row_index1 = len(df1)-1

    # --- Pass 1: group by fuzzy-similar Tax key (threshold > 90) ---
    df.sort_values(['Tax'], inplace=True)
    df = df.reset_index(drop=True)
    # First and last rows get a sentinel ratio of 100.
    df.at[0,'tax_fuzzy_ratio']=100
    df.at[last_row_index,'tax_fuzzy_ratio']=100
    for i in range(1,last_row_index):
        current_tax = df['Tax'].iloc[i]
        previous_tax = df['Tax'].iloc[i-1]
        fuzzy_ratio = fuzz.ratio(previous_tax,current_tax)
        df.at[i,'tax_fuzzy_ratio'] = fuzzy_ratio

    df['tax_fuzzy_ratio'] = pd.to_numeric(df['tax_fuzzy_ratio'], errors='coerce')

    group_counter = 1
    df.at[0,'tax_based_group'] = group_counter

    # Rows similar to their predecessor inherit its group; otherwise a new
    # group number is opened.
    for i in range (1, len(df)):
        if df.at[i,'tax_fuzzy_ratio'] > 90:
            df.at[i,'tax_based_group'] = df.at[i-1,'tax_based_group']
        else:
            group_counter += 1
            df.at[i,'tax_based_group'] = group_counter
    group = df.at[0,'tax_based_group']

    # --- Pass 2: within each tax group, group by exact Bank match (>= 100) ---
    df.sort_values(['tax_based_group','Bank'], inplace=True)
    df = df.reset_index(drop=True)
    df.at[0,'bank_fuzzy_ratio']=100
    df.at[last_row_index,'bank_fuzzy_ratio']=100
    for i in range(1,last_row_index):
        current_address = df['Bank'].iloc[i]
        previous_address = df['Bank'].iloc[i-1]
        fuzzy_ratio = fuzz.ratio(previous_address, current_address)
        df.at[i,'bank_fuzzy_ratio'] = fuzzy_ratio

    df['bank_fuzzy_ratio'] = pd.to_numeric(df['bank_fuzzy_ratio'], errors='coerce')

    address_group_counter = 1
    df.at[0,'bank_based_group'] = str(address_group_counter)

    for i in range(1,len(df)):
        if df.at[i,'bank_fuzzy_ratio'] >= 100:
            df.at[i,'bank_based_group'] = df.at[i-1, 'bank_based_group']
        else:
            # Counter restarts at 1 whenever the parent (tax) group changes.
            if df.at[i,'tax_based_group'] != group:
                address_group_counter = 1
                group = df.at[i,'tax_based_group']
            else:
                address_group_counter +=1
            df.at[i,'bank_based_group'] = str(address_group_counter)
    # Combined label so far: tax_bank.
    df['Group_tax_bank'] = df.apply(lambda row: '{}_{}'.format(row['tax_based_group'], row['bank_based_group']), axis = 1)
    group = df.at[0,'Group_tax_bank']

    # --- Pass 3: within tax_bank groups, fuzzy Address match (> 70) ---
    df.sort_values(['Group_tax_bank','Address'], inplace=True)
    df = df.reset_index(drop=True)
    df.at[0,'address_fuzzy_ratio']=100
    df.at[last_row_index,'address_fuzzy_ratio']=100
    for i in range(1,last_row_index):
        current_address = df['Address'].iloc[i]
        previous_address = df['Address'].iloc[i-1]
        fuzzy_ratio = fuzz.ratio(previous_address, current_address)
        df.at[i,'address_fuzzy_ratio'] = fuzzy_ratio

    df['address_fuzzy_ratio'] = pd.to_numeric(df['address_fuzzy_ratio'], errors='coerce')

    address_group_counter = 1
    df.at[0,'address_based_group'] = str(address_group_counter)

    for i in range(1,len(df)):
        if df.at[i,'address_fuzzy_ratio'] > 70:
            df.at[i,'address_based_group'] = df.at[i-1, 'address_based_group']
        else:
            if df.at[i,'Group_tax_bank'] != group:
                address_group_counter = 1
                group = df.at[i,'Group_tax_bank']
            else:
                address_group_counter +=1
            df.at[i,'address_based_group'] = str(address_group_counter)
    df['Group_tax_bank_add'] = df.apply(lambda row: '{}_{}'.format(row['Group_tax_bank'], row['address_based_group']), axis = 1)
    group = df.at[0,'Group_tax_bank_add']

    # --- Pass 4: within tax_bank_add groups, fuzzy Name match (> 80) ---
    df.sort_values(['Group_tax_bank_add','Name'], inplace=True)
    df = df.reset_index(drop=True)
    df.at[0,'name_fuzzy_ratio']=100
    df.at[last_row_index,'name_fuzzy_ratio']=100
    for i in range(1,last_row_index):
        current_address = df['Name'].iloc[i]
        previous_address = df['Name'].iloc[i-1]
        fuzzy_ratio = fuzz.ratio(previous_address, current_address)
        df.at[i,'name_fuzzy_ratio'] = fuzzy_ratio

    df['name_fuzzy_ratio'] = pd.to_numeric(df['name_fuzzy_ratio'], errors='coerce')

    address_group_counter = 1
    df.at[0,'name_based_group'] = str(address_group_counter)

    for i in range(1,len(df)):
        if df.at[i,'name_fuzzy_ratio'] > 80:
            df.at[i,'name_based_group'] = df.at[i-1, 'name_based_group']
        else:
            if df.at[i,'Group_tax_bank_add'] != group:
                address_group_counter = 1
                group = df.at[i,'Group_tax_bank_add']
            else:
                address_group_counter +=1
            df.at[i,'name_based_group'] = str(address_group_counter)
    df['Group_tax_bank_add_name'] = df.apply(lambda row: '{}_{}'.format(row['Group_tax_bank_add'], row['name_based_group']), axis = 1)
    group = df.at[0,'Group_tax_bank_add_name']

    # --- Pass 5: within those groups, fuzzy postal-code match (> 90) ---
    df.sort_values(['Group_tax_bank_add_name','POSTCODE1'], inplace=True)
    df = df.reset_index(drop=True)
    df.at[0,'postal_fuzzy_ratio']=100
    df.at[last_row_index,'postal_fuzzy_ratio']=100
    for i in range(1,last_row_index):
        current_address = df['POSTCODE1'].iloc[i]
        previous_address = df['POSTCODE1'].iloc[i-1]
        fuzzy_ratio = fuzz.ratio(previous_address, current_address)
        df.at[i,'postal_fuzzy_ratio'] = fuzzy_ratio

    df['postal_fuzzy_ratio'] = pd.to_numeric(df['postal_fuzzy_ratio'], errors='coerce')

    address_group_counter = 1
    df.at[0,'postal_based_group'] = str(address_group_counter)

    for i in range(1,len(df)):
        if df.at[i,'postal_fuzzy_ratio'] > 90:
            df.at[i,'postal_based_group'] = df.at[i-1, 'postal_based_group']
        else:
            if df.at[i,'Group_tax_bank_add_name'] != group:
                address_group_counter = 1
                group = df.at[i,'Group_tax_bank_add_name']
            else:
                address_group_counter +=1
            df.at[i,'postal_based_group'] = str(address_group_counter)
    df['Group_tax_bank_add_name_post'] = df.apply(lambda row: '{}_{}'.format(row['Group_tax_bank_add_name'], row['postal_based_group']), axis = 1)
    group = df.at[0,'Group_tax_bank_add_name_post']

    # --- Pass 6: within those groups, exact account-group match (>= 100) ---
    df.sort_values(['Group_tax_bank_add_name_post','KTOKK'], inplace=True)
    df = df.reset_index(drop=True)
    df.at[0,'accgrp_fuzzy_ratio']=100
    df.at[last_row_index,'accgrp_fuzzy_ratio']=100
    for i in range(1,last_row_index):
        current_address = df['KTOKK'].iloc[i]
        previous_address = df['KTOKK'].iloc[i-1]
        fuzzy_ratio = fuzz.ratio(previous_address, current_address)
        df.at[i,'accgrp_fuzzy_ratio'] = fuzzy_ratio

    df['accgrp_fuzzy_ratio'] = pd.to_numeric(df['accgrp_fuzzy_ratio'], errors='coerce')

    address_group_counter = 1
    df.at[0,'accgrp_based_group'] = str(address_group_counter)

    for i in range(1,len(df)):
        if df.at[i,'accgrp_fuzzy_ratio'] >=100:
            df.at[i,'accgrp_based_group'] = df.at[i-1, 'accgrp_based_group']
        else:
            if df.at[i,'Group_tax_bank_add_name_post'] != group:
                address_group_counter = 1
                group = df.at[i,'Group_tax_bank_add_name_post']
            else:
                address_group_counter +=1
            df.at[i,'accgrp_based_group'] = str(address_group_counter)
    df['Group_tax_bank_add_name_post_accgrp'] = df.apply(lambda row: '{}_{}'.format(row['Group_tax_bank_add_name_post'], row['accgrp_based_group']), axis = 1)
    group = df.at[0,'Group_tax_bank_add_name_post_accgrp']

    # Any row whose final hierarchical label repeats is a duplicate.
    duplicate_groups = df['Group_tax_bank_add_name_post_accgrp'].duplicated(keep=False)
    df['Remarks'] = ['Duplicate' if is_duplicate else 'Unique' for is_duplicate in duplicate_groups]

    # Temporarily turn blanks back into NaN so emptiness can be measured.
    df.replace(" ", np.nan, inplace=True)
    nan_percentage = df.isna().mean(axis=0)

    # Filter columns with more than 70% NaN values
    columns_to_drop = nan_percentage[nan_percentage > 0.7].index
    if remove_null_columns=='Yes':
        df.drop(columns=columns_to_drop, inplace=True)
    df.replace(np.nan, " ", inplace=True)

    # Attach Gemini explanations for adjacent duplicate pairs.
    gemini_analysis(df)

    # Drop all intermediate ratio / group helper columns from the output.
    columns_to_drop = ['name_fuzzy_ratio','accgrp_fuzzy_ratio','address_fuzzy_ratio','bank_fuzzy_ratio','tax_fuzzy_ratio','postal_fuzzy_ratio','name_based_group','accgrp_based_group','address_based_group','bank_based_group','tax_based_group','postal_based_group','Group_tax_bank','Group_tax_bank_add', 'Group_tax_bank_add_name', 'Group_tax_bank_add_name_post']
    df = df.drop(columns=columns_to_drop, axis=1)

    # Write the result to a temp .xlsx, then reopen it via openpyxl to style it.
    with tempfile.NamedTemporaryFile(prefix="Outputs", suffix=".xlsx", delete=False) as temp_file:
        df.to_excel(temp_file.name, index=False)
    excel_writer = pd.ExcelWriter(temp_file.name, engine='openpyxl')
    df.to_excel(excel_writer, index=False, sheet_name='Sheet1')

    # Access the workbook
    workbook = excel_writer.book
    worksheet = workbook['Sheet1']

    # Apply row coloring based on the value in the 'Remarks' column
    duplicate_fill = PatternFill(start_color="FFFF00", end_color="FFFF00", fill_type="solid")

    # +2: one for the header row, one for 1-based worksheet indexing.
    for idx, row in df.iterrows():
        if row['Remarks'] == 'Duplicate':
            for cell in worksheet[idx + 2]:
                cell.alignment = Alignment(wrap_text=True)
                cell.fill = duplicate_fill

    # Iterate over columns and set a fixed display width.
    for col in worksheet.columns:
        col_letter = col[0].column_letter
        worksheet.column_dimensions[col_letter].width = 28

    # Iterate over rows and set a fixed display height.
    for row in worksheet.iter_rows():
        worksheet.row_dimensions[row[0].row].height = 20 # Set the row height to 20 (adjust as needed)

    # Save the changes
    excel_writer.close()

    print("Excel file saved successfully.")

    return temp_file.name
364
 
365
 
366
# Gradio UI wiring: process_csv receives the uploaded workbook plus the
# Yes/No choice for dropping mostly-empty columns, and returns the path of
# the highlighted XLSX for download.
# NOTE(review): `gr` is not imported in this revision (the former
# `import gradio as gr` line was removed) — confirm the import is restored,
# otherwise this top-level code raises NameError on startup.
interface = gr.Interface(
    fn=process_csv,
    inputs=[
        gr.File(label="Upload XLSX File", file_count="single"),
        gr.Radio(
            ["Yes", "No"],
            label="Remove Columns?",
            info="The columns with 70% or More Null Values will be removed"
        )
    ],
    outputs=gr.File(label="Download File"),
    title="Vendor Master De-Duplication Tool",
    description="Upload a XLSX file and choose which column to check for duplicates."
)

# share=True publishes a temporary public URL for the app.
interface.launch(share=True)