import os
import pathlib
import tempfile
import textwrap

import pandas as pd
import numpy as np
import requests
import gradio as gr
import google.generativeai as genai
from fuzzywuzzy import fuzz
from openpyxl import load_workbook
from openpyxl.styles import PatternFill
from openpyxl.styles.alignment import Alignment
from IPython.display import display, Markdown

# Read the Gemini API key from an environment variable rather than hard-coding it.
GOOGLE_API_KEY = os.environ.get('GOOGLE_API_KEY', '')
genai.configure(api_key=GOOGLE_API_KEY)
model = genai.GenerativeModel('gemini-1.0-pro')
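# Helper for rendering a Gemini response as block-quoted Markdown in a notebook
# (not called by the Gradio app itself).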
def to_markdown(text):
text = text.replace('•', ' *')
return Markdown(textwrap.indent(text, '> ', predicate=lambda _: True))
# Pull the per-column fuzzy-ratio values out of a pair of candidate duplicate rows:
# the ratio keys are removed from both row dicts, and the second row's ratios are
# returned separately for use in the Gemini prompt.
def process_fuzzy_ratios(rows_dict):
fuzz_data = {}
for key, row in enumerate(rows_dict):
if key == 0:
# For the first row, delete specified columns
del row["address_fuzzy_ratio"]
del row["bank_fuzzy_ratio"]
del row["name_fuzzy_ratio"]
del row["accgrp_fuzzy_ratio"]
del row["tax_fuzzy_ratio"]
del row["postal_fuzzy_ratio"]
else:
# For subsequent rows, store data in fuzz_data dictionary
fuzz_data["row_" + str(key + 1)] = {
"address_fuzzy_ratio": row.pop("address_fuzzy_ratio"),
"bank_fuzzy_ratio": row.pop("bank_fuzzy_ratio"),
"name_fuzzy_ratio": row.pop("name_fuzzy_ratio"),
"accgrp_fuzzy_ratio": row.pop("accgrp_fuzzy_ratio"),
"tax_fuzzy_ratio": row.pop("tax_fuzzy_ratio"),
"postal_fuzzy_ratio": row.pop("postal_fuzzy_ratio")
}
return fuzz_data, rows_dict
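# Pair up consecutive rows flagged as 'Duplicate' and ask Gemini for a short
# explanation of the match; the explanation is written to an 'Explanation'
# column on the first row of each pair.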
def gemini_analysis(dataframe):
    if 'Explanation' not in dataframe.columns:
        dataframe['Explanation'] = ''
    prev_row_duplicate = False
    prev_row_number = None
    for index, row in dataframe.iterrows():
        if row['Remarks'] == 'Duplicate':
            if prev_row_duplicate:
                duplicate_pairs = []
                row1 = dataframe.loc[index - 1].to_dict()
                row2 = row.to_dict()
                duplicate_pairs.append(row1)
                duplicate_pairs.append(row2)
                fuzzy_ratios, duplicate_pairs = process_fuzzy_ratios(duplicate_pairs)
                # Strip the helper grouping columns so only the business data is sent to the model
                helper_keys = [
                    'name_based_group', 'accgrp_based_group', 'address_based_group',
                    'bank_based_group', 'tax_based_group', 'postal_based_group',
                    'Group_tax_bank', 'Group_tax_bank_add', 'Group_tax_bank_add_name',
                    'Group_tax_bank_add_name_post', 'Group_tax_bank_add_name_post_accgrp',
                    'Remarks', 'Explanation',
                ]
                for dictionary in duplicate_pairs:
                    for key in helper_keys:
                        dictionary.pop(key, None)
                main_data_str = "[{}]".format(', '.join(str(d) for d in duplicate_pairs))
                fuzzy_data_str = "{}".format(fuzzy_ratios)
                qs = (
                    "I have the data " + main_data_str
                    + " The corresponding fuzzy ratios are here: " + fuzzy_data_str
                    + " Give a concise explanation of why these two rows are duplicates, "
                    "based on the main data, explaining which column values are the same "
                    "and which column values are different."
                )
                try:
                    response = model.generate_content(qs)
                    dataframe.at[index - 1, 'Explanation'] = response.text
                except requests.HTTPError as e:
                    print(f"Error fetching Gemini response: {e}")
                except ValueError as ve:
                    print(f"ValueError occurred: {ve}")
                except Exception as ex:
                    print(f"An error occurred: {ex}")
            prev_row_duplicate = True
            prev_row_number = index
        else:
            prev_row_duplicate = False
            prev_row_number = None
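# End-to-end pipeline: read the workbook, build comparison keys, group similar rows
# pass by pass (tax -> bank -> address -> name -> postal code -> account group),
# flag duplicates, ask Gemini for explanations, and write a styled Excel file.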
def process_csv(file, remove_null_columns):
sheet_name1 = 'General Data '
sheet_name2 = 'Contact Person'
df = pd.read_excel(file, sheet_name=sheet_name1,engine='openpyxl')
# Replace null values with a blank space
df=df.fillna(" ")
df1 = pd.read_excel(file, sheet_name=sheet_name2)
# Replace null values with a blank space
df1 = df1.fillna(" ")
# Creating new columns by concatenating original columns
df['Address'] = df['STREET'].astype(str) +'-'+ df['CITY1'].astype(str) +'-'+ df['COUNTRY'].astype(str) + '-' + df['REGION'].astype(str)
df['Name'] = df['NAMEFIRST'].astype(str)+'-'+ df['NAMELAST'].astype(str) +'-'+ df['NAME3'].astype(str) + '-' + df['NAME4'].astype(str)
df['Bank'] = df['BANKL'].astype(str)+'-'+df['BANKN'].astype(str)
df['Tax'] = df['TAXTYPE'].astype(str)+'-'+df['TAXNUM'].astype(str)
df1['cont_person_name'] = df1['PARNR'].astype(str)+'-'+ df1['VNAME'].astype(str) +'-'+ df1['LNAME'].astype(str)
df1['cont_person_address'] = df1['COUNTRY'].astype(str) +'-'+ df1['REGION'].astype(str) +'-'+ df1['POSTLCD'].astype(str) +'-'+ df1['CITY'].astype(str) + '-' + df1['STREET'].astype(str)
# Converting all concatenated columns to lowercase
df['Name']=df['Name'].str.lower()
df['Address']=df['Address'].str.lower()
df['Bank']=df['Bank'].str.lower()
df['Tax']=df['Tax'].str.lower()
df1['cont_person_name']=df1['cont_person_name'].str.lower()
df1['cont_person_address']=df1['cont_person_address'].str.lower()
    # Add cont_person_name and cont_person_address onto the General Data frame (df)
    # Group contact-person names in df1 by LIFNR (vendor ID)
grouped_names = df1.groupby("LIFNR")["cont_person_name"].agg(lambda x: ', '.join(x)).reset_index()
# Create a dictionary mapping LIFNR to concatenated names
name_map = dict(zip(grouped_names["LIFNR"], grouped_names["cont_person_name"]))
def create_cont_person_name(row):
if row["LIFNR"] in name_map:
return name_map[row["LIFNR"]]
else:
return ""
grouped_names = df1.groupby("LIFNR")["cont_person_address"].agg(lambda x: ', '.join(x)).reset_index()
add_map = dict(zip(grouped_names["LIFNR"], grouped_names["cont_person_address"]))
def create_cont_person_add(row):
if row["LIFNR"] in add_map:
return add_map[row["LIFNR"]]
else:
return ""
    # Apply the mappings to create the cont_person_name and cont_person_address columns
df["cont_person_name"] = df.apply(create_cont_person_name, axis=1)
df["cont_person_address"] = df.apply(create_cont_person_add, axis=1)
df['name_fuzzy_ratio']=''
df['accgrp_fuzzy_ratio']=''
df['address_fuzzy_ratio']=''
df['bank_fuzzy_ratio']=''
df['tax_fuzzy_ratio']=''
df['postal_fuzzy_ratio']=''
df1['cont_person_name_fuzzy_ratio']=''
df1['cont_person_address_fuzzy_ratio']=''
df['name_based_group']=''
df['accgrp_based_group']=''
df['address_based_group']=''
df['bank_based_group']=''
df['tax_based_group']=''
df['postal_based_group']=''
df1['cont_person_name_based_group']=''
df1['cont_person_address_based_group']=''
last_row_index = len(df)-1
last_row_index1 = len(df1)-1
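    # Pass 1 (tax): sort by the Tax key, compare each row with the previous one using
    # fuzz.ratio, and start a new tax_based_group whenever the similarity is 90 or below.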
df.sort_values(['Tax'], inplace=True)
df = df.reset_index(drop=True)
    df.at[0, 'tax_fuzzy_ratio'] = 100
    for i in range(1, len(df)):
        current_tax = df['Tax'].iloc[i]
        previous_tax = df['Tax'].iloc[i - 1]
        df.at[i, 'tax_fuzzy_ratio'] = fuzz.ratio(previous_tax, current_tax)
df['tax_fuzzy_ratio'] = pd.to_numeric(df['tax_fuzzy_ratio'], errors='coerce')
group_counter = 1
df.at[0,'tax_based_group'] = group_counter
for i in range (1, len(df)):
if df.at[i,'tax_fuzzy_ratio'] > 90:
df.at[i,'tax_based_group'] = df.at[i-1,'tax_based_group']
else:
group_counter += 1
df.at[i,'tax_based_group'] = group_counter
group = df.at[0,'tax_based_group']
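    # Pass 2 (bank): within each tax group, keep rows together only when their bank
    # details match exactly (ratio of 100); the counter restarts for each new tax group.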
df.sort_values(['tax_based_group','Bank'], inplace=True)
df = df.reset_index(drop=True)
    df.at[0, 'bank_fuzzy_ratio'] = 100
    for i in range(1, len(df)):
        current_bank = df['Bank'].iloc[i]
        previous_bank = df['Bank'].iloc[i - 1]
        df.at[i, 'bank_fuzzy_ratio'] = fuzz.ratio(previous_bank, current_bank)
df['bank_fuzzy_ratio'] = pd.to_numeric(df['bank_fuzzy_ratio'], errors='coerce')
address_group_counter = 1
df.at[0,'bank_based_group'] = str(address_group_counter)
for i in range(1,len(df)):
if df.at[i,'bank_fuzzy_ratio'] >= 100:
df.at[i,'bank_based_group'] = df.at[i-1, 'bank_based_group']
else:
if df.at[i,'tax_based_group'] != group:
address_group_counter = 1
group = df.at[i,'tax_based_group']
else:
address_group_counter +=1
df.at[i,'bank_based_group'] = str(address_group_counter)
df['Group_tax_bank'] = df.apply(lambda row: '{}_{}'.format(row['tax_based_group'], row['bank_based_group']), axis = 1)
group = df.at[0,'Group_tax_bank']
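    # Pass 3 (address): within each tax+bank group, keep rows together when their
    # addresses are more than 70% similar.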
df.sort_values(['Group_tax_bank','Address'], inplace=True)
df = df.reset_index(drop=True)
    df.at[0, 'address_fuzzy_ratio'] = 100
    for i in range(1, len(df)):
        current_address = df['Address'].iloc[i]
        previous_address = df['Address'].iloc[i - 1]
        df.at[i, 'address_fuzzy_ratio'] = fuzz.ratio(previous_address, current_address)
df['address_fuzzy_ratio'] = pd.to_numeric(df['address_fuzzy_ratio'], errors='coerce')
address_group_counter = 1
df.at[0,'address_based_group'] = str(address_group_counter)
for i in range(1,len(df)):
if df.at[i,'address_fuzzy_ratio'] > 70:
df.at[i,'address_based_group'] = df.at[i-1, 'address_based_group']
else:
if df.at[i,'Group_tax_bank'] != group:
address_group_counter = 1
group = df.at[i,'Group_tax_bank']
else:
address_group_counter +=1
df.at[i,'address_based_group'] = str(address_group_counter)
df['Group_tax_bank_add'] = df.apply(lambda row: '{}_{}'.format(row['Group_tax_bank'], row['address_based_group']), axis = 1)
group = df.at[0,'Group_tax_bank_add']
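    # Pass 4 (name): within each tax+bank+address group, keep rows together when their
    # names are more than 80% similar.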
df.sort_values(['Group_tax_bank_add','Name'], inplace=True)
df = df.reset_index(drop=True)
    df.at[0, 'name_fuzzy_ratio'] = 100
    for i in range(1, len(df)):
        current_name = df['Name'].iloc[i]
        previous_name = df['Name'].iloc[i - 1]
        df.at[i, 'name_fuzzy_ratio'] = fuzz.ratio(previous_name, current_name)
df['name_fuzzy_ratio'] = pd.to_numeric(df['name_fuzzy_ratio'], errors='coerce')
address_group_counter = 1
df.at[0,'name_based_group'] = str(address_group_counter)
for i in range(1,len(df)):
if df.at[i,'name_fuzzy_ratio'] > 80:
df.at[i,'name_based_group'] = df.at[i-1, 'name_based_group']
else:
if df.at[i,'Group_tax_bank_add'] != group:
address_group_counter = 1
group = df.at[i,'Group_tax_bank_add']
else:
address_group_counter +=1
df.at[i,'name_based_group'] = str(address_group_counter)
df['Group_tax_bank_add_name'] = df.apply(lambda row: '{}_{}'.format(row['Group_tax_bank_add'], row['name_based_group']), axis = 1)
group = df.at[0,'Group_tax_bank_add_name']
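    # Pass 5 (postal code): keep rows together when their postal codes are more than 90% similar.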
df.sort_values(['Group_tax_bank_add_name','POSTCODE1'], inplace=True)
df = df.reset_index(drop=True)
    df.at[0, 'postal_fuzzy_ratio'] = 100
    for i in range(1, len(df)):
        current_postal = str(df['POSTCODE1'].iloc[i])
        previous_postal = str(df['POSTCODE1'].iloc[i - 1])
        df.at[i, 'postal_fuzzy_ratio'] = fuzz.ratio(previous_postal, current_postal)
df['postal_fuzzy_ratio'] = pd.to_numeric(df['postal_fuzzy_ratio'], errors='coerce')
address_group_counter = 1
df.at[0,'postal_based_group'] = str(address_group_counter)
for i in range(1,len(df)):
if df.at[i,'postal_fuzzy_ratio'] > 90:
df.at[i,'postal_based_group'] = df.at[i-1, 'postal_based_group']
else:
if df.at[i,'Group_tax_bank_add_name'] != group:
address_group_counter = 1
group = df.at[i,'Group_tax_bank_add_name']
else:
address_group_counter +=1
df.at[i,'postal_based_group'] = str(address_group_counter)
df['Group_tax_bank_add_name_post'] = df.apply(lambda row: '{}_{}'.format(row['Group_tax_bank_add_name'], row['postal_based_group']), axis = 1)
group = df.at[0,'Group_tax_bank_add_name_post']
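    # Pass 6 (account group): keep rows together only when the KTOKK account group matches exactly.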
df.sort_values(['Group_tax_bank_add_name_post','KTOKK'], inplace=True)
df = df.reset_index(drop=True)
    df.at[0, 'accgrp_fuzzy_ratio'] = 100
    for i in range(1, len(df)):
        current_accgrp = str(df['KTOKK'].iloc[i])
        previous_accgrp = str(df['KTOKK'].iloc[i - 1])
        df.at[i, 'accgrp_fuzzy_ratio'] = fuzz.ratio(previous_accgrp, current_accgrp)
df['accgrp_fuzzy_ratio'] = pd.to_numeric(df['accgrp_fuzzy_ratio'], errors='coerce')
address_group_counter = 1
df.at[0,'accgrp_based_group'] = str(address_group_counter)
for i in range(1,len(df)):
if df.at[i,'accgrp_fuzzy_ratio'] >=100:
df.at[i,'accgrp_based_group'] = df.at[i-1, 'accgrp_based_group']
else:
if df.at[i,'Group_tax_bank_add_name_post'] != group:
address_group_counter = 1
group = df.at[i,'Group_tax_bank_add_name_post']
else:
address_group_counter +=1
df.at[i,'accgrp_based_group'] = str(address_group_counter)
df['Group_tax_bank_add_name_post_accgrp'] = df.apply(lambda row: '{}_{}'.format(row['Group_tax_bank_add_name_post'], row['accgrp_based_group']), axis = 1)
group = df.at[0,'Group_tax_bank_add_name_post_accgrp']
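    # Rows that share the same final composite group key are flagged as duplicates.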
duplicate_groups = df['Group_tax_bank_add_name_post_accgrp'].duplicated(keep=False)
df['Remarks'] = ['Duplicate' if is_duplicate else 'Unique' for is_duplicate in duplicate_groups]
df.replace(" ", np.nan, inplace=True)
nan_percentage = df.isna().mean(axis=0)
# Filter columns with more than 70% NaN values
columns_to_drop = nan_percentage[nan_percentage > 0.7].index
if remove_null_columns=='Yes':
df.drop(columns=columns_to_drop, inplace=True)
df.replace(np.nan, " ", inplace=True)
    # Ask Gemini to explain each pair of rows flagged as duplicates
gemini_analysis(df)
columns_to_drop = ['name_fuzzy_ratio','accgrp_fuzzy_ratio','address_fuzzy_ratio','bank_fuzzy_ratio','tax_fuzzy_ratio','postal_fuzzy_ratio','name_based_group','accgrp_based_group','address_based_group','bank_based_group','tax_based_group','postal_based_group','Group_tax_bank','Group_tax_bank_add', 'Group_tax_bank_add_name', 'Group_tax_bank_add_name_post']
df = df.drop(columns=columns_to_drop, axis=1)
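    # Write the result to a temporary .xlsx file and highlight duplicate rows with openpyxl styling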
    with tempfile.NamedTemporaryFile(prefix="Outputs", suffix=".xlsx", delete=False) as temp_file:
        excel_writer = pd.ExcelWriter(temp_file.name, engine='openpyxl')
        df.to_excel(excel_writer, index=False, sheet_name='Sheet1')
# Access the workbook
workbook = excel_writer.book
worksheet = workbook['Sheet1']
# Apply row coloring based on the value in the 'Remarks' column
duplicate_fill = PatternFill(start_color="FFFF00", end_color="FFFF00", fill_type="solid")
for idx, row in df.iterrows():
if row['Remarks'] == 'Duplicate':
for cell in worksheet[idx + 2]:
cell.alignment = Alignment(wrap_text=True)
cell.fill = duplicate_fill
        # Set a fixed width for every column
for col in worksheet.columns:
col_letter = col[0].column_letter
worksheet.column_dimensions[col_letter].width = 28
        # Set a fixed height for every row
        for row in worksheet.iter_rows():
            worksheet.row_dimensions[row[0].row].height = 20
# Save the changes
excel_writer.close()
print("Excel file saved successfully.")
return temp_file.name
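# Gradio UI: upload an XLSX workbook, optionally drop mostly-empty columns, and
# download the annotated, colour-coded result.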
interface = gr.Interface(
    fn=process_csv,
    inputs=[
        gr.File(label="Upload XLSX File", file_count="single"),
        gr.Radio(
            ["Yes", "No"],
            label="Remove Columns?",
            info="Columns with 70% or more null values will be removed",
        ),
    ],
    outputs=gr.File(label="Download File"),
    title="Vendor Master De-Duplication Tool",
    description="Upload an XLSX file and choose whether to remove columns that are mostly empty.",
)

interface.launch(share=True)