# -*- coding: utf-8 -*-
"""[Uma Namboothiripad]Assignment_2.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1_sofOjXRDnId49NOup4sdiVS1E_51T-b

Load the dataset below
"""
!pip install -U spacy

# Install the library that gives an easy-to-use interface to BERT-style sentence embeddings
# https://github.com/UKPLab/sentence-transformers/tree/master/sentence_transformers
!pip install -U sentence-transformers
| """I was having issues connecting my csv file to the colab notebook, so I ended up connecting this to my drive""" | |
import spacy
from spacy.lang.en.stop_words import STOP_WORDS
from string import punctuation
from collections import Counter
from heapq import nlargest

from google.colab import drive
drive.mount('/content/drive')

import pandas as pd
from tqdm import tqdm
from sentence_transformers import SentenceTransformer, util

!pip install -q kaggle
!pip install lightgbm
| """Setup Kaggle json credentials""" | |
from google.colab import files
files.upload()

!mkdir -p ~/.kaggle/
!cp kaggle.json ~/.kaggle/
!chmod 600 ~/.kaggle/kaggle.json

!kaggle datasets list
# Download a single file from the dataset: the dataset slug goes after -d and the file name after -f
!kaggle datasets download -d hamzafarooq50/hotel-listings-and-reviews -f HotelListInBarcelona__en2019100120191005.csv
!ls
!python -m spacy download en_core_web_sm
!kaggle datasets download --force -d hamzafarooq50/hotel-listings-and-reviews -f hotelReviewsInBarcelona__en2019100120191005.csv
!ls
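# Note: the Kaggle CLI may deliver downloaded files as zip archives; check the !ls output
# above and, if a .zip shows up, unzip it before reading from the working directory
# (the archive name below is an assumption, adjust it to what !ls actually shows).
#!unzip -o HotelListInBarcelona__en2019100120191005.csv.zip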
nlp = spacy.load("en_core_web_sm")

import re
import nltk
nltk.download('punkt')
from nltk.tokenize import word_tokenize
nltk.download('stopwords')
from nltk.corpus import stopwords
nltk.download('wordnet')
nltk.download('omw-1.4')
from nltk.stem import WordNetLemmatizer
import os
from spacy import displacy
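# WordNetLemmatizer is imported above but never used later in this notebook; if lemmatization
# were added to the cleaning step, a minimal sketch (on a made-up sample sentence, not a row
# from the dataset) would look like this:
lemmatizer = WordNetLemmatizer()
print([lemmatizer.lemmatize(w) for w in word_tokenize("rooms were cleaned daily")])
# -> ['room', 'were', 'cleaned', 'daily']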
| text = """Example text""" | |
| #text = "I really hope that France does not win the World Cup and Morocco makes it to the finals" | |
| doc = nlp(text) | |
| sentence_spans = list(doc.sents) | |
| displacy.render(doc, jupyter = True, style="ent") | |
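# displacy only renders the entities; for reference, the same information can be read
# programmatically from spaCy's doc.ents attribute (a small sketch, not part of the
# original pipeline).
entities = [(ent.text, ent.label_) for ent in doc.ents]
print(entities)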
stopwords = list(STOP_WORDS)
punctuation = punctuation + '\n'

import scipy.spatial
import pickle as pkl
!pip install -U sentence-transformers
from sentence_transformers import SentenceTransformer

# Two candidate models were tried; only the last assignment is kept, so
# bert-base-nli-mean-tokens is the embedder actually used below
#embedder = SentenceTransformer('all-MiniLM-L6-v2')
embedder = SentenceTransformer('bert-base-nli-mean-tokens')
df = pd.read_csv('/content/drive/My Drive/Colab Notebooks/Assignment#2/HotelListInBarcelona__en2019100120191005.csv', sep=",", encoding='cp1252')
!kaggle datasets download --force -d hamzafarooq50/hotel-listings-and-reviews

df.head()
df['hotel_name'].value_counts()
df['hotel_name'].drop_duplicates()

# Collapse the listings to one row per hotel, joining all of its feature strings
# (a space separator keeps words from different rows from running together)
df_combined = df.sort_values(['hotel_name']).groupby('hotel_name', sort=False).hotel_features.apply(' '.join).reset_index(name='hotel_features')
df_combined.head().T
# Strip everything except alphanumeric characters and whitespace
df_combined['hotel_features'] = df_combined['hotel_features'].apply(lambda x: re.sub(r'[^a-zA-Z0-9\s]', '', x))
def lower_case(input_str):
    input_str = input_str.lower()
    return input_str

df_combined['hotel_features'] = df_combined['hotel_features'].apply(lambda x: lower_case(x))
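# Quick sanity check of the two cleaning steps on a made-up sample string (the sample is
# hypothetical, not a row from the dataset); note that punctuation, hyphens and accented
# characters are all stripped by the regex above.
sample = "Free WIFI, 10-minute walk to La Sagrada Família!"
print(lower_case(re.sub(r'[^a-zA-Z0-9\s]', '', sample)))
# -> free wifi 10minute walk to la sagrada famlia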
df = df_combined

# Map each hotel's feature text to its name so results can be looked up later
df_sentences = df_combined.set_index("hotel_features")
df_sentences = df_sentences["hotel_name"].to_dict()
df_sentences_list = list(df_sentences.keys())
len(df_sentences_list)

list(df_sentences.keys())[:5]
df_sentences_list = [str(d) for d in tqdm(df_sentences_list)]
# Corpus with example sentences
corpus = df_sentences_list
corpus_embeddings = embedder.encode(corpus, show_progress_bar=True)
corpus_embeddings[0]
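# encode() returns (by default) a numpy array of shape (num_hotels, embedding_dim);
# a quick shape check confirms every hotel description received a vector.
print(corpus_embeddings.shape)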
queries = ['Hotel near tourist locations and with free WIFI',
           ]
query_embeddings = embedder.encode(queries, show_progress_bar=True)
import torch

# Query sentences:
queries = ['Hotel at least 10 minutes away from sagrada familia'
           ]

# Find the closest 3 sentences of the corpus for each query sentence based on cosine similarity
top_k = min(3, len(corpus))
for query in queries:
    query_embedding = embedder.encode(query, convert_to_tensor=True)

    # We use cosine similarity and torch.topk to find the highest scores
    cos_scores = util.pytorch_cos_sim(query_embedding, corpus_embeddings)[0]
    top_results = torch.topk(cos_scores, k=top_k)

    print("\n\n======================\n\n")
    print("Query:", query)
    print("\nTop 3 most similar sentences in corpus:")

    for score, idx in zip(top_results[0], top_results[1]):
        print(corpus[idx], "(Score: {:.4f})".format(score))
        row_dict = df.loc[df['hotel_features'] == corpus[idx]]
        print("hotel_name:", row_dict['hotel_name'], "\n")
# for idx, distance in results[0:closest_n]:
#     print("Score: ", "(Score: %.4f)" % (1-distance), "\n")
#     print("Paragraph: ", corpus[idx].strip(), "\n")
#     row_dict = df.loc[df['all_review'] == corpus[idx]]
#     print("paper_id: ", row_dict['Hotel'], "\n")
model = SentenceTransformer('sentence-transformers/paraphrase-xlm-r-multilingual-v1')
embeddings = model.encode(corpus)
#print(embeddings)

query_embedding.shape
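# Note: query_embedding above still comes from `embedder`, while `embeddings` were produced
# by the multilingual `model`; vectors from different models live in different embedding
# spaces, so each query is re-encoded with `model` in the loop below before scoring.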
# Query sentences:
queries = ['Hotel at least 10 minutes away from good food',
           'quiet'
           ]

# Find the closest 5 sentences of the corpus for each query sentence based on cosine similarity
top_k = min(5, len(corpus))
for query in queries:
    query_embedding = model.encode(query, convert_to_tensor=True)

    # We use cosine similarity and torch.topk to find the highest 5 scores
    cos_scores = util.pytorch_cos_sim(query_embedding, embeddings)[0]
    top_results = torch.topk(cos_scores, k=top_k)

    print("\n\n======================\n\n")
    print("Query:", query)
    print("\nTop 5 most similar sentences in corpus:")

    for score, idx in zip(top_results[0], top_results[1]):
        print(corpus[idx], "(Score: {:.4f})".format(score))
        row_dict = df.loc[df['hotel_features'] == corpus[idx]]
        print("hotel_name:", row_dict['hotel_name'], "\n")
df
# util.semantic_search does the same cosine-similarity ranking in a single call
hits = util.semantic_search(query_embedding, embeddings, top_k=5)
hits = hits[0]  # get the hits for the first query
for hit in hits:
    print(hit)
    print("(Score: {:.4f})".format(hit['score']))
    print(corpus[hit['corpus_id']])
    row_dict = df.loc[df['hotel_features'] == corpus[hit['corpus_id']]]
    print("hotel_name:", row_dict['hotel_name'], "\n")
!pip install -r requirements.txt