Spaces:
Build error
Build error
| import streamlit as st | |
| from audiorecorder import audiorecorder | |
| import torch | |
| from transformers import pipeline | |
| import torch | |
| import torchaudio | |
| from langchain.embeddings.openai import OpenAIEmbeddings | |
| from langchain import HuggingFaceHub, LLMChain, PromptTemplate | |
| from langchain.memory import ConversationBufferWindowMemory | |
| from langchain.chat_models import ChatOpenAI | |
| from langchain.chains import ConversationalRetrievalChain | |
| from langchain.document_loaders.csv_loader import CSVLoader | |
| from langchain.vectorstores import FAISS | |
| import tempfile | |
| from streamlit_chat import message | |
| import streamlit as st | |
| from elevenlabs import set_api_key | |
| from elevenlabs import clone, generate, play | |
| from pydub import AudioSegment | |
| import os | |
| import re | |
| import sys | |
| import pandas as pd | |
| from helper import parse_transcription,hindi_to_english,translate_english_to_hindi,hindi_tts | |
def extract_text_from_html(html):
    """Strip HTML tags from *html* and return the remaining plain text.

    Bug fix: the original computed ``cleantext`` but never returned it,
    so every call yielded ``None``.

    Args:
        html: A string possibly containing HTML markup.

    Returns:
        The input with every ``<...>`` tag removed.
    """
    # Non-greedy match so adjacent tags are removed individually.
    cleanr = re.compile(r'<.*?>')
    cleantext = re.sub(cleanr, '', html)
    return cleantext
def conversational_chat(query):
    """Run *query* through the LLM chain with the session's chat history,
    record the (question, answer) pair, and return the answer text.

    NOTE(review): reads a module-level ``llm_chain``; in this file it is
    created inside ``ui()`` — confirm it is actually in scope when called.
    """
    history = st.session_state['history']
    result = llm_chain({"question": query, "chat_history": history})
    answer = result["answer"]
    history.append((query, answer))
    return answer
def save_uploaded_file_as_mp3(uploaded_file, output_file_path):
    """Decode *uploaded_file* with pydub and write it to *output_file_path* as MP3."""
    segment = AudioSegment.from_file(uploaded_file)
    segment.export(output_file_path, format="mp3")
# Sidebar field for the user's OpenAI API key; rendered as a masked
# password input so the key is not shown on screen.
user_api_key = st.sidebar.text_input(
    label="#### Your OpenAI API key π",
    placeholder="Paste your openAI API key, sk-",
    type="password",
)
def ui():
    """Streamlit page: record Hindi speech, transcribe it, converse with the
    LLM in English, translate the answer back to Hindi, and play it as audio.

    Requires the OpenAI key from the sidebar; does nothing until one is given.
    """
    # conversational_chat() looks up ``llm_chain`` as a module-level global,
    # so it must be bound globally here — the original left it as a local of
    # ui(), which made the first chat turn raise NameError.
    global llm_chain

    # Guard clause: the original duplicated this check in two separate
    # ``if`` blocks; nothing below works without a key.
    if user_api_key is None or user_api_key.strip() == "":
        return

    os.environ["OPENAI_API_KEY"] = user_api_key

    template = """
    Behave like a Telecomm customer servce call agent and don't include any website address, compnay name or any other parameter in your output
    {history}
    Me:{human_input}
    Jack:
    """
    prompt = PromptTemplate(
        input_variables=["history", "human_input"],
        template=template,
    )
    llm_chain = LLMChain(
        llm=ChatOpenAI(temperature=0.0, model_name='gpt-3.5-turbo'),
        prompt=prompt,
        verbose=True,
        # Keep only the last 2 exchanges in the prompt window.
        memory=ConversationBufferWindowMemory(k=2),
    )

    # Initialise per-session chat state on first run.
    if 'history' not in st.session_state:
        st.session_state['history'] = []
    if 'generated' not in st.session_state:
        st.session_state['generated'] = ["Hello ! Ask me anything about " + " π€"]
    if 'past' not in st.session_state:
        st.session_state['past'] = ["Hey ! π"]

    eleven_labs_api_key = st.sidebar.text_input(
        label="#### Your Eleven Labs API key π",
        placeholder="Paste your Eleven Labs API key",
        type="password")
    # BUG FIX: the original called set_api_key(user_api_key), sending the
    # OpenAI key to ElevenLabs; the ElevenLabs key belongs here.
    set_api_key(eleven_labs_api_key)

    # Container for the chat history.
    response_container = st.container()
    # Container for the user's audio input.
    container = st.container()

    with container:
        with st.form(key='my_form', clear_on_submit=True):
            audio_file = audiorecorder("Click to record", "Recording...")
            # Persist the recording; ``with`` guarantees the handle is
            # closed (the original leaked an open file object).
            # NOTE(review): .tobytes() assumes audiorecorder returns a
            # numpy array; newer releases return a pydub AudioSegment
            # (use .export instead) — confirm the installed version.
            with open("./output_audio.mp3", "wb") as wav_file:
                wav_file.write(audio_file.tobytes())
            submit_button = st.form_submit_button(label='Send')

            if submit_button:
                output_file_path = "./output_audio.mp3"
                hindi_input_audio, sample_rate = torchaudio.load(output_file_path)
                # Speech recognition on the saved recording (Hindi).
                hindi_transcription = parse_transcription('./output_audio.mp3')
                st.success(f"Audio file saved as {output_file_path}")
                # Hindi -> English so the LLM converses in English.
                english_input = hindi_to_english(hindi_transcription)
                english_output = conversational_chat(english_input)
                # English answer -> Hindi text -> Hindi speech.
                hin_output = translate_english_to_hindi(english_output)
                hindi_output_audio = hindi_tts(hin_output)
                st.session_state['past'].append(hindi_input_audio)
                st.session_state['generated'].append(hindi_output_audio)

    if st.session_state['generated']:
        with response_container:
            for i in range(len(st.session_state['generated'])):
                st.audio(st.session_state["past"][i], format='audio/wav')
                st.audio(st.session_state["generated"][i], format='audio/wav')


if __name__ == '__main__':
    ui()