from __future__ import annotations

import os
from typing import Any

import streamlit as st
import vertexai
from vertexai.language_models import TextGenerationModel

# The LangChain imports below are only used by the commented-out chain further down.
from langchain_community.llms import VertexAI
from langchain.prompts import PromptTemplate
from langchain.base_language import BaseLanguageModel
from langchain.chains.llm import LLMChain
# Point Application Default Credentials at the service-account key file.
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "agileai-poc-10f5fe13f8a2.json"
# Initialise the Vertex AI SDK with an explicit project and region before
# loading the model, rather than relying on ambient credential defaults.
project_id = "agileai-poc"
loc = "us-central1"
vertexai.init(project=project_id, location=loc)

model = TextGenerationModel.from_pretrained("text-bison@001")
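# A minimal sketch (not in the original flow): the generation parameters from
# the commented LangChain block below can also be passed straight to the SDK's
# predict() call.
# response = model.predict(
#     "some prompt",
#     max_output_tokens=256,
#     temperature=0.2,
#     top_p=0.8,
# )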
# Alternative: the same model through LangChain's VertexAI wrapper (unused).
# params = VertexAI(
#     model_name="text-bison@001",
#     max_output_tokens=256,
#     temperature=0.2,
#     top_p=0.8,
# )
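# A minimal sketch, assuming the `params` block above is uncommented: on the
# LangChain versions these imports target, the LLM object is callable, e.g.
# response_text = params("What tone does this sentence carry?")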
prompt = (
    "Modify the text and highlight the points of the given input that "
    "reveal which type of tone it contains:"
)
# class txt_gen(LLMChain):
#     """LLM chain that rewrites text and highlights the tone it contains."""
#
#     @classmethod
#     def from_llm(
#         cls, llm: BaseLanguageModel, prompt: PromptTemplate, **kwargs: Any
#     ) -> txt_gen:
#         """Load a txt_gen chain from an LLM."""
#         return cls(llm=llm, prompt=prompt, **kwargs)
#
#
# def generate_text(user_text):
#     # `prompt` is the instruction string itself, not a file path, so use it
#     # directly as the template (the original tried to open() it as a file).
#     PROMPT = PromptTemplate(
#         input_variables=["input"], template=prompt + "\n\n{input}"
#     )
#     DescGen_chain = txt_gen.from_llm(llm=params, prompt=PROMPT)
#     DescGen_query = DescGen_chain.apply_and_parse([{"input": user_text}])
#     return DescGen_query[0]["text"]
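# Example usage of the chain above (assuming it and `params` are uncommented):
# print(generate_text("I can't believe you missed the deadline again!"))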
c1, c2, c3 = st.columns(3)
with c1:
    user_input = st.text_input("Enter your content :")
    submit = st.button("Submit")
    if submit:
        # description = st.write(generate_text(user_input))
        # Send both the instruction and the user's text to the model; the
        # original passed only the instruction, so the input was never used.
        response = model.predict(f"{prompt}\n\n{user_input}")
        st.write(response.text)
        # with c3:
        #     st.write(description)
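# To run this app (script name is an assumption): streamlit run app.py
# Streamlit re-executes the whole script on every interaction, so the model
# handle above is recreated on each rerun; st.cache_resource could memoise it.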