Refactor import statements to use absolute paths and add a new prompt template for handling rude queries
Files changed:
- py-src/lets_talk/agent.py (+38 -38)
- py-src/lets_talk/prompts.py (+46 -1)
- py-src/lets_talk/rag.py (+1 -1)

py-src/lets_talk/agent.py  CHANGED

```diff
@@ -50,7 +50,7 @@ def call_model(model, state: Dict[str, Any]) -> Dict[str, list[BaseMessage]]:
         context = state.get("context", "")
 
         # Insert system message with context before the latest user message
-        from prompts import call_llm_prompt_template
+        from lets_talk.prompts import call_llm_prompt_template
         sys_prompt = call_llm_prompt_template.format(
             context=context,
         )
@@ -79,47 +79,47 @@ def call_model(model, state: Dict[str, Any]) -> Dict[str, list[BaseMessage]]:
         return {"messages": [HumanMessage(content=error_msg)]}
 
 
-def call_model(model, state: Dict[str, Any]) -> Dict[str, list[BaseMessage]]:
-    """
-    Process the current state through the language model.
+# def call_model(model, state: Dict[str, Any]) -> Dict[str, list[BaseMessage]]:
+#     """
+#     Process the current state through the language model.
 
-    Args:
-        model: Language model with tools bound
-        state: Current state containing messages and context
+#     Args:
+#         model: Language model with tools bound
+#         state: Current state containing messages and context
 
-    Returns:
-        Updated state with model's response added to messages
-    """
-    try:
-        messages = state["messages"]
-        context = state.get("context", "")
+#     Returns:
+#         Updated state with model's response added to messages
+#     """
+#     try:
+#         messages = state["messages"]
+#         context = state.get("context", "")
 
-        # Add context from documents if available
-        if context:
-            # Insert system message with context before the latest user message
-            context_message = SystemMessage(content=rag_prompt_template.format(context=context))
-
-            # Find the position of the last user message
-            for i in range(len(messages)-1, -1, -1):
-                if isinstance(messages[i], HumanMessage):
-                    # Insert context right after the last user message
-                    enhanced_messages = messages[:i+1] + [context_message] + messages[i+1:]
-                    break
-            else:
-                # No user message found, just append context
-                enhanced_messages = messages + [context_message]
-        else:
-            enhanced_messages = messages
+#         # Add context from documents if available
+#         if context:
+#             # Insert system message with context before the latest user message
+#             context_message = SystemMessage(content=rag_prompt_template.format(context=context))
+
+#             # Find the position of the last user message
+#             for i in range(len(messages)-1, -1, -1):
+#                 if isinstance(messages[i], HumanMessage):
+#                     # Insert context right after the last user message
+#                     enhanced_messages = messages[:i+1] + [context_message] + messages[i+1:]
+#                     break
+#             else:
+#                 # No user message found, just append context
+#                 enhanced_messages = messages + [context_message]
+#         else:
+#             enhanced_messages = messages
 
-        # Get response from the model
-        response = model.invoke(enhanced_messages)
-        return {"messages": [response]}
-    except Exception as e:
-        # Handle exceptions gracefully
-        error_msg = f"Error calling model: {str(e)}"
-        print(error_msg)  # Log the error
-        # Return a fallback response
-        return {"messages": [HumanMessage(content=error_msg)]}
+#         # Get response from the model
+#         response = model.invoke(enhanced_messages)
+#         return {"messages": [response]}
+#     except Exception as e:
+#         # Handle exceptions gracefully
+#         error_msg = f"Error calling model: {str(e)}"
+#         print(error_msg)  # Log the error
+#         # Return a fallback response
+#         return {"messages": [HumanMessage(content=error_msg)]}
 
 
 def should_continue(state: Dict[str, Any]) -> Union[Literal["action"], Literal["end"]]:
```
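The second hunk comments out a stale duplicate of `call_model` that sat below the live definition, presumably because leaving it active would be a bug: in Python, a later top-level `def` rebinds the name, so the old copy would silently shadow the implementation above it. A minimal standalone illustration of that shadowing (not code from the repo):

```python
# Standalone illustration, not repo code: a later def rebinds the name,
# so a stale duplicate shadows the implementation defined above it.
def call_model():
    return "new implementation"

def call_model():  # duplicate defined further down the file
    return "old implementation"

print(call_model())  # prints "old implementation" -- the later def wins
```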
    	
py-src/lets_talk/prompts.py  CHANGED

```diff
@@ -38,4 +38,49 @@ Query:
 # Output Format
 
 Respond only with "YES" or "NO".
-"""
+"""
+
+rude_query_answer_prompt_template = """\
+Respond to negative, rude, or derogatory questions or statements with respect, positivity, and an uplifting tone.
+
+Address the initial sentiment or statement with understanding and empathy before providing a positive response. Aim to uplift the conversation, converting any negative interaction into a positive engagement.
+
+# Steps
+
+1. Identify the negative or derogatory sentiment in the input.
+2. Acknowledge the sentiment or emotion behind the statement with empathy.
+3. Respond with positivity, focusing on encouraging and uplifting language.
+4. Conclude with a respectful and positive closing remark.
+
+# Output Format
+
+Respond using concise sentences or short paragraphs, maintaining a respectful and positive tone throughout.
+
+# Examples
+
+**Example 1:**
+
+- **Input:** "Go away"
+- **Output:** "I understand you might need some space, and I'm here to help whenever you're ready. Take care!"
+
+**Example 2:**
+
+- **Input:** "I am angry now"
+- **Output:** "It's okay to feel angry sometimes. If you need someone to talk to, I'm here for you, and we'll find a way through this together!"
+
+**Example 3:**
+
+- **Input:** "Tell me something else"
+- **Output:** "Sure, I'd love to share something uplifting with you! Did you know that taking a moment to appreciate small things can brighten your day? :)"
+
+**Example 4:**
+
+- **Input:** "RIP you are awful"
+- **Output:** "I'm sorry if I disappointed you. I'm here to improve and assist you better. Let's turn this around together!"
+
+# Notes
+
+- Always maintain a positive and empathetic approach, even when the input is challenging.
+- Aim to uplift and provide encouragement, transforming the interaction into a positive experience.
+"""
+
```
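Nothing in this commit wires the new template into a chain yet. One plausible hookup, mirroring the `ChatPromptTemplate` pattern rag.py already uses; the chain below is a sketch under that assumption, not code from the repo, and the model settings are illustrative:

```python
# Sketch only: use the new template as a system prompt in a LangChain chain.
# Requires OPENAI_API_KEY in the environment; model name is illustrative.
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

from lets_talk.prompts import rude_query_answer_prompt_template

rude_prompt = ChatPromptTemplate.from_messages([
    ("system", rude_query_answer_prompt_template),
    ("human", "{query}"),
])
llm = ChatOpenAI(model="gpt-4o-mini", temperature=0.2)

rude_chain = rude_prompt | llm
print(rude_chain.invoke({"query": "Go away"}).content)
```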
    	
py-src/lets_talk/rag.py  CHANGED

```diff
@@ -22,7 +22,7 @@ retriever = vector_store.as_retriever()
 llm = ChatOpenAI(model=config.LLM_MODEL, temperature=config.LLM_TEMPERATURE)
 
 
-from prompts import rag_prompt_template
+from lets_talk.prompts import rag_prompt_template
 
 rag_prompt = ChatPromptTemplate.from_template(rag_prompt_template)
 
```
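Both import fixes share the same motivation: the bare `from prompts import ...` form only resolves when Python happens to be launched from inside py-src/lets_talk, while the package-qualified form works from any entry point that can import the lets_talk package. A quick smoke test, assuming py-src is the source root and the package is not pip-installed (both assumptions; the packaging setup is outside this diff):

```python
# Hypothetical smoke test, run from the repository root. Assumes py-src is
# the source root and lets_talk is not installed, hence the sys.path shim.
import sys
sys.path.insert(0, "py-src")

# With the absolute imports in place, this resolves regardless of the
# current working directory:
from lets_talk.prompts import rude_query_answer_prompt_template

print(rude_query_answer_prompt_template.splitlines()[0])
```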