# app/services/llm_service.py
import logging
import time

# --- MODIFIED: Import the ASYNC client ---
from openai import AsyncOpenAI, OpenAIError

from app.core.config import settings
from app.core import state

logger = logging.getLogger(__name__)
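
# For reference, this module assumes app/core/state.py exposes a mutable,
# module-level client handle. A minimal sketch of that contract (an
# assumption inferred from how state.openai_client is read and assigned
# below, not the actual file) would be:
#
#     # app/core/state.py
#     from typing import Optional
#     from openai import AsyncOpenAI
#
#     openai_client: Optional[AsyncOpenAI] = None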

def initialize_openai_client() -> bool:
    """Initializes the ASYNCHRONOUS OpenAI client into the state object."""
    if state.openai_client is not None:
        logger.debug("OpenAI client already initialized in state.")
        return True

    api_key = settings.OPENAI_API_KEY
    if not api_key:
        logger.error("OpenAI API key is not configured. LLM service unavailable.")
        state.openai_client = None
        return False

    logger.info("Initializing AsyncOpenAI client into state...")
    try:
        # --- MODIFIED: Use AsyncOpenAI instead of OpenAI ---
        state.openai_client = AsyncOpenAI(api_key=api_key)
        logger.info("AsyncOpenAI client initialized successfully into state.")
        return True
    except Exception as e:
        logger.exception(f"Failed to initialize AsyncOpenAI client: {e}")
        state.openai_client = None
        return False
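
# Example (illustrative): running the initializer once at startup. The
# FastAPI lifespan wiring below is an assumption about the host app; the
# only requirement this module imposes is that initialize_openai_client()
# runs before the first generate_answer() call.
#
#     from contextlib import asynccontextmanager
#     from fastapi import FastAPI
#
#     @asynccontextmanager
#     async def lifespan(app: FastAPI):
#         if not initialize_openai_client():
#             logger.warning("Starting without LLM support.")
#         yield
#
#     app = FastAPI(lifespan=lifespan)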

# ... (format_system_prompt function remains the same) ...
def format_system_prompt() -> str:
    """
    The definitive system prompt with rules for structure, completeness,
    and multi-path logical reasoning.
    """
    return """
You are an expert technical writer specializing in Temenos Transact. Your primary goal is to take the raw **Documentation Context** and restructure it into a clear, comprehensive guide that fully answers the **User Question**.

---
✅ LOGIC & REASONING RULES (CRITICAL):
1. **Multi-Method Synthesis Rule:** If the context describes multiple distinct methods to achieve the user's goal (e.g., configuration through a UI screen like 'Payment Order Product Setup' AND through an API hook like 'PP.COMPONENT.API.HOOK'), you MUST document all of them. Do not choose one method and ignore the others. Structure your response with a clear heading for each method (e.g., ### Method 1: ..., ### Method 2: ...).
2. **Handling Specificity:** When describing a method that applies to a specific component (e.g., the 'Payment Order application'), clearly state this in your explanation of that method.
3. **Example Inclusion:** Under each method presented, you MUST include any corresponding numerical examples from the context. Clearly state that these examples illustrate the configuration for that specific method.
4. **Technical Term Integrity:** You MUST NOT rephrase or alter technical terms, menu navigation paths, or table names. Preserve them exactly as they appear in the context.

---
✅ COMPLETENESS & FORMATTING RULES (STRICT):
1. **Include Every Field:** Within each method's section, your guide must mention every single configuration field found in the documentation context relevant to that method.
2. **Structure:** Use Markdown for structure. Use numbered steps (`1.`, `2.`) for procedural instructions within each method. Use bullet points (`-`) for options or lists.
3. **Verification Table:** If the context provides a clear set of configuration fields, conclude your response with a "Verification Table" that summarizes all fields mentioned.

---
✅ FINAL RECOURSE:
Only say “cannot provide” if the context is completely empty.
"""

# --- MODIFIED: The entire function is now async/await ---
async def generate_answer(query: str, context_used: str) -> str:
    """
    Generates a response by asking the LLM to answer the user's query
    based on the provided context, using the async client.
    """
    if state.openai_client is None:
        logger.error("OpenAI client is not initialized in state. Cannot generate answer.")
        return "⚠️ Configuration Error: LLM service is unavailable."

    if not context_used or not context_used.strip():
        logger.warning("generate_answer called with empty context. Cannot answer query.")
        return "Based on the available documents, I cannot answer this question."

    logger.info("Generating LLM response based on user query and retrieved context...")
    messages = [
        {"role": "system", "content": format_system_prompt()},
        {"role": "user", "content": f"Context:\n---\n{context_used}\n---\n\nQuestion: {query}\n\nAnswer:"},
    ]

    try:
        prompt_chars = sum(len(msg.get("content", "")) for msg in messages)
        logger.info(
            f"Calling OpenAI API model: {settings.OPENAI_MODEL_NAME}. "
            f"Prompt length approx: {prompt_chars} chars."
        )
        start_time = time.time()
        # --- MODIFIED: The API call is now awaited ---
        response = await state.openai_client.chat.completions.create(
            model=settings.OPENAI_MODEL_NAME,
            messages=messages,
            temperature=0.2,
            max_tokens=4096,
        )
        duration = time.time() - start_time
        # message.content is Optional in the v1 SDK; guard against None
        # before stripping.
        answer = (response.choices[0].message.content or "").strip()
        finish_reason = response.choices[0].finish_reason
        logger.info(f"OpenAI API call successful. Duration: {duration:.2f}s. Finish Reason: {finish_reason}")
    except OpenAIError as e:
        logger.exception(f"OpenAI API error: {e}")
        return f"⚠️ Sorry, LLM API error: {e}"
    except Exception as e:
        logger.exception(f"Unexpected error during LLM call: {e}")
        return "⚠️ Sorry, unexpected error generating response."

    if not answer:
        logger.warning("OpenAI LLM returned an empty response.")
        answer = "⚠️ LLM returned an empty response."
    else:
        logger.info("LLM query-based response generated.")

    # Log truncated previews only; the full prompt and answer can be large.
    logger.info(f"LLM Request preview: {str(messages)[:500]}...")
    logger.info(f"LLM Response preview: {answer[:200]}...")
    return answer
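
# Minimal smoke test (illustrative), runnable as
# `python -m app.services.llm_service`. Assumes OPENAI_API_KEY is configured
# in settings; the query and context strings below are placeholders, not
# real application data.
if __name__ == "__main__":
    import asyncio

    async def _demo() -> None:
        if initialize_openai_client():
            print(await generate_answer(
                query="How is duplicate checking configured for Payment Orders?",
                context_used="Placeholder context: paste retrieved documentation chunks here.",
            ))

    asyncio.run(_demo())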