from abc import ABC, abstractmethod


class RateLimitError(Exception):
    """Custom error for handling rate limit errors from the OpenAI API."""

    pass


class LLMService(ABC):
    """
    Abstract base class for Language Model Services.

    This class defines the interface for interacting with various language model services.
    Concrete implementations should provide the specific logic for invoking the language model.

    Methods:
        invoke(user_prompt: str | None = None,
               system_prompt: str | None = None,
               partial_assistant_prompt: str | None = None,
               max_tokens: int = 1000,
               temperature: float = 0.0) -> str | None:
            Abstract method to invoke the language model with given prompts and parameters.
    """

    @abstractmethod
    async def invoke(
        self,
        user_prompt: str | None = None,
        system_prompt: str | None = None,
        partial_assistant_prompt: str | None = None,
        max_tokens: int = 1000,
        temperature: float = 0.0,
    ) -> str | None:
        """
        Invoke the language model with the given prompts and parameters.

        Args:
            user_prompt (str | None): The main prompt from the user.
            system_prompt (str | None): A system message to set the context.
            partial_assistant_prompt (str | None): A partial response from the assistant.
            max_tokens (int): Maximum number of tokens in the response.
            temperature (float): Sampling temperature for response generation.

        Returns:
            str | None: The generated response from the language model, or None if no response.
        """
        pass
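

# --- Example ------------------------------------------------------------------
# A minimal concrete implementation sketch, assuming the OpenAI Python SDK
# (openai >= 1.0) is installed and OPENAI_API_KEY is set in the environment.
# The class name `OpenAIService` and the default model name are illustrative
# assumptions, not part of this module's interface.

import openai
from openai import AsyncOpenAI


class OpenAIService(LLMService):
    """Illustrative LLMService implementation backed by the OpenAI Chat API."""

    def __init__(self, model: str = "gpt-4o-mini") -> None:
        # AsyncOpenAI() picks up OPENAI_API_KEY from the environment.
        self._client = AsyncOpenAI()
        self._model = model

    async def invoke(
        self,
        user_prompt: str | None = None,
        system_prompt: str | None = None,
        partial_assistant_prompt: str | None = None,
        max_tokens: int = 1000,
        temperature: float = 0.0,
    ) -> str | None:
        # Build the chat message list from whichever prompts were provided.
        messages: list[dict[str, str]] = []
        if system_prompt is not None:
            messages.append({"role": "system", "content": system_prompt})
        if user_prompt is not None:
            messages.append({"role": "user", "content": user_prompt})
        if partial_assistant_prompt is not None:
            messages.append({"role": "assistant", "content": partial_assistant_prompt})

        try:
            response = await self._client.chat.completions.create(
                model=self._model,
                messages=messages,
                max_tokens=max_tokens,
                temperature=temperature,
            )
        except openai.RateLimitError as exc:
            # Translate the SDK's rate limit error into this module's exception.
            raise RateLimitError(str(exc)) from exc

        return response.choices[0].message.content


# Usage sketch (hypothetical):
#     service = OpenAIService()
#     reply = await service.invoke(user_prompt="Hello!", max_tokens=100)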