import os
from typing import Any, Callable, Dict, Optional, Type, Union

from pydantic import BaseModel

from camel.models import ModelFactory
from camel.types import ModelType
from camel.types.enums import ModelPlatformType
from camel.utils import (
    api_keys_required,
    get_pydantic_model,
)

from .base import BaseConverter

DEFAULT_CONVERTER_PROMPTS = """
Extract key entities and attributes from the
user-provided text, and convert them into a structured JSON format.
"""


class OpenAISchemaConverter(BaseConverter):
    r"""OpenAISchemaConverter is a class that converts a string or a function
    into a BaseModel schema.

    Args:
        model_type (ModelType, optional): The model type to be used.
            (default: :obj:`ModelType.GPT_4O_MINI`)
        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
            that will be fed into :obj:`openai.ChatCompletion.create()`. If
            :obj:`None`, an empty dictionary is used.
            (default: :obj:`None`)
        api_key (Optional[str], optional): The API key for authenticating
            with the OpenAI service. (default: :obj:`None`)
    """

    @api_keys_required(
        [
            ("api_key", "OPENAI_API_KEY"),
        ]
    )
    def __init__(
        self,
        model_type: ModelType = ModelType.GPT_4O_MINI,
        model_config_dict: Optional[Dict[str, Any]] = None,
        api_key: Optional[str] = None,
    ):
        self.model_type = model_type
        self.model_config_dict = model_config_dict or {}
        api_key = api_key or os.environ.get("OPENAI_API_KEY")
        # Reuse the underlying OpenAI client from the backend that
        # ModelFactory builds for this platform/model pair.
        self._client = ModelFactory.create(
            ModelPlatformType.OPENAI,
            model_type,
            api_key=api_key,
        )._client
        super().__init__()

    def convert(
        self,
        content: str,
        output_schema: Union[Type[BaseModel], str, Callable],
        prompt: Optional[str] = DEFAULT_CONVERTER_PROMPTS,
    ) -> BaseModel:
        r"""Formats the input content into the expected BaseModel.

        Args:
            content (str): The content to be formatted.
            output_schema (Union[Type[BaseModel], str, Callable]): The
                expected format of the response.
            prompt (Optional[str], optional): The system prompt guiding the
                conversion. (default: :obj:`DEFAULT_CONVERTER_PROMPTS`)

        Returns:
            BaseModel: The formatted response.
        """
        prompt = prompt or DEFAULT_CONVERTER_PROMPTS
        if output_schema is None:
            raise ValueError("Expected an output schema, got None.")
        # Strings and callables are first converted into a BaseModel
        # subclass; anything else must already be a BaseModel subclass.
        if not isinstance(output_schema, type):
            output_schema = get_pydantic_model(output_schema)
        elif not issubclass(output_schema, BaseModel):
            raise ValueError(
                f"Expected a BaseModel, got {type(output_schema)}"
            )

        # Ask the OpenAI structured-outputs endpoint to parse the content
        # directly into the target schema.
        self.model_config_dict["response_format"] = output_schema
        response = self._client.beta.chat.completions.parse(
            messages=[
                {'role': 'system', 'content': prompt},
                {'role': 'user', 'content': content},
            ],
            model=self.model_type,
            **self.model_config_dict,
        )

        message = response.choices[0].message

        if not isinstance(message.parsed, output_schema):
            raise ValueError(
                f"Expected a {output_schema}, got {type(message.parsed)}."
            )

        return message.parsed
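

# A minimal usage sketch (illustrative, not part of the module): it assumes a
# valid OPENAI_API_KEY is set in the environment. `Person` and `book` are
# hypothetical names, shown only to exercise the two schema inputs the class
# docstring names: a BaseModel subclass and a plain function.
if __name__ == "__main__":
    class Person(BaseModel):
        name: str
        age: int

    converter = OpenAISchemaConverter()

    # Passing a BaseModel subclass directly.
    result = converter.convert("Alice is 30 years old.", Person)
    print(result)  # e.g. Person(name='Alice', age=30)

    # Passing a callable: get_pydantic_model is expected to derive a schema
    # from the function's signature, so the model fills in its arguments.
    def book(title: str, author: str) -> None:
        """Record a book by title and author."""

    print(converter.convert("Dune was written by Frank Herbert.", book))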