| """Handles loading and running of models.""" | |
| import json | |
| import os | |
| import re | |
| import time | |
| import warnings | |
| from threading import Thread | |
| from time import sleep | |
| import spaces | |
| from dotenv import load_dotenv | |
| from logger import logger | |
| from utils import get_messages_documents_and_tools | |
| warnings.filterwarnings("ignore") | |
| load_dotenv() | |
| safe_token = "No" | |
| risky_token = "Yes" | |
| nlogprobs = 20 | |
| max_tokens = 2048 | |
| max_model_len = 4096 | |
| inference_engine = os.getenv("INFERENCE_ENGINE", "TRANSFORMERS") | |
| # device_name: str = os.getenv("DEVICE", '') | |
| logger.debug(f"Inference engine is: {inference_engine}") | |

if inference_engine == "TRANSFORMERS":
    import torch
    from transformers import (  # type: ignore
        AutoModelForCausalLM,
        AutoTokenizer,
        TextIteratorStreamer,
        infer_device,
    )

    device = os.getenv("DEVICE", None)
    valid_devices = ["gpu", "cpu", "mps"]
    if device is not None:
        if device not in valid_devices:
            raise ValueError(f'Invalid device {device}. Must be one of {", ".join(valid_devices)}')
    else:
        device = infer_device()
    logger.debug(f"Using device '{device}'")
| model_name = "ibm-granite/granite-guardian-3.3-8b" | |
| # model_name = "unsloth/SmolLM3-3B" | |
| # load the tokenizer and the model | |
| start = time.time() | |
| model = AutoModelForCausalLM.from_pretrained( | |
| pretrained_model_name_or_path=model_name, | |
| dtype=torch.float16, | |
| ) | |
| end = time.time() | |
| total = round((end - start) / 60, 3) | |
| logger.debug(f"Finished loading model. It took: {total} minutes") | |
| start = time.time() | |
| model.to(device) # type: ignore | |
| end = time.time() | |
| total = round((end - start) / 60, 3) | |
| logger.debug(f"Finished moving model to device. It took: {total} minutes") | |
| tokenizer = AutoTokenizer.from_pretrained(model_name) | |
| streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True) | |
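
    # Note (assumption): this single module-level streamer is shared by agenerate()
    # below, so only one generation is expected to run at a time.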


def parse_response(response: str) -> tuple[str | None, str | None]:
    """Parses the response string to extract the latest <think> and <score> content.

    Handles cases where <think> or <score> tags are incomplete.

    Args:
        response (str): The response string containing <think> and <score> tags.

    Returns:
        tuple: A tuple containing:
            - score (str or None): The latest <score> content or None if incomplete.
            - trace (str or None): The latest <think> content or None if incomplete.
    """
    # Initialize variables
    score, trace = None, None

    # Check for incomplete <think> tag
    if "<think>" in response and "</think>" not in response:
        trace = response.split("<think>", 1)[-1]
        # Strip a partially emitted closing tag, checking the longest prefix first
        closing_think_tag = "</think>"
        for i in range(len(closing_think_tag) - 1, 0, -1):
            if trace.endswith(closing_think_tag[:i]):
                trace = trace[:-i]
                break
    else:
        # Extract the latest <think> content
        trace_match = re.findall(r"<think>(.*?)</think>", response, re.DOTALL)
        trace = trace_match[-1] if trace_match else "Starting to think..."

    # Check for incomplete <score> tag
    if "<score>" in response and "</score>" not in response:
        score = None
    else:
        # Extract the latest <score> content
        score_match = re.findall(r"<score>(.*?)</score>", response, re.DOTALL)
        if score_match:
            score = score_match[-1].strip().capitalize()

    return score, trace.strip()
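
# Illustrative behaviour of parse_response (a sketch, not executed at import time):
#   parse_response("<think>checking the reply</think><score> yes </score>")
#       -> ("Yes", "checking the reply")
#   parse_response("<think>still reasoning")   # incomplete <think> tag
#       -> (None, "still reasoning")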


def fake_async_generate(think):
    """Yields a mocked, word-by-word (score, trace) stream for local testing."""
    output = """<score>Yes</score>"""
    if think:
        output = (
            """<think>Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of Lorem Ipsum.\nWhy do we use it?\nIt is a long established fact that a reader will be distracted by the readable content of a page when looking at its layout. The point of using Lorem Ipsum is that it has a more-or-less normal distribution of letters, as opposed to using 'Content here, content here', making it look like readable English. Many desktop publishing packages and web page editors now use Lorem Ipsum as their default model text, and a search for 'lorem ipsum' will uncover many web sites still in their infancy. Various versions have evolved over the years, sometimes by accident, sometimes on purpose (injected humour and the like).</think>"""
            + output
        )
    acc_result = ""
    for word in output.split(" "):
        sleep(0.02)
        acc_result = acc_result + " " + word
        yield parse_response(acc_result)


def agenerate(text):
    """Runs generation in a background thread and yields (score, trace) tuples as tokens stream in."""
    model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
    generation_args = {"max_new_tokens": max_tokens, "streamer": streamer, "use_cache": True, **model_inputs}
    thread = Thread(
        target=model.generate,
        kwargs=generation_args,
    )
    thread.start()
    start = time.time()
    acc_text = ""
    for text_token in streamer:
        acc_text += text_token
        yield parse_response(acc_text)
    thread.join()
    end = time.time()
    total = round((end - start) / 60, 3)
    logger.debug(f"Finished generating tokens. It took: {total} minutes")


def get_prompt(messages, documents, tools, guardian_config, think: bool = False) -> str:
    """Builds the prompt by applying the tokenizer's chat template to messages, documents, and tools."""
    logger.debug(f"Messages are: {json.dumps(messages, indent=2)}")
    logger.debug(f"guardian_config is: {guardian_config}")
    params = {"guardian_config": guardian_config, "tokenize": False, "add_generation_prompt": True, "think": think}
    if len(documents) > 0:
        params["documents"] = documents
    if len(tools) > 0:
        params["available_tools"] = tools
    prompt = tokenizer.apply_chat_template(conversation=messages, **params)
    logger.debug(f"Prompt is:\n{prompt}")
    return prompt


def get_guardian_config(criteria_name, criteria_description):
    """Returns a guardian_config dict, preferring a custom criteria description over the criteria id."""
    if criteria_description is not None:
        return {"custom_criteria": criteria_description}
    else:
        return {"criteria_id": criteria_name}


def get_guardian_response(test_case, sub_catalog_name, criteria_name, criteria_description=None, think: bool = False):
    """Returns a generator of (score, trace) tuples for the given test case, mocked or model-backed."""
    messages, documents, tools = get_messages_documents_and_tools(
        test_case=test_case, sub_catalog_name=sub_catalog_name
    )
    logger.debug(f"Messages are: {json.dumps(messages, indent=2)}")
    if inference_engine == "MOCK":
        logger.debug("Returning mocked model result.")
        sleep(1)
        return fake_async_generate(think)
    elif inference_engine == "TRANSFORMERS":
        guardian_config = get_guardian_config(criteria_name, criteria_description)
        logger.debug(f"guardian_config is: {guardian_config}")
        prompt = get_prompt(
            messages=messages,
            documents=documents,
            tools=tools,
            guardian_config=guardian_config,
            think=think,
        )
        return agenerate(prompt)
    else:
        raise Exception("Environment variable 'INFERENCE_ENGINE' must be one of [MOCK, TRANSFORMERS]")