"""Handles loading and running of models."""
import json
import os
import re
import time
import warnings
from threading import Thread
from time import sleep
import spaces
from dotenv import load_dotenv
from logger import logger
from utils import get_messages_documents_and_tools
warnings.filterwarnings("ignore")
load_dotenv()
safe_token = "No"
risky_token = "Yes"
nlogprobs = 20
max_tokens = 2048
max_model_len = 4096
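# max_tokens is passed to model.generate as max_new_tokens in agenerate; safe_token,
# risky_token, nlogprobs, and max_model_len are defined here but not referenced elsewhere
# in this module.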
inference_engine = os.getenv("INFERENCE_ENGINE", "TRANSFORMERS")
# device_name: str = os.getenv("DEVICE", '')
logger.debug(f"Inference engine is: {inference_engine}")
if inference_engine == "TRANSFORMERS":
    import torch
    from transformers import infer_device  # type: ignore
    from transformers import (
        AutoModelForCausalLM,
        AutoTokenizer,
        TextIteratorStreamer,
    )

    device = os.getenv("DEVICE", None)
    # "cuda" (not "gpu") is the device string torch expects for NVIDIA GPUs
    valid_devices = ["cuda", "cpu", "mps"]
    if device is not None:
        if device not in valid_devices:
            raise ValueError(f'Invalid device {device}. Must be one of {", ".join(valid_devices)}')
    else:
        device = infer_device()
    logger.debug(f"Using device '{device}'")
    model_name = "ibm-granite/granite-guardian-3.3-8b"
    # model_name = "unsloth/SmolLM3-3B"

    # load the tokenizer and the model
    start = time.time()
    model = AutoModelForCausalLM.from_pretrained(
        pretrained_model_name_or_path=model_name,
        dtype=torch.float16,
    )
    end = time.time()
    total = round((end - start) / 60, 3)
    logger.debug(f"Finished loading model. It took: {total} minutes")

    start = time.time()
    model.to(device)  # type: ignore
    end = time.time()
    total = round((end - start) / 60, 3)
    logger.debug(f"Finished moving model to device. It took: {total} minutes")

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
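
# model, tokenizer, and streamer are module-level singletons: they are created once at
# import time (only when INFERENCE_ENGINE is "TRANSFORMERS") and shared by get_prompt
# and agenerate below.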


def parse_response(response: str) -> tuple[str | None, str | None]:
    """Parses the response string to extract the latest <think> and <score> content.

    Handles cases where <think> or <score> tags are incomplete.

    Args:
        response (str): The response string containing <think> and <score> tags.

    Returns:
        tuple: A tuple containing:
            - score (str or None): The latest <score> content or None if incomplete.
            - trace (str or None): The latest <think> content or None if incomplete.
    """
    # Initialize variables
    score, trace = None, None
    # Check for incomplete <think> tag
    if "<think>" in response and "</think>" not in response:
        trace = response.split("<think>", 1)[-1]
        closing_think_tag = "</think>"
        # Strip a partially generated closing tag, checking longer prefixes first
        for i in range(len(closing_think_tag), 0, -1):
            if trace.endswith(closing_think_tag[:i]):
                trace = trace[:-i]
                break
    else:
        # Extract the latest <think> content
        trace_match = re.findall(r"<think>(.*?)</think>", response, re.DOTALL)
        trace = trace_match[-1] if trace_match else "Starting to think..."
    # Check for incomplete <score> tag
    if "<score>" in response and "</score>" not in response:
        score = None
    else:
        # Extract the latest <score> content
        score_match = re.findall(r"<score>(.*?)</score>", response, re.DOTALL)
        if score_match:
            score = score_match[-1].strip().capitalize()
    return score, trace.strip()
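

# Example (illustrative values): parse_response("<think>reasoning</think><score> yes </score>")
# returns ("Yes", "reasoning"). Until the closing </score> tag appears, the score is None
# and only the partial trace is returned.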


def fake_async_generate(think):
    output = """<score>Yes</score>"""
    if think:
        output = (
"""<think>Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of Lorem Ipsum.\nWhy do we use it?\nIt is a long established fact that a reader will be distracted by the readable content of a page when looking at its layout. The point of using Lorem Ipsum is that it has a more-or-less normal distribution of letters, as opposed to using 'Content here, content here', making it look like readable English. Many desktop publishing packages and web page editors now use Lorem Ipsum as their default model text, and a search for 'lorem ipsum' will uncover many web sites still in their infancy. Various versions have evolved over the years, sometimes by accident, sometimes on purpose (injected humour and the like).</think>"""
            + output
        )
    acc_result = ""
    for word in output.split(" "):
        sleep(0.02)
        acc_result = acc_result + " " + word
        yield parse_response(acc_result)
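

# agenerate streams real model output: model.generate runs in a background thread and each
# token emitted by the shared TextIteratorStreamer is appended to the accumulated text and
# re-parsed, yielding (score, trace) tuples as generation proceeds.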


@spaces.GPU
def agenerate(text):
    model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
    generation_args = {"max_new_tokens": max_tokens, "streamer": streamer, "use_cache": True, **model_inputs}
    thread = Thread(
        target=model.generate,
        kwargs=generation_args,
    )
    thread.start()
    start = time.time()
    acc_text = ""
    for text_token in streamer:
        acc_text += text_token
        yield parse_response(acc_text)
    thread.join()
    end = time.time()
    total = round((end - start) / 60, 3)
    logger.debug(f"Finished generating tokens. It took: {total} minutes")


@spaces.GPU
def get_prompt(messages, documents, tools, guardian_config, think: bool = False) -> str:
    logger.debug(f"Messages are: {json.dumps(messages, indent=2)}")
    logger.debug(f"guardian_config is: {guardian_config}")
    params = {"guardian_config": guardian_config, "tokenize": False, "add_generation_prompt": True, "think": think}
    if len(documents) > 0:
        params["documents"] = documents
    if len(tools) > 0:
        params["available_tools"] = tools
    prompt = tokenizer.apply_chat_template(conversation=messages, **params)
    logger.debug(f"Prompt is:\n{prompt}")
    return prompt


def get_guardian_config(criteria_name, criteria_description):
    if criteria_description is not None:
        return {"custom_criteria": criteria_description}
    else:
        return {"criteria_id": criteria_name}


def get_guardian_response(test_case, sub_catalog_name, criteria_name, criteria_description=None, think: bool = False):
    messages, documents, tools = get_messages_documents_and_tools(
        test_case=test_case, sub_catalog_name=sub_catalog_name
    )
    logger.debug(f"Messages are: {json.dumps(messages, indent=2)}")
    if inference_engine == "MOCK":
        logger.debug("Returning mocked model result.")
        sleep(1)
        return fake_async_generate(think)
    elif inference_engine == "TRANSFORMERS":
        guardian_config = get_guardian_config(criteria_name, criteria_description)
        logger.debug(f"guardian_config is: {guardian_config}")
        prompt = get_prompt(
            messages=messages,
            documents=documents,
            tools=tools,
            guardian_config=guardian_config,
            think=think,
        )
        return agenerate(prompt)
    else:
        raise Exception("Environment variable 'INFERENCE_ENGINE' must be one of [MOCK, TRANSFORMERS]")
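

# Typical flow (sketch): the UI builds a test_case, calls
# get_guardian_response(test_case, sub_catalog_name, criteria_name, think=True), and
# iterates the returned generator, receiving (score, trace) tuples as they stream in.

if __name__ == "__main__":
    # Minimal smoke test (illustrative only): exercise parse_response on a partial and a
    # complete response. Importing this module already requires the Space's logger and
    # utils modules and, under the TRANSFORMERS engine, the model weights and a device.
    print(parse_response("<think>partial reasoning"))  # -> (None, "partial reasoning")
    print(parse_response("<think>done</think><score> no </score>"))  # -> ("No", "done")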