from typing import List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
import transformers
from torch.nn import CrossEntropyLoss
from transformers import GenerationConfig
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging

from .configuration_neo_chat import NEOChatConfig
from .conversation import get_conv_template
from .modeling_neo_vit import NEOVisionModel
from .modeling_qwen3 import Qwen3ForCausalLM

logger = logging.get_logger(__name__)


def version_cmp(v1, v2, op='eq'):
    """Compare two version strings with the given operator name (e.g. 'eq', 'ge')."""
    import operator

    from packaging import version
    op_func = getattr(operator, op)
    return op_func(version.parse(v1), version.parse(v2))
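
# Usage sketch (illustrative version strings, not from this repo):
#   version_cmp('4.41.0', '4.37.0', 'ge')  # -> True
#   version_cmp('4.36.2', '4.37.0', 'ge')  # -> False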


def build_abs_positions_from_grid_hw(grid_hw: torch.Tensor, device=None):
    """
    Compute the absolute (x, y) patch coordinates for a batch of images,
    flattened across the batch in row-major order.

    Args:
        grid_hw: (B, 2) tensor holding the (H, W) patch grid of each image.
        device: device for the returned tensors; defaults to `grid_hw.device`.

    Returns:
        abs_x, abs_y: 1-D tensors of length sum(H * W) with per-patch coordinates.
    """
    if device is None:
        device = grid_hw.device
    grid_hw = grid_hw.to(device)
    B = grid_hw.shape[0]

    H = grid_hw[:, 0]
    W = grid_hw[:, 1]
    N = H * W
    N_total = N.sum()

    # Map every flattened patch back to the image (sample) it came from.
    patch_to_sample = torch.repeat_interleave(torch.arange(B, device=device), N)

    # Index of each patch within its own image: subtract the cumulative patch
    # count of all preceding images from the global patch index.
    patch_id_within_image = torch.arange(N_total, device=device)
    patch_id_within_image = patch_id_within_image - torch.cumsum(
        torch.cat([torch.tensor([0], device=device), N[:-1]]), dim=0
    )[patch_to_sample]

    # Row-major layout: x is the column index, y is the row index.
    W_per_patch = W[patch_to_sample]
    abs_x = patch_id_within_image % W_per_patch
    abs_y = patch_id_within_image // W_per_patch

    return abs_x, abs_y
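
# Worked example (a minimal sketch): for grid_hw = torch.tensor([[2, 3]]) the six
# patches are enumerated row-major, so abs_x = [0, 1, 2, 0, 1, 2] and
# abs_y = [0, 0, 0, 1, 1, 1]. A second image's patches are appended after the
# first image's in the same flattened ordering.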


class NEOChatModel(PreTrainedModel):
    config_class = NEOChatConfig
    main_input_name = 'pixel_values'
    base_model_prefix = 'language_model'
    _supports_flash_attn_2 = True
    supports_gradient_checkpointing = True
    _no_split_modules = [
        'NEOVisionModel',
        'Qwen3DecoderLayer',
    ]
    _tp_plan = ''

    def __init__(self, config: NEOChatConfig, vision_model=None, language_model=None, use_flash_attn=True):
        super().__init__(config)

        assert version_cmp(transformers.__version__, '4.37.0', 'ge')
        patch_size = config.vision_config.patch_size
        self.patch_size = patch_size
        self.template = config.template
        self.downsample_ratio = config.downsample_ratio
        # The attention implementation is forced to 'eager' here; the
        # `use_flash_attn` argument is accepted for API compatibility but unused.
        config.llm_config._attn_implementation = 'eager'

        if vision_model is not None:
            self.vision_model = vision_model
        else:
            self.vision_model = NEOVisionModel(config.vision_config)
        if language_model is not None:
            self.language_model = language_model
        else:
            self.language_model = Qwen3ForCausalLM(config.llm_config)

        # These token ids are filled in from the tokenizer by `chat`/`batch_chat`.
        self.img_context_token_id = None
        self.img_start_token_id = None
        self.conv_template = get_conv_template(self.template)
        self.system_message = self.conv_template.system_message

    def forward(
        self,
        pixel_values: torch.FloatTensor,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        image_flags: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        # Training-style forward is intentionally disabled; the implementation
        # below is kept for reference and is unreachable.
        raise NotImplementedError('forward')
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        image_flags = image_flags.squeeze(-1)
        input_embeds = self.language_model.get_input_embeddings()(input_ids).clone()

        vit_embeds = self.extract_feature(pixel_values)
        vit_embeds = vit_embeds[image_flags == 1]

        B, N, C = input_embeds.shape
        input_embeds = input_embeds.reshape(B * N, C)

        # Splice the vision embeddings into the positions of the image context tokens.
        input_ids = input_ids.reshape(B * N)
        selected = (input_ids == self.img_context_token_id)
        try:
            input_embeds[selected] = input_embeds[selected] * 0.0 + vit_embeds.reshape(-1, C)
        except Exception as e:
            # Shape-mismatch fallback: fill as many image-token slots as there
            # are vision embeddings.
            vit_embeds = vit_embeds.reshape(-1, C)
            print(f'warning: {e}, input_embeds[selected].shape={input_embeds[selected].shape}, '
                  f'vit_embeds.shape={vit_embeds.shape}')
            n_token = min(int(selected.sum()), vit_embeds.size(0))
            # Note: `input_embeds[selected][:n_token] = ...` would assign into a
            # temporary copy, so index the rows explicitly instead.
            indices = selected.nonzero(as_tuple=True)[0][:n_token]
            input_embeds[indices] = input_embeds[indices] * 0.0 + vit_embeds[:n_token]

        input_embeds = input_embeds.reshape(B, N, C)

        outputs = self.language_model(
            inputs_embeds=input_embeds,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        logits = outputs.logits

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict token n.
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()

            # Flatten the tokens.
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.language_model.config.vocab_size)
            shift_labels = shift_labels.view(-1)

            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
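
    # Token-splicing sketch (illustrative): with a prompt tokenized as
    #   ... <img> <IMG_CONTEXT> x N </img> ...
    # `selected` marks the N context-token positions, and the N vision patch
    # embeddings from `extract_feature` replace the text embeddings there,
    # one embedding per <IMG_CONTEXT> slot.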

    def extract_feature(self, pixel_values, grid_hw=None):
        return self.vision_model(pixel_values=pixel_values,
                                 output_hidden_states=False,
                                 return_dict=True,
                                 grid_hw=grid_hw).last_hidden_state

    def batch_chat(self, tokenizer, pixel_values, questions, generation_config, num_patches_list=None,
                   history=None, return_history=False, IMG_START_TOKEN='<img>', IMG_END_TOKEN='</img>',
                   IMG_CONTEXT_TOKEN='<IMG_CONTEXT>', verbose=False, image_counts=None):
        # Batched chat is intentionally disabled; the implementation below is
        # kept for reference and is unreachable.
        raise NotImplementedError('batch_chat')
        if history is not None or return_history:
            print('Now multi-turn chat is not supported in batch_chat.')
            raise NotImplementedError

        if image_counts is not None:
            num_patches_list = image_counts
            print('Warning: `image_counts` is deprecated. Please use `num_patches_list` instead.')

        img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
        self.img_context_token_id = img_context_token_id

        if verbose and pixel_values is not None:
            image_bs = pixel_values.shape[0]
            print(f'dynamic ViT batch size: {image_bs}')

        queries = []
        for idx, num_patches in enumerate(num_patches_list):
            question = questions[idx]
            if pixel_values is not None and '<image>' not in question:
                question = '<image>\n' + question
            template = get_conv_template(self.template)
            template.system_message = self.system_message
            template.append_message(template.roles[0], question)
            template.append_message(template.roles[1], None)
            query = template.get_prompt()

            image_tokens = IMG_START_TOKEN + IMG_CONTEXT_TOKEN + IMG_END_TOKEN
            query = query.replace('<image>', image_tokens, 1)
            queries.append(query)

        tokenizer.padding_side = 'left'
        model_inputs = tokenizer(queries, return_tensors='pt', padding=True)
        input_ids = model_inputs['input_ids'].to(self.device)
        attention_mask = model_inputs['attention_mask'].to(self.device)
        eos_token_id = tokenizer.convert_tokens_to_ids(template.sep.strip())
        generation_config['eos_token_id'] = eos_token_id
        generation_output = self.generate(
            pixel_values=pixel_values,
            input_ids=input_ids,
            attention_mask=attention_mask,
            **generation_config
        )
        responses = tokenizer.batch_decode(generation_output, skip_special_tokens=True)
        responses = [response.split(template.sep.strip())[0].strip() for response in responses]
        return responses

    def chat(self, tokenizer, pixel_values, question, generation_config, history=None, return_history=False, grid_hw=None,
             IMG_START_TOKEN='<img>', IMG_END_TOKEN='</img>', IMG_CONTEXT_TOKEN='<IMG_CONTEXT>', verbose=False):

        if history is None and pixel_values is not None and '<image>' not in question:
            question = '<image>\n' + question

        img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
        self.img_context_token_id = img_context_token_id
        self.img_start_token_id = tokenizer.convert_tokens_to_ids(IMG_START_TOKEN)

        template = get_conv_template(self.template)
        template.system_message = self.system_message
        eos_token_id = tokenizer.convert_tokens_to_ids(template.sep.strip())

        history = [] if history is None else history
        for (old_question, old_answer) in history:
            template.append_message(template.roles[0], old_question)
            template.append_message(template.roles[1], old_answer)
        template.append_message(template.roles[0], question)
        template.append_message(template.roles[1], None)
        query = template.get_prompt()

        if verbose and pixel_values is not None:
            print(f'dynamic image size: {grid_hw * self.patch_size}')

        # Expand each '<image>' placeholder into <img> + per-patch context tokens + </img>.
        # Guarded so that text-only chat (grid_hw=None) does not crash.
        if grid_hw is not None:
            for i in range(grid_hw.shape[0]):
                num_patch_token = int(grid_hw[i, 0] * grid_hw[i, 1] * self.downsample_ratio**2)
                image_tokens = IMG_START_TOKEN + IMG_CONTEXT_TOKEN * num_patch_token + IMG_END_TOKEN
                query = query.replace('<image>', image_tokens, 1)

        model_inputs = tokenizer(query, return_tensors='pt')
        input_ids = model_inputs['input_ids'].to(self.device)
        attention_mask = model_inputs['attention_mask'].to(self.device)
        generation_config['eos_token_id'] = eos_token_id
        generation_output = self.generate(
            pixel_values=pixel_values,
            input_ids=input_ids,
            grid_hw=grid_hw,
            attention_mask=attention_mask,
            **generation_config
        )
        response = tokenizer.batch_decode(generation_output, skip_special_tokens=True)[0]
        response = response.split(template.sep.strip())[0].strip()
        history.append((question, response))
        if return_history:
            return response, history
        else:
            query_to_print = query.replace(IMG_CONTEXT_TOKEN, '')
            query_to_print = query_to_print.replace(f'{IMG_START_TOKEN}{IMG_END_TOKEN}', '<image>')
            if verbose:
                print(query_to_print, response)
            return response
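
    # Usage sketch (names outside this module are assumptions, e.g. the
    # preprocessor that produces `pixel_values` and `grid_hw`):
    #   pixel_values, grid_hw = load_image(image_path)   # hypothetical preprocessor
    #   response = model.chat(tokenizer, pixel_values, 'Describe the image.',
    #                         generation_config=dict(max_new_tokens=512),
    #                         grid_hw=grid_hw)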

    @torch.no_grad()
    def generate(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        input_ids: Optional[torch.LongTensor] = None,
        grid_hw: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        visual_features: Optional[torch.FloatTensor] = None,
        generation_config: Optional[GenerationConfig] = None,
        output_hidden_states: Optional[bool] = None,
        **generate_kwargs,
    ) -> torch.LongTensor:
        assert input_ids.shape[0] == 1
        assert self.img_context_token_id is not None
        indexes = self.get_thw_indexes(input_ids[0], grid_hw)
        if pixel_values is not None:
            if visual_features is not None:
                vit_embeds = visual_features
            else:
                vit_embeds = self.extract_feature(pixel_values, grid_hw=grid_hw)

            input_embeds = self.language_model.get_input_embeddings()(input_ids)
            B, N, C = input_embeds.shape
            input_embeds = input_embeds.reshape(B * N, C)

            # Splice the vision embeddings into the image-context token positions.
            input_ids = input_ids.reshape(B * N)
            selected = (input_ids == self.img_context_token_id)
            assert selected.sum() != 0
            input_embeds[selected] = vit_embeds.reshape(-1, C).to(input_embeds.device)

            input_embeds = input_embeds.reshape(B, N, C)
        else:
            input_embeds = self.language_model.get_input_embeddings()(input_ids)

        outputs = self.language_model.generate(
            inputs_embeds=input_embeds,
            indexes=indexes,
            attention_mask=attention_mask,
            generation_config=generation_config,
            output_hidden_states=output_hidden_states,
            use_cache=True,
            **generate_kwargs,
        )

        return outputs

    @property
    def lm_head(self):
        return self.language_model.get_output_embeddings()

    def get_output_embeddings(self):
        return self.language_model.get_output_embeddings()

    def get_input_embeddings(self):
        return self.language_model.get_input_embeddings()

    def set_input_embeddings(self, value):
        return self.language_model.set_input_embeddings(value)

    def set_output_embeddings(self, value):
        return self.language_model.set_output_embeddings(value)

    def get_thw_indexes(self, input_ids, grid_hw):
        # Temporal index: advances on every non-context token, and steps once at
        # the first patch token of each image (via the shifted <img> indicator),
        # then stays flat across that image's remaining context tokens.
        img_start_shift = torch.cat([torch.zeros(1, dtype=torch.long).to(input_ids.device),
                                     (input_ids == self.img_start_token_id).long()], dim=0)[:-1]
        not_img_token = (input_ids != self.img_context_token_id).long()
        t_indexes = ((img_start_shift + not_img_token).cumsum(0) - 1)
        h_indexes = torch.zeros_like(t_indexes)
        w_indexes = torch.zeros_like(t_indexes)

        # Spatial indexes: image-context tokens get their patch coordinates on
        # the downsampled grid; text tokens keep (0, 0).
        selected = (input_ids == self.img_context_token_id)
        if selected.long().sum() > 0:
            abs_pos_w, abs_pos_h = build_abs_positions_from_grid_hw(
                grid_hw // int(1 / self.downsample_ratio), device=t_indexes.device)
            h_indexes[selected] = abs_pos_h.to(t_indexes.device, t_indexes.dtype)
            w_indexes[selected] = abs_pos_w.to(t_indexes.device, t_indexes.dtype)
        return torch.stack([t_indexes, h_indexes, w_indexes], dim=0)
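
# Index-layout sketch (illustrative): for the token stream
#   [T, T, <img>, C, C, C, C, T]  with a 2x2 downsampled grid,
# t advances on text tokens and steps once at the first patch token:
#   t = [0, 1, 2, 3, 3, 3, 3, 4]
# while (h, w) are zero on text and hold patch coordinates on the C tokens:
#   h = [0, 0, 0, 0, 0, 1, 1, 0],  w = [0, 0, 0, 0, 1, 0, 1, 0].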