|
|
import torch |
|
|
import accelerate |
|
|
|
|
|
def get_op_by_name(module, op_name):
    """Return the submodule of ``module`` registered under the exact
    qualified name ``op_name`` (as reported by ``named_modules()``).

    Raises:
        ValueError: if no submodule carries that name.
    """
    try:
        # named_modules() yields ("", module) first, so op_name == "" returns module itself.
        return next(sub for qual_name, sub in module.named_modules() if qual_name == op_name)
    except StopIteration:
        raise ValueError(f"Cannot find op {op_name} in module {module}") from None
|
|
|
|
|
|
|
|
def set_op_by_name(layer, name, new_module):
    """Replace the submodule of ``layer`` addressed by the dotted path
    ``name`` with ``new_module``.

    Purely-numeric path components are treated as integer indices
    (e.g. for ``nn.Sequential`` / ``nn.ModuleList`` containers); all
    other components are resolved with ``getattr``. The final component
    is always assigned via ``setattr``.
    """
    *parents, leaf = name.split(".")
    target = layer
    for part in parents:
        # Index into list-like containers, attribute-access otherwise.
        target = target[int(part)] if part.isdigit() else getattr(target, part)
    setattr(target, leaf, new_module)
|
|
|
|
|
|
|
|
def get_op_name(module, op):
    """Return the qualified name under which ``op`` is registered inside
    ``module`` (identity comparison, not equality).

    Raises:
        ValueError: if ``op`` is not a submodule of ``module``.
    """
    try:
        return next(qual_name for qual_name, sub in module.named_modules() if sub is op)
    except StopIteration:
        raise ValueError(f"Cannot find op {op} in module {module}") from None
|
|
|
|
|
|
|
|
def append_str_prefix(x, prefix):
    """Recursively prepend ``prefix`` to every string found in ``x``.

    Tuples and lists are rebuilt with the same container type; any other
    value (ints, tensors, dicts, ...) is returned untouched.
    """
    if isinstance(x, str):
        return prefix + x
    if isinstance(x, (tuple, list)):
        mapped = [append_str_prefix(item, prefix) for item in x]
        return tuple(mapped) if isinstance(x, tuple) else mapped
    return x
|
|
|
|
|
|
|
|
|
|
|
def get_module_by_name_suffix(model, module_name: str):
    """Return the first submodule whose qualified name ends with
    ``module_name``, or ``None`` when nothing matches.

    NOTE(review): this is a plain ``endswith`` check, so "fc1" would also
    match a module named "xfc1" — presumably callers only pass full
    device_map keys where this cannot happen; confirm against callers.
    """
    return next(
        (sub for qual_name, sub in model.named_modules() if qual_name.endswith(module_name)),
        None,
    )
|
|
|
|
|
|
|
|
def simple_dispatch_model(model, device_map):
    """Place `model`'s submodules on devices per `device_map` using accelerate hooks.

    `device_map` maps module-name suffixes to device strings (e.g. "cpu",
    "disk", "cuda:0"). Modules mapped to "cpu" are registered for on-demand
    offloading to `main_device`; all other modules get an AlignDevicesHook
    pinned to their assigned device. The map is stored on
    `model.hf_device_map` before returning the (hooked) model.
    """
    from accelerate.hooks import add_hook_to_module, AlignDevicesHook

    # Shortcut: an "" key means "put the whole model on one device" — no hooks needed.
    if "" in device_map:
        d = device_map[""]
        model = model.to(torch.device(d))
        model.hf_device_map = device_map
        return model

    # Record tied parameters now; moving modules below can break the ties,
    # so they are re-tied at the end.
    tied_params = accelerate.utils.modeling.find_tied_parameters(model)
    # Execution device for offloaded modules: the first non-cpu/disk device,
    # or "cpu" itself when the map contains only cpu/disk entries.
    if set(device_map.values()) == {"cpu"} or set(device_map.values()) == {
        "cpu",
        "disk",
    }:
        main_device = "cpu"
    else:
        main_device = [d for d in device_map.values() if d not in ["cpu", "disk"]][0]

    # Chain cpu-offloaded modules: each module's hook offloads its predecessor
    # when it executes, keeping at most one of them on main_device at a time.
    cpu_offload_group = [(n, d) for n, d in device_map.items() if d == "cpu"]
    prev_hook = None
    for idx, (n, d) in enumerate(cpu_offload_group):
        m = get_module_by_name_suffix(model, n)
        _, prev_hook = accelerate.cpu_offload_with_hook(
            m, execution_device=main_device, prev_module_hook=prev_hook
        )

    # Close the chain into a ring: the first offloaded module's hook points at
    # the last one, so a second forward pass offloads the tail before the head runs.
    if len(cpu_offload_group) > 1:
        get_module_by_name_suffix(
            model, cpu_offload_group[0][0]
        )._hf_hook.prev_module_hook = prev_hook

    # Pin every non-cpu module to its assigned device; io_same_device keeps
    # inputs/outputs on that device, place_submodules covers children too.
    for n, d in device_map.items():
        m = get_module_by_name_suffix(model, n)
        if d != "cpu":
            d = torch.device(d)
            hook = AlignDevicesHook(d, io_same_device=True, place_submodules=True)
            add_hook_to_module(m, hook)
    # Restore parameter tying that the device moves may have broken.
    accelerate.utils.modeling.retie_parameters(model, tied_params)
    model.hf_device_map = device_map

    return model
|
|
|