Upload folder using huggingface_hub
This view is limited to 50 files because it contains too many changes.
- .gitignore +167 -0
- .gitmodules +6 -0
- .vscode/settings.json +6 -0
- LICENSE +21 -0
- README.md +181 -0
- configs/internvl2/Eval/eval.yaml +21 -0
- configs/internvl2/MBQ_search/26b_weight_activation.yaml +26 -0
- configs/internvl2/MBQ_search/26b_weight_only.yaml +26 -0
- configs/internvl2/MBQ_search/8b_weight_activation.yaml +26 -0
- configs/internvl2/MBQ_search/8b_weight_only.yaml +26 -0
- configs/llava_onevision/Eval/eval.yaml +21 -0
- configs/llava_onevision/MBQ_search/72b_weight_activation.yaml +26 -0
- configs/llava_onevision/MBQ_search/72b_weight_only.yaml +26 -0
- configs/llava_onevision/MBQ_search/7b_weight_activation.yaml +26 -0
- configs/llava_onevision/MBQ_search/7b_weight_only.yaml +26 -0
- configs/qwen2_vl/Eval/eval.yaml +21 -0
- configs/qwen2_vl/MBQ_search/72b_weight_activation.yaml +26 -0
- configs/qwen2_vl/MBQ_search/72b_weight_only.yaml +26 -0
- configs/qwen2_vl/MBQ_search/7b_weight_activation.yaml +26 -0
- configs/qwen2_vl/MBQ_search/7b_weight_only.yaml +26 -0
- figures/logo_Infinigence-ai.png +3 -0
- figures/logo_nicsefc.jpg +3 -0
- main.py +567 -0
- main_quant.py +139 -0
- qmllm/calibration/__init__.py +0 -0
- qmllm/calibration/coco_vl.py +100 -0
- qmllm/calibration/pileval.py +44 -0
- qmllm/datasets/__init__.py +0 -0
- qmllm/datasets/multimodal_dataset.py +50 -0
- qmllm/methods/__init__.py +0 -0
- qmllm/methods/awq/__init__.py +0 -0
- qmllm/methods/awq/entry.py +40 -0
- qmllm/methods/awq/quantize/__init__.py +0 -0
- qmllm/methods/awq/quantize/auto_scale.py +572 -0
- qmllm/methods/awq/quantize/pre_quant.py +207 -0
- qmllm/methods/awq/quantize/qmodule.py +31 -0
- qmllm/methods/awq/quantize/quantizer.py +76 -0
- qmllm/methods/mbq/__init__.py +0 -0
- qmllm/methods/mbq/entry.py +58 -0
- qmllm/methods/mbq/quantize/__init__.py +0 -0
- qmllm/methods/mbq/quantize/auto_scale.py +664 -0
- qmllm/methods/mbq/quantize/auto_scale_distort.py +784 -0
- qmllm/methods/mbq/quantize/auto_scale_wa.py +681 -0
- qmllm/methods/mbq/quantize/auto_scale_wa_distort.py +840 -0
- qmllm/methods/mbq/quantize/pre_quant.py +481 -0
- qmllm/methods/mbq/quantize/qmodule.py +31 -0
- qmllm/methods/mbq/quantize/quantizer.py +102 -0
- qmllm/methods/rtn/__init__.py +0 -0
- qmllm/methods/rtn/entry.py +21 -0
- qmllm/methods/rtn/quantizer.py +85 -0
.gitignore
ADDED
@@ -0,0 +1,167 @@
# CUDA
*.egg-info*

# cache
/llm-awq/awq_cache/
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
.pdm.toml
.pdm-python
.pdm-build/

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
.gitmodules
ADDED
@@ -0,0 +1,6 @@
[submodule "3rdparty/lmms-eval"]
	path = 3rdparty/lmms-eval
	url = git@github.com:LSY-noya/lmms-eval.git
[submodule "3rdparty/LLaVA-NeXT"]
	path = 3rdparty/LLaVA-NeXT
	url = git@github.com:LSY-noya/LLaVA-NeXT.git
.vscode/settings.json
ADDED
@@ -0,0 +1,6 @@
{
    "python.analysis.extraPaths": [
        "./3rdparty/lmms-eval",
        "./3rdparty/LLaVA-NeXT",
    ]
}
LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2024

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
README.md
ADDED
@@ -0,0 +1,181 @@
# MBQ: Modality-Balanced Quantization for Large Vision-Language Models [[paper]](https://arxiv.org/abs/2412.19509)

## Installation

1. Clone this repo
```bash
git clone --recurse-submodules git@github.com:thu-nics/MBQ.git
```

2. Create a conda env
```bash
conda create -n qmllm python=3.10
```

3. Install packages and 3rdparty repos.
```bash
# Install LLaVA-NeXT
cd ./3rdparty/LLaVA-NeXT
pip install -e .

# Install lmms-eval
cd ./3rdparty/lmms-eval
pip install -e .

# Install qmllm
pip install -r requirements.txt
pip install -e .
```

## Apply model quantization in the `qmllm` package
### Command-line Interface

Quantization search for MLLMs is executed via `main_quant.py`. A variety of arguments are available to configure the quantization search process. We also support YAML files for parameter configuration; you can refer to the [yaml configs](./configs) to use and adjust the parameters directly, or create your own custom configuration.

1. Model arguments
* `--model`: Select which model type is processed during the quantization search. Must be a string corresponding to the name of the model type.
  - Only `internvl2`, `llava_onevision`, `llava`, and `qwen2_vl` are supported for now.
* `--model_args`: Control parameters passed to the model constructor. Accepts a string containing the model path, for example `--model_args pretrained=OpenGVLab/InternVL2-8B`.

2. Calibration arguments
* `--calib_data`: Select which calibration data type is used during the quantization search.
  - Only `pileval` and `coco` are supported for now.
* `--n_samples`: The number of samples used in the quantization search.
* `--data_path`: Accepts a string specifying the dataset path.
  - For `pileval`, we use `mit-han-lab/pile-val-backup`.
  - For `coco`, the data needs to be a JSON or JSONL file; you can refer to [sharegpt4v](https://github.com/InternLM/InternLM-XComposer/blob/main/projects/ShareGPT4V/docs/Data.md#prepare-images) for data preparation.
* `--image_folder`: Accepts a string specifying the image folder; you can refer to [sharegpt4v](https://github.com/InternLM/InternLM-XComposer/blob/main/projects/ShareGPT4V/docs/Data.md#prepare-images) for data preparation.
* `--few_shot_format`: Organize the calibration data in an interleaved format, currently by simply concatenating two samples.
  - This option is valid only when `--calib_data=coco`.
* `--interleave_format`: Organize the calibration data as image-text pairs mixed with pure text data, currently by simply inserting 512 pure-text tokens between two image-text pairs.
  - This option is valid only when `--calib_data=coco`.
* `--text_data_path`: Accepts a string specifying the pure-text dataset path; this dataset is used with `interleave_format`. We use `mit-han-lab/pile-val-backup`.

3. Quantization arguments
* `--method`: Select the quantization search type; supports `mbq`, `awq`, `smoothquant`, and `rtn`.
* `--run_process`: Specify this flag to run the quantization search.
* `--w_bit`: Specify the weight bit-width.
* `--w_group`: Specify the group size in `weight-only per-group` quantization (see the sketch below).
* `--a_bit`: Specify the activation bit-width.
* `--alpha`: The hyperparameter of SmoothQuant.
* `--reweight`: Specify this flag to use gradients to reweight the loss during the quantization search.
* `--distort`: Specify this flag to use distorted feature maps during the quantization search.
* `--loss_mode`: Select the loss type used during the quantization search; supports `mae` and `mse`.
* `--scale_path`: The path for saving the quantization search results.
* `--pseudo_quant`: Specify this flag to perform pseudo quantization on the model.

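The `--w_bit`/`--w_group`/`--pseudo_quant` options above boil down to per-group fake quantization of the linear-layer weights. The snippet below is only a minimal sketch of that operation under a standard asymmetric uniform quantizer; the function name and quantizer details are illustrative assumptions, not the repo's actual implementation (which lives in `qmllm/methods/*/quantize/quantizer.py`):

```python
# Minimal sketch of per-group weight pseudo-quantization (quantize-dequantize
# in floating point); illustrative only, not qmllm's exact quantizer.
import torch

def pseudo_quantize_weight(w: torch.Tensor, w_bit: int = 4, w_group: int = 128) -> torch.Tensor:
    # Split each row into groups of `w_group` input channels (in_features must
    # be divisible by w_group, as with --w_group 128 in the configs).
    orig_shape = w.shape
    w = w.reshape(-1, w_group)
    w_min = w.amin(dim=1, keepdim=True)
    w_max = w.amax(dim=1, keepdim=True)
    # Asymmetric uniform quantizer: 2**w_bit levels per group.
    scale = (w_max - w_min).clamp(min=1e-5) / (2**w_bit - 1)
    zero = (-w_min / scale).round()
    w_int = (w / scale).round().add(zero).clamp(0, 2**w_bit - 1)
    # Dequantize back to float so the model runs unchanged ("pseudo" quant).
    return (w_int - zero).mul(scale).reshape(orig_shape)
```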
### Run Quantization
* We support YAML files for parameter configuration; you can use them as follows:
```bash
python3 -W ignore main_quant.py \
    --config configs/internvl2/MBQ_search/8b_weight_only.yaml
```
* For quantization, you should specify `--run_process` in the command and provide the appropriate `data path` and `quantization config`.
* The quantization search results will be stored in `scale_path`, and we use these results to perform quantization.

1. Weight-only Quantization with MBQ
```bash
python3 -W ignore main_quant.py \
    --model internvl2 \
    --model_args pretrained="OpenGVLab/InternVL2-8B" \
    --calib_data coco \
    --data_path "your/data/path/" \
    --image_folder "your/image/folder" \
    --n_samples 128 \
    --interleave_format \
    --method mbq \
    --run_process \
    --w_bit 4 \
    --w_group 128 \
    --reweight \
    --loss_mode mae \
    --scale_path "scale_cache/mbq/internvl2_w4g128.pt"
```
2. Weight-Activation Quantization with MBQ
```bash
python3 -W ignore main_quant.py \
    --model internvl2 \
    --model_args pretrained="OpenGVLab/InternVL2-8B" \
    --calib_data coco \
    --data_path "your/data/path/" \
    --image_folder "your/image/folder" \
    --n_samples 128 \
    --method mbq \
    --run_process \
    --w_bit 4 \
    --a_bit 8 \
    --reweight \
    --distort \
    --loss_mode mae \
    --scale_path "scale_cache/mbq/internvl2_w4a8.pt"
```

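In the weight-activation command above, `--a_bit 8` additionally fake-quantizes activations. Below is a minimal sketch of one common choice, per-token symmetric quantization; the granularity and naming are assumptions for illustration, not necessarily what `qmllm` does:

```python
# Minimal sketch of per-token symmetric activation pseudo-quantization for
# --a_bit 8; illustrative only, not the repo's exact implementation.
import torch

def pseudo_quantize_activation(x: torch.Tensor, a_bit: int = 8) -> torch.Tensor:
    q_max = 2 ** (a_bit - 1) - 1  # 127 for 8-bit signed
    # One scale per token (last-dim vector), from the token's absolute maximum.
    scale = x.abs().amax(dim=-1, keepdim=True).clamp(min=1e-5) / q_max
    return (x / scale).round().clamp(-q_max - 1, q_max) * scale
```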
### Run Evaluation
* We support YAML files for parameter configuration; you can use them as follows:
```bash
python3 -W ignore main.py \
    --config configs/internvl2/Eval/eval.yaml
```
* For evaluation, you should specify `--pseudo_quant` in the command and provide the appropriate `scale path` and `quantization config`.

1. Evaluation with weight-only quantization
```bash
python3 -W ignore main.py \
    --model internvl2 \
    --model_args pretrained="OpenGVLab/InternVL2-8B" \
    --tasks mmmu \
    --batch_size 1 \
    --log_samples \
    --log_samples_suffix mmmu \
    --method mbq \
    --pseudo_quant \
    --w_bit 4 \
    --w_group 128 \
    --output_path "your/output/path" \
    --scale_path "scale_cache/mbq/internvl2_w4g128.pt"
```

2. Evaluation with weight-activation quantization
```bash
python3 -W ignore main.py \
    --model internvl2 \
    --model_args pretrained="OpenGVLab/InternVL2-8B" \
    --tasks mmmu \
    --batch_size 1 \
    --log_samples \
    --log_samples_suffix mmmu \
    --method mbq \
    --pseudo_quant \
    --w_bit 4 \
    --a_bit 8 \
    --output_path "your/output/path" \
    --scale_path "scale_cache/mbq/internvl2_w4a8.pt"
```

## Citation
```
@misc{li2024mbq,
      title={MBQ: Modality-Balanced Quantization for Large Vision-Language Models},
      author={Shiyao Li and Yingchun Hu and Xuefei Ning and Xihui Liu and Ke Hong and Xiaotao Jia and Xiuhong Li and Yaqi Yan and Pei Ran and Guohao Dai and Shengen Yan and Huazhong Yang and Yu Wang},
      year={2024},
      eprint={2412.19509},
      archivePrefix={arXiv},
      primaryClass={cs.CV},
      url={https://arxiv.org/abs/2412.19509},
}
```

## Contact Us

* Shiyao Li: lishiyao20@mails.tsinghua.edu.cn
* Yingchun Hu: huych@buaa.edu.cn
* Xuefei Ning: foxdoraame@gmail.com
* Xiaotao Jia: jiaxt@buaa.edu.cn
* Yu Wang: yu-wang@tsinghua.edu.cn

This work is maintained by [NICS-EFC Lab](https://nicsefc.ee.tsinghua.edu.cn/) (Tsinghua University) and [Infinigence-AI](https://www.infini-ai.com/) (Beijing, China).

<p align="middle">
  <img src="figures/logo_nicsefc.jpg" width="35%" hspace="30" />
  <img src="figures/logo_Infinigence-ai.png" width="35%" hspace="30" />
</p>
configs/internvl2/Eval/eval.yaml
ADDED
@@ -0,0 +1,21 @@
# model args
model: internvl2
model_args: pretrained="OpenGVLab/InternVL2-8B"

# task args
tasks: mmmu_val
batch_size: 1
log_samples: True
log_samples_suffix: mmmu_val

# quantization config
method: mbq
run_process: False # You don't need to run scale search, load the scale directly
pseudo_quant: True # if True, conduct pseudo quantization on the model, otherwise the model will stay in fp16
scale_path: scale_cache/mbq/internvl2_8b_w3g128.pt
w_bit: 3
w_group: 128

# output args
output_path: /your/output/log/path
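Configs like the one above map one-to-one onto the CLI flags: `main.py` (shown later in this diff) loads the YAML with `yaml.safe_load` and writes each key over the parsed argparse namespace. A self-contained sketch of that merge, where only `yaml.safe_load` and the `setattr` loop mirror the actual code:

```python
# Standalone sketch of the YAML-over-CLI merge performed in main.py.
import argparse
import yaml

def merge_yaml_config(args: argparse.Namespace, config_path: str) -> argparse.Namespace:
    with open(config_path, "r") as f:
        config = yaml.safe_load(f)  # e.g. {"model": "internvl2", "w_bit": 3, ...}
    merged = argparse.Namespace(**vars(args))
    for key, value in config.items():
        setattr(merged, key, value)  # YAML keys override CLI values
    return merged
```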
configs/internvl2/MBQ_search/26b_weight_activation.yaml
ADDED
@@ -0,0 +1,26 @@
# model args
model: internvl2
model_args: pretrained="OpenGVLab/InternVL2-26B",device_map=auto

# calibration data args
calib_data: coco
n_samples: 128
data_path: "your/coco/caption/path"
image_folder: "your/coco/image/folder"
few_shot_format: False # if True, concat two samples to simulate few shot
interleave_format: False # if True, insert pure text data between two image-text pairs to simulate interleave data
text_data_path: "" # You should specify this arg if you want to use interleave_format

# quantization config
method: mbq
run_process: True
w_bit: 4
a_bit: 8
distort: False # if True, use distort feature map during scale search process
reweight: True # if True, use avg gradient to reweight the loss of each modality
loss_mode: mae

# output args
scale_path: scale_cache/mbq/internvl2_26b_w4a8.pt
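The `few_shot_format` and `interleave_format` switches above control how calibration samples are composed. A rough sketch of the two compositions as the README describes them; all names here are hypothetical, and the real logic lives in `qmllm/calibration/coco_vl.py`:

```python
# Hypothetical sketch of the two calibration compositions the README describes.
# Function and variable names are illustrative, not the repo's API.
from typing import List

def few_shot_sample(pair_a: List[int], pair_b: List[int]) -> List[int]:
    # few_shot_format: simply concatenate two image-text samples
    return pair_a + pair_b

def interleave_sample(pair_a: List[int], pair_b: List[int], text_tokens: List[int]) -> List[int]:
    # interleave_format: insert 512 pure-text tokens between two image-text pairs
    return pair_a + text_tokens[:512] + pair_b
```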
configs/internvl2/MBQ_search/26b_weight_only.yaml
ADDED
@@ -0,0 +1,26 @@
# model args
model: internvl2
model_args: pretrained="OpenGVLab/InternVL2-26B",device_map=auto

# calibration data args
calib_data: coco
n_samples: 128
data_path: "your/coco/caption/path"
image_folder: "your/coco/image/folder"
few_shot_format: False # if True, concat two samples to simulate few shot
interleave_format: False # if True, insert pure text data between two image-text pairs to simulate interleave data
text_data_path: "" # You should specify this arg if you want to use interleave_format

# quantization config
method: mbq
run_process: True
w_bit: 3
w_group: 128
distort: False # if True, use distort feature map during scale search process
reweight: True # if True, use avg gradient to reweight the loss of each modality
loss_mode: mae

# output args
scale_path: scale_cache/mbq/internvl2_26b_w3g128.pt
configs/internvl2/MBQ_search/8b_weight_activation.yaml
ADDED
@@ -0,0 +1,26 @@
# model args
model: internvl2
model_args: pretrained="OpenGVLab/InternVL2-8B"

# calibration data args
calib_data: coco
n_samples: 128
data_path: "your/coco/caption/path"
image_folder: "your/coco/image/folder"
few_shot_format: False # if True, concat two samples to simulate few shot
interleave_format: False # if True, insert pure text data between two image-text pairs to simulate interleave data
text_data_path: "" # You should specify this arg if you want to use interleave_format

# quantization config
method: mbq
run_process: True
w_bit: 4
a_bit: 8
distort: False # if True, use distort feature map during scale search process
reweight: True # if True, use avg gradient to reweight the loss of each modality
loss_mode: mae

# output args
scale_path: scale_cache/mbq/internvl2_8b_w4a8.pt
configs/internvl2/MBQ_search/8b_weight_only.yaml
ADDED
@@ -0,0 +1,26 @@
# model args
model: internvl2
model_args: pretrained="OpenGVLab/InternVL2-8B"

# calibration data args
calib_data: coco
n_samples: 128
data_path: "your/coco/caption/path"
image_folder: "your/coco/image/folder"
few_shot_format: False # if True, concat two samples to simulate few shot
interleave_format: False # if True, insert pure text data between two image-text pairs to simulate interleave data
text_data_path: "" # You should specify this arg if you want to use interleave_format

# quantization config
method: mbq
run_process: True
w_bit: 3
w_group: 128
distort: False # if True, use distort feature map during scale search process
reweight: True # if True, use avg gradient to reweight the loss of each modality
loss_mode: mae

# output args
scale_path: scale_cache/mbq/internvl2_8b_w3g128.pt
configs/llava_onevision/Eval/eval.yaml
ADDED
@@ -0,0 +1,21 @@
# model args
model: llava_onevision
model_args: pretrained="lmms-lab/llava-onevision-qwen2-7b-ov"

# task args
tasks: mmmu_val
batch_size: 1
log_samples: True
log_samples_suffix: mmmu_val

# quantization config
method: mbq
run_process: False # You don't need to run scale search, load the scale directly
pseudo_quant: True # if True, conduct pseudo quantization on the model, otherwise the model will stay in fp16
scale_path: scale_cache/mbq/llava_ov_7b_w3g128.pt
w_bit: 3
w_group: 128

# output args
output_path: /your/output/log/path
configs/llava_onevision/MBQ_search/72b_weight_activation.yaml
ADDED
@@ -0,0 +1,26 @@
# model args
model: llava_onevision
model_args: pretrained="lmms-lab/llava-onevision-qwen2-72b-ov-sft",device_map=auto

# calibration data args
calib_data: coco
n_samples: 128
data_path: "your/coco/caption/path"
image_folder: "your/coco/image/folder"
few_shot_format: False # if True, concat two samples to simulate few shot
interleave_format: False # if True, insert pure text data between two image-text pairs to simulate interleave data
text_data_path: "" # You should specify this arg if you want to use interleave_format

# quantization config
method: mbq
run_process: True
w_bit: 4
a_bit: 8
distort: False # if True, use distort feature map during scale search process
reweight: True # if True, use avg gradient to reweight the loss of each modality
loss_mode: mae

# output args
scale_path: scale_cache/mbq/llava_ov_72b_w4a8.pt
configs/llava_onevision/MBQ_search/72b_weight_only.yaml
ADDED
@@ -0,0 +1,26 @@
# model args
model: llava_onevision
model_args: pretrained="lmms-lab/llava-onevision-qwen2-72b-ov-sft",device_map=auto

# calibration data args
calib_data: coco
n_samples: 128
data_path: "your/coco/caption/path"
image_folder: "your/coco/image/folder"
few_shot_format: False # if True, concat two samples to simulate few shot
interleave_format: False # if True, insert pure text data between two image-text pairs to simulate interleave data
text_data_path: "" # You should specify this arg if you want to use interleave_format

# quantization config
method: mbq
run_process: True
w_bit: 3
w_group: 128
distort: False # if True, use distort feature map during scale search process
reweight: True # if True, use avg gradient to reweight the loss of each modality
loss_mode: mae

# output args
scale_path: scale_cache/mbq/llava_ov_72b_w3g128.pt
configs/llava_onevision/MBQ_search/7b_weight_activation.yaml
ADDED
@@ -0,0 +1,26 @@
# model args
model: llava_onevision
model_args: pretrained="lmms-lab/llava-onevision-qwen2-7b-ov"

# calibration data args
calib_data: coco
n_samples: 128
data_path: "your/coco/caption/path"
image_folder: "your/coco/image/folder"
few_shot_format: False # if True, concat two samples to simulate few shot
interleave_format: False # if True, insert pure text data between two image-text pairs to simulate interleave data
text_data_path: "" # You should specify this arg if you want to use interleave_format

# quantization config
method: mbq
run_process: True
w_bit: 4
a_bit: 8
distort: False # if True, use distort feature map during scale search process
reweight: True # if True, use avg gradient to reweight the loss of each modality
loss_mode: mae

# output args
scale_path: scale_cache/mbq/llava_ov_7b_w4a8.pt
configs/llava_onevision/MBQ_search/7b_weight_only.yaml
ADDED
@@ -0,0 +1,26 @@
# model args
model: llava_onevision
model_args: pretrained="lmms-lab/llava-onevision-qwen2-7b-ov"

# calibration data args
calib_data: coco
n_samples: 128
data_path: "your/coco/caption/path"
image_folder: "your/coco/image/folder"
few_shot_format: False # if True, concat two samples to simulate few shot
interleave_format: False # if True, insert pure text data between two image-text pairs to simulate interleave data
text_data_path: "" # You should specify this arg if you want to use interleave_format

# quantization config
method: mbq
run_process: True
w_bit: 3
w_group: 128
distort: False # if True, use distort feature map during scale search process
reweight: True # if True, use avg gradient to reweight the loss of each modality
loss_mode: mae

# output args
scale_path: scale_cache/mbq/llava_ov_7b_w3g128.pt
configs/qwen2_vl/Eval/eval.yaml
ADDED
@@ -0,0 +1,21 @@
# model args
model: qwen2_vl
model_args: pretrained="Qwen/Qwen2-VL-7B-Instruct"

# task args
tasks: mmmu_val
batch_size: 1
log_samples: True
log_samples_suffix: mmmu_val

# quantization config
method: mbq
run_process: False # You don't need to run scale search, load the scale directly
pseudo_quant: True # if True, conduct pseudo quantization on the model, otherwise the model will stay in fp16
scale_path: scale_cache/mbq/qwen2_vl_7b_w3g128.pt
w_bit: 3
w_group: 128

# output args
output_path: /your/output/log/path
configs/qwen2_vl/MBQ_search/72b_weight_activation.yaml
ADDED
@@ -0,0 +1,26 @@
# model args
model: qwen2_vl
model_args: pretrained="Qwen/Qwen2-VL-72B-Instruct",device_map=auto

# calibration data args
calib_data: coco
n_samples: 128
data_path: "your/coco/caption/path"
image_folder: "your/coco/image/folder"
few_shot_format: False # if True, concat two samples to simulate few shot
interleave_format: False # if True, insert pure text data between two image-text pairs to simulate interleave data
text_data_path: "" # You should specify this arg if you want to use interleave_format

# quantization config
method: mbq
run_process: True
w_bit: 4
a_bit: 8
distort: False # if True, use distort feature map during scale search process
reweight: True # if True, use avg gradient to reweight the loss of each modality
loss_mode: mae

# output args
scale_path: scale_cache/mbq/qwen2_vl_72b_w4a8.pt
configs/qwen2_vl/MBQ_search/72b_weight_only.yaml
ADDED
@@ -0,0 +1,26 @@
# model args
model: qwen2_vl
model_args: pretrained="Qwen/Qwen2-VL-72B-Instruct",device_map=auto

# calibration data args
calib_data: coco
n_samples: 128
data_path: "your/coco/caption/path"
image_folder: "your/coco/image/folder"
few_shot_format: False # if True, concat two samples to simulate few shot
interleave_format: False # if True, insert pure text data between two image-text pairs to simulate interleave data
text_data_path: "" # You should specify this arg if you want to use interleave_format

# quantization config
method: mbq
run_process: True
w_bit: 3
w_group: 128
distort: False # if True, use distort feature map during scale search process
reweight: True # if True, use avg gradient to reweight the loss of each modality
loss_mode: mae

# output args
scale_path: scale_cache/mbq/qwen2_vl_72b_w3g128.pt
configs/qwen2_vl/MBQ_search/7b_weight_activation.yaml
ADDED
@@ -0,0 +1,26 @@
# model args
model: qwen2_vl
model_args: pretrained="Qwen/Qwen2-VL-7B-Instruct"

# calibration data args
calib_data: coco
n_samples: 128
data_path: "your/coco/caption/path"
image_folder: "your/coco/image/folder"
few_shot_format: False # if True, concat two samples to simulate few shot
interleave_format: False # if True, insert pure text data between two image-text pairs to simulate interleave data
text_data_path: "" # You should specify this arg if you want to use interleave_format

# quantization config
method: mbq
run_process: True
w_bit: 4
a_bit: 8
distort: False # if True, use distort feature map during scale search process
reweight: True # if True, use avg gradient to reweight the loss of each modality
loss_mode: mae

# output args
scale_path: scale_cache/mbq/qwen2_vl_7b_w4a8.pt
configs/qwen2_vl/MBQ_search/7b_weight_only.yaml
ADDED
@@ -0,0 +1,26 @@
# model args
model: qwen2_vl
model_args: pretrained="Qwen/Qwen2-VL-7B-Instruct"

# calibration data args
calib_data: coco
n_samples: 128
data_path: "your/coco/caption/path"
image_folder: "your/coco/image/folder"
few_shot_format: False # if True, concat two samples to simulate few shot
interleave_format: False # if True, insert pure text data between two image-text pairs to simulate interleave data
text_data_path: "" # You should specify this arg if you want to use interleave_format

# quantization config
method: mbq
run_process: True
w_bit: 3
w_group: 128
distort: False # if True, use distort feature map during scale search process
reweight: True # if True, use avg gradient to reweight the loss of each modality
loss_mode: mae

# output args
scale_path: scale_cache/mbq/qwen2_vl_7b_w3g128.pt
figures/logo_Infinigence-ai.png
ADDED
Git LFS Details
figures/logo_nicsefc.jpg
ADDED
Git LFS Details
main.py
ADDED
@@ -0,0 +1,567 @@
import argparse
import datetime
import importlib
import json
import os
import sys
import traceback
import warnings
from functools import partial

import numpy as np
import yaml

warnings.simplefilter("ignore", category=DeprecationWarning)

import hashlib
from pathlib import Path
from typing import Union

from accelerate import Accelerator
from accelerate.utils import InitProcessGroupKwargs
from loguru import logger as eval_logger

from lmms_eval import evaluator, utils
from lmms_eval.models import get_model
from lmms_eval.evaluator import request_caching_arg_to_dict
from lmms_eval.loggers import EvaluationTracker, WandbLogger
from lmms_eval.tasks import TaskManager
from lmms_eval.utils import (
    make_table,
    simple_parse_args_string,
)

from qmllm.quantization.quant_wrapper import qwrapper
from qmllm.models import get_process_model
from qmllm.calibration.pileval import get_calib_dataset
from qmllm.calibration.coco_vl import get_multimodal_calib_dataset


def _int_or_none_list_arg_type(min_len: int, max_len: int, defaults: str, value: str, split_char: str = ","):
    def parse_value(item):
        item = item.strip().lower()
        if item == "none":
            return None
        try:
            return int(item)
        except ValueError:
            raise argparse.ArgumentTypeError(f"{item} is not an integer or None")

    items = [parse_value(v) for v in value.split(split_char)]
    num_items = len(items)

    if num_items == 1:
        # Makes downstream handling the same for single and multiple values
        items = items * max_len
    elif num_items < min_len or num_items > max_len:
        raise argparse.ArgumentTypeError(f"Argument requires {max_len} integers or None, separated by '{split_char}'")
    elif num_items != max_len:
        print(f"Argument requires {max_len} integers or None, separated by '{split_char}'. " "Missing values will be filled with defaults.")
        default_items = [parse_value(v) for v in defaults.split(split_char)]
        items.extend(default_items[num_items:])  # extend items list with missing defaults

    return items

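# Illustrative examples of the parser above, assuming min_len=3, max_len=4 and
# the defaults string "0,1234,1234,1234" that the --seed argument below uses:
#   "42"       -> [42, 42, 42, 42]    (a single value is broadcast to max_len)
#   "0,none,8" -> [0, None, 8, 1234]  (missing tail values filled from defaults)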
def check_argument_types(parser: argparse.ArgumentParser):
    """
    Check to make sure all CLI args are typed, raises error if not
    """
    for action in parser._actions:
        if action.dest != "help" and not action.const:
            if action.type is None:
                raise ValueError(f"Argument '{action.dest}' doesn't have a type specified.")
            else:
                continue


def _handle_non_serializable(o):
    if isinstance(o, np.int64) or isinstance(o, np.int32):
        return int(o)
    elif isinstance(o, set):
        return list(o)
    else:
        return str(o)


def parse_eval_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("--config", default="", help="Path to a yaml file specifying all eval arguments, will ignore cli arguments if specified")
    parser.add_argument("--model", default="hf", help="Name of model e.g. `hf`")
    parser.add_argument(
        "--tasks",
        default=None,
        help="To get full list of tasks, use the command lmms-eval --tasks list",
    )
    parser.add_argument(
        "--model_args",
        default="",
        help="String arguments for model, e.g. `pretrained=EleutherAI/pythia-160m,dtype=float32`",
    )
    parser.add_argument(
        "--num_fewshot",
        type=int,
        default=None,
        help="Number of examples in few-shot context",
    )
    parser.add_argument(
        "--batch_size",
        "-b",
        type=str,
        default=1,
        metavar="auto|auto:N|N",
        help="Acceptable values are 'auto', 'auto:N' or N, where N is an integer. Default 1.",
    )
    parser.add_argument(
        "--max_batch_size",
        type=int,
        default=None,
        metavar="N",
        help="Maximal batch size to try with --batch_size auto.",
    )
    parser.add_argument(
        "--device",
        type=str,
        default=None,
        help="Device to use (e.g. cuda, cuda:0, cpu)",
    )
    parser.add_argument(
        "--output_path",
        default=None,
        type=str,
        metavar="= [dir/file.jsonl] [DIR]",
        help="The path to the output file where the result metrics will be saved. If the path is a directory and log_samples is true, the results will be saved in the directory. Else the parent directory will be used.",
    )
    parser.add_argument(
        "--limit",
        type=float,
        default=None,
        help="Limit the number of examples per task. " "If <1, limit is a percentage of the total number of examples.",
    )
    parser.add_argument(
        "--use_cache",
        "-c",
        type=str,
        default=None,
        metavar="DIR",
        help="A path to a sqlite db file for caching model responses. `None` if not caching.",
    )
    parser.add_argument(
        "--cache_requests",
        type=str,
        default=None,
        choices=["true", "refresh", "delete"],
        help="Speed up evaluation by caching the building of dataset requests. `None` if not caching.",
    )
    parser.add_argument(
        "--check_integrity",
        action="store_true",
        help="Whether to run the relevant part of the test suite for the tasks",
    )
    parser.add_argument(
        "--write_out",
        "-w",
        action="store_true",
        default=False,
        help="Prints the prompt for the first few documents.",
    )
    parser.add_argument(
        "--log_samples",
        action="store_true",
        default=False,
        help="If True, write out all model outputs and documents for per-sample measurement and post-hoc analysis",
    )
    parser.add_argument(
        "--wandb_log_samples",
        action="store_true",
        default=False,
        help="If True, write out all model outputs and documents for per-sample measurement and post-hoc analysis to Weights and Biases",
    )
    parser.add_argument(
        "--log_samples_suffix",
        type=str,
        default="model_outputs",
        help="Specify a suffix for the log_samples file name.",
    )
    parser.add_argument(
        "--system_instruction",
        type=str,
        default=None,
        help="System instruction to be used in the prompt",
    )
    parser.add_argument(
        "--apply_chat_template",
        action="store_true",
        default=False,
        help="If True, applies the chat template to the prompt",
    )
    parser.add_argument(
        "--fewshot_as_multiturn",
        action="store_true",
        default=False,
        help="If True, uses the fewshot as a multi-turn conversation",
    )
    parser.add_argument(
        "--show_config",
        action="store_true",
        default=False,
        help="If True, shows the full config of all tasks at the end of the evaluation.",
    )
    parser.add_argument(
        "--include_path",
        type=str,
        default=None,
        help="Additional path to include if there are external tasks to include.",
    )
    parser.add_argument(
        "--gen_kwargs",
        default="",
        help=("String arguments for model generation on greedy_until tasks," " e.g. `temperature=0,top_k=0,top_p=0`"),
    )
    parser.add_argument(
        "--verbosity",
        type=str,
        default="INFO",
        help="Log error when tasks are not registered.",
    )
    parser.add_argument(
        "--wandb_args",
        default="",
        help="Comma separated string arguments passed to wandb.init, e.g. `project=lmms-eval,job_type=eval`",
    )
    parser.add_argument(
        "--timezone",
        default="Asia/Singapore",
        help="Timezone for datetime string, e.g. Asia/Singapore, America/New_York, America/Los_Angeles. You can check the full list via `import pytz; print(pytz.common_timezones)`",
    )
    parser.add_argument(
        "--hf_hub_log_args",
        type=str,
        default="",
        help="Comma separated string arguments passed to Hugging Face Hub's log function, e.g. `hub_results_org=EleutherAI,hub_repo_name=lm-eval-results`",
    )
    parser.add_argument(
        "--predict_only",
        "-x",
        action="store_true",
        default=False,
        help="Use with --log_samples. Only model outputs will be saved and metrics will not be evaluated.",
    )
    default_seed_string = "0,1234,1234,1234"
    parser.add_argument(
        "--seed",
        type=partial(_int_or_none_list_arg_type, 3, 4, default_seed_string),
        default=default_seed_string,  # for backward compatibility
        help=(
            "Set seed for python's random, numpy, torch, and fewshot sampling.\n"
            "Accepts a comma-separated list of 4 values for python's random, numpy, torch, and fewshot sampling seeds, "
            "respectively, or a single integer to set the same seed for all four.\n"
            f"The values are either an integer or 'None' to not set the seed. Default is `{default_seed_string}` "
            "(for backward compatibility).\n"
            "E.g. `--seed 0,None,8,52` sets `random.seed(0)`, `torch.manual_seed(8)`, and fewshot sampling seed to 52. "
            "Here numpy's seed is not set since the second value is `None`.\n"
            "E.g, `--seed 42` sets all four seeds to 42."
        ),
    )
    parser.add_argument(
        "--trust_remote_code",
        action="store_true",
        help="Sets trust_remote_code to True to execute code to create HF Datasets from the Hub",
    )

    # calibration parameters
    parser.add_argument("--calib_data", default="pileval", choices=["pileval", "coco", None])
    parser.add_argument("--data_path", default="", type=str)
    parser.add_argument("--image_folder", default="", type=str)
    parser.add_argument("--n_samples", default=128, type=int)
    parser.add_argument("--interleave_format", action="store_true")

    # TODO: quantization parameters
    parser.add_argument("--method", default="awq", choices=["awq", "smoothquant", "mbq", "rtn", None])
    parser.add_argument("--w_bit", default=8, type=int)
    parser.add_argument("--a_bit", default=16, type=int)
    parser.add_argument("--w_group", default=128, type=int)
    parser.add_argument("--alpha", default=0.5, type=float)
    parser.add_argument("--reweight", action="store_true")
    parser.add_argument("--distort", action="store_true")
    parser.add_argument("--loss_mode", default="mae", choices=["mae", "mse"])
    parser.add_argument("--scale_path", default=None, type=str)
    parser.add_argument("--run_process", action="store_true")
    parser.add_argument("--pseudo_quant", action="store_true")
    args = parser.parse_args()
    return args


def cli_evaluate(args: Union[argparse.Namespace, None] = None) -> None:
    if not args:
        args = parse_eval_args()

    # Check if no arguments were passed after parsing
    if len(sys.argv) == 1:
        print("┌───────────────────────────────────────────────────────────────────────────────┐")
        print("│ Please provide arguments to evaluate the model. e.g.                          │")
        print("│ `lmms-eval --model llava --model_path liuhaotian/llava-v1.6-7b --tasks okvqa` │")
        print("│ Use `lmms-eval --help` for more information.                                  │")
        print("└───────────────────────────────────────────────────────────────────────────────┘")
        sys.exit(1)

    if args.wandb_args:
        if "name" not in args.wandb_args:
            name = f"{args.model}_{args.model_args}_{utils.get_datetime_str(timezone=args.timezone)}"
            name = utils.sanitize_long_string(name)
            args.wandb_args += f",name={name}"
        wandb_logger = WandbLogger(**simple_parse_args_string(args.wandb_args))

    # reset logger
    eval_logger.remove()
    eval_logger.add(sys.stdout, colorize=True, level=args.verbosity)
    eval_logger.info(f"Verbosity set to {args.verbosity}")
    os.environ["VERBOSITY"] = args.verbosity
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    args_list = []
    results_list = []
    if args.config:
        if not os.path.exists(args.config):
            raise ValueError(f"Config file does not exist: {args.config}")

        with open(args.config, "r") as file:
            config_args = yaml.safe_load(file)
        config_args = [config_args] if type(config_args) != list else config_args
        # multiple configs, create args list first
        for config in config_args:
            args_copy = argparse.Namespace(**vars(args))
            for key, value in config.items():
                setattr(args_copy, key, value)
            args_list.append(args_copy)
    else:
        args_list.append(args)

    # initialize Accelerator
    kwargs_handler = InitProcessGroupKwargs(timeout=datetime.timedelta(seconds=60000))
    accelerator = Accelerator(kwargs_handlers=[kwargs_handler])
    if accelerator.is_main_process:
        is_main_process = True
    else:
        is_main_process = False

    for args in args_list:
        try:
            # if is_main_process and args.wandb_args:  # thoughtfully we should only init wandb once, instead of multiple ranks, to avoid network traffic and unwanted behaviors.
            #     wandb_logger = WandbLogger()

            results, samples = cli_evaluate_single(args)
            results_list.append(results)

            accelerator.wait_for_everyone()
            if is_main_process and args.wandb_args:
                try:
                    wandb_logger.post_init(results)
                    wandb_logger.log_eval_result()
                    if args.wandb_log_samples and samples is not None:
                        wandb_logger.log_eval_samples(samples)
                except Exception as e:
                    eval_logger.info(f"Logging to Weights and Biases failed due to {e}")
            # wandb_logger.finish()

        except Exception as e:
            if args.verbosity == "DEBUG":
                raise e
            else:
                traceback.print_exc()
                eval_logger.error(f"Error during evaluation: {e}. Please set `--verbosity=DEBUG` to get more information.")
                results_list.append(None)

    for args, results in zip(args_list, results_list):
        # cli_evaluate will return none if the process is not the main process (rank 0)
        if results is not None:
            print(f"{args.model} ({args.model_args}), gen_kwargs: ({args.gen_kwargs}), limit: {args.limit}, num_fewshot: {args.num_fewshot}, " f"batch_size: {args.batch_size}")
            print(make_table(results))
            if "groups" in results:
                print(make_table(results, "groups"))

    if args.wandb_args:
        wandb_logger.run.finish()


def cli_evaluate_single(args: Union[argparse.Namespace, None] = None) -> None:
    selected_task_list = args.tasks.split(",") if args.tasks else None

    if args.include_path is not None:
        eval_logger.info(f"Including path: {args.include_path}")
    task_manager = TaskManager(args.verbosity, include_path=args.include_path, model_name=args.model)

    # update the evaluation tracker args with the output path and the HF token
    if args.output_path:
        args.hf_hub_log_args += f",output_path={args.output_path}"
    if os.environ.get("HF_TOKEN", None):
        args.hf_hub_log_args += f",token={os.environ.get('HF_TOKEN')}"
| 400 |
+
evaluation_tracker_args = simple_parse_args_string(args.hf_hub_log_args)
|
| 401 |
+
eval_logger.info(f"Evaluation tracker args: {evaluation_tracker_args}")
|
| 402 |
+
|
| 403 |
+
evaluation_tracker = EvaluationTracker(**evaluation_tracker_args)
|
| 404 |
+
|
| 405 |
+
if args.predict_only:
|
| 406 |
+
args.log_samples = True
|
| 407 |
+
if (args.log_samples or args.predict_only) and not args.output_path:
|
| 408 |
+
raise ValueError("Specify --output_path if providing --log_samples or --predict_only")
|
| 409 |
+
|
| 410 |
+
if args.fewshot_as_multiturn and args.apply_chat_template is False:
|
| 411 |
+
raise ValueError("If fewshot_as_multiturn is set, apply_chat_template must be set to True.")
|
| 412 |
+
|
| 413 |
+
if (args.num_fewshot is None or args.num_fewshot == 0) and args.fewshot_as_multiturn:
|
| 414 |
+
raise ValueError("If fewshot_as_multiturn is set, num_fewshot must be greater than 0.")
|
| 415 |
+
|
| 416 |
+
if args.include_path is not None:
|
| 417 |
+
eval_logger.info(f"Including path: {args.include_path}")
|
| 418 |
+
|
| 419 |
+
if "push_samples_to_hub" in evaluation_tracker_args and not args.log_samples:
|
| 420 |
+
eval_logger.warning("Pushing samples to the Hub requires --log_samples to be set. Samples will not be pushed to the Hub.")
|
| 421 |
+
|
| 422 |
+
if args.limit:
|
| 423 |
+
eval_logger.warning(" --limit SHOULD ONLY BE USED FOR TESTING." "REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT.")
|
| 424 |
+
|
| 425 |
+
if os.environ.get("LMMS_EVAL_PLUGINS", None):
|
| 426 |
+
args.include_path = [args.include_path] if args.include_path else []
|
| 427 |
+
for plugin in os.environ["LMMS_EVAL_PLUGINS"].split(","):
|
| 428 |
+
package_tasks_location = importlib.util.find_spec(f"{plugin}.tasks").submodule_search_locations[0]
|
| 429 |
+
args.include_path.append(package_tasks_location)
|
| 430 |
+
|
| 431 |
+
if args.tasks is None:
|
| 432 |
+
eval_logger.error("Need to specify task to evaluate.")
|
| 433 |
+
sys.exit()
|
| 434 |
+
elif args.tasks == "list":
|
| 435 |
+
eval_logger.info("Available Tasks:\n - {}".format(f"\n - ".join(sorted(task_manager.list_all_tasks()))))
|
| 436 |
+
sys.exit()
|
| 437 |
+
elif args.tasks == "list_groups":
|
| 438 |
+
eval_logger.info(task_manager.list_all_tasks(list_subtasks=False, list_tags=False))
|
| 439 |
+
sys.exit()
|
| 440 |
+
elif args.tasks == "list_tags":
|
| 441 |
+
eval_logger.info(task_manager.list_all_tasks(list_groups=False, list_subtasks=False))
|
| 442 |
+
sys.exit()
|
| 443 |
+
elif args.tasks == "list_subtasks":
|
| 444 |
+
eval_logger.info(task_manager.list_all_tasks(list_groups=False, list_tags=False))
|
| 445 |
+
sys.exit()
|
| 446 |
+
else:
|
| 447 |
+
if os.path.isdir(args.tasks):
|
| 448 |
+
import glob
|
| 449 |
+
|
| 450 |
+
task_names = []
|
| 451 |
+
yaml_path = os.path.join(args.tasks, "*.yaml")
|
| 452 |
+
for yaml_file in glob.glob(yaml_path):
|
| 453 |
+
config = utils.load_yaml_config(yaml_file)
|
| 454 |
+
task_names.append(config)
|
| 455 |
+
else:
|
| 456 |
+
task_list = args.tasks.split(",")
|
| 457 |
+
task_names = task_manager.match_tasks(task_list)
|
| 458 |
+
for task in [task for task in task_list if task not in task_names]:
|
| 459 |
+
if os.path.isfile(task):
|
| 460 |
+
config = utils.load_yaml_config(task)
|
| 461 |
+
task_names.append(config)
|
| 462 |
+
task_missing = [task for task in task_list if task not in task_names and "*" not in task] # we don't want errors if a wildcard ("*") task name was used
|
| 463 |
+
|
| 464 |
+
if task_missing:
|
| 465 |
+
missing = ", ".join(task_missing)
|
| 466 |
+
eval_logger.error(
|
| 467 |
+
f"Tasks were not found: {missing}\n" f"{utils.SPACING}Try `lm-eval --tasks list` for list of available tasks",
|
| 468 |
+
)
|
| 469 |
+
raise ValueError(
|
| 470 |
+
f"Tasks not found: {missing}. Try `lm-eval --tasks {{list_groups,list_subtasks,list_tags,list}}` to list out all available names for task groupings; only (sub)tasks; tags; or all of the above, or pass '--verbosity DEBUG' to troubleshoot task registration issues."
|
| 471 |
+
)
|
| 472 |
+
|
| 473 |
+
eval_logger.info(f"Selected Tasks: {task_names}")
|
| 474 |
+
request_caching_args = request_caching_arg_to_dict(cache_requests=args.cache_requests)
|
| 475 |
+
datetime_str = utils.get_datetime_str(timezone=args.timezone)
|
| 476 |
+
|
| 477 |
+
# here we load MLLMs outside of the evaluator.
|
| 478 |
+
if args.model_args is None:
|
| 479 |
+
args.model_args = ""
|
| 480 |
+
|
| 481 |
+
ModelClass = get_model(args.model)
|
| 482 |
+
lm = ModelClass.create_from_arg_string(
|
| 483 |
+
args.model_args,
|
| 484 |
+
{
|
| 485 |
+
"batch_size": args.batch_size,
|
| 486 |
+
"device": args.device,
|
| 487 |
+
},
|
| 488 |
+
)
|
| 489 |
+
#
|
| 490 |
+
if args.pseudo_quant:
|
| 491 |
+
print("Pseudo quant...")
|
| 492 |
+
Process_ModelClass = get_process_model(args.model)
|
| 493 |
+
process_model = Process_ModelClass(lm._model,
|
| 494 |
+
lm._tokenizer,
|
| 495 |
+
lm.processor if hasattr(lm, 'processor') else None)
|
| 496 |
+
|
| 497 |
+
#
|
| 498 |
+
prompt_inputs = None
|
| 499 |
+
prompt_kwargs = None
|
| 500 |
+
|
| 501 |
+
#
|
| 502 |
+
qwrapper(process_model, prompt_inputs, prompt_kwargs, args)
|
| 503 |
+
|
| 504 |
+
results = evaluator.simple_evaluate(
|
| 505 |
+
model=args.model,
|
| 506 |
+
lm=lm,
|
| 507 |
+
model_args=args.model_args,
|
| 508 |
+
tasks=task_names,
|
| 509 |
+
num_fewshot=args.num_fewshot,
|
| 510 |
+
batch_size=args.batch_size,
|
| 511 |
+
max_batch_size=args.max_batch_size,
|
| 512 |
+
device=args.device,
|
| 513 |
+
use_cache=args.use_cache,
|
| 514 |
+
limit=args.limit,
|
| 515 |
+
check_integrity=args.check_integrity,
|
| 516 |
+
write_out=args.write_out,
|
| 517 |
+
log_samples=args.log_samples,
|
| 518 |
+
evaluation_tracker=evaluation_tracker,
|
| 519 |
+
system_instruction=args.system_instruction,
|
| 520 |
+
apply_chat_template=args.apply_chat_template,
|
| 521 |
+
fewshot_as_multiturn=args.fewshot_as_multiturn,
|
| 522 |
+
gen_kwargs=args.gen_kwargs,
|
| 523 |
+
task_manager=task_manager,
|
| 524 |
+
verbosity=args.verbosity,
|
| 525 |
+
predict_only=args.predict_only,
|
| 526 |
+
random_seed=args.seed[0],
|
| 527 |
+
numpy_random_seed=args.seed[1],
|
| 528 |
+
torch_random_seed=args.seed[2],
|
| 529 |
+
fewshot_random_seed=args.seed[3],
|
| 530 |
+
cli_args=args,
|
| 531 |
+
datetime_str=datetime_str,
|
| 532 |
+
**request_caching_args,
|
| 533 |
+
)
|
| 534 |
+
|
| 535 |
+
if results is not None:
|
| 536 |
+
if args.log_samples:
|
| 537 |
+
samples = results.pop("samples")
|
| 538 |
+
else:
|
| 539 |
+
samples = None
|
| 540 |
+
dumped = json.dumps(results, indent=4, default=_handle_non_serializable)
|
| 541 |
+
if args.show_config:
|
| 542 |
+
print(dumped)
|
| 543 |
+
|
| 544 |
+
batch_sizes = ",".join(map(str, results["config"]["batch_sizes"]))
|
| 545 |
+
|
| 546 |
+
evaluation_tracker.save_results_aggregated(results=results, samples=samples if args.log_samples else None, datetime_str=datetime_str)
|
| 547 |
+
|
| 548 |
+
if args.log_samples:
|
| 549 |
+
for task_name, config in results["configs"].items():
|
| 550 |
+
evaluation_tracker.save_results_samples(task_name=task_name, samples=samples[task_name])
|
| 551 |
+
|
| 552 |
+
if evaluation_tracker.push_results_to_hub or evaluation_tracker.push_samples_to_hub:
|
| 553 |
+
evaluation_tracker.recreate_metadata_card()
|
| 554 |
+
|
| 555 |
+
return results, samples
|
| 556 |
+
return None, None
|
| 557 |
+
|
| 558 |
+
|
| 559 |
+
def print_results(args, results):
|
| 560 |
+
print(f"{args.model} ({args.model_args}),\ngen_kwargs: ({args.gen_kwargs}),\nlimit: {args.limit},\nnum_fewshot: {args.num_fewshot},\nbatch_size: {args.batch_size}")
|
| 561 |
+
print(evaluator.make_table(results))
|
| 562 |
+
if "groups" in results:
|
| 563 |
+
print(evaluator.make_table(results, "groups"))
|
| 564 |
+
|
| 565 |
+
|
| 566 |
+
if __name__ == "__main__":
|
| 567 |
+
cli_evaluate()
|
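A quick illustration of the `--seed` semantics documented in the help text above. This is a self-contained sketch, not the parser main.py actually uses; the helper name `parse_seed_string` and the default string are hypothetical placeholders.

def parse_seed_string(seed_arg, default="0,1234,1234,1234"):
    # "0,None,8,52" -> [0, None, 8, 52]; a single value is broadcast to all four slots
    parts = (seed_arg or default).split(",")
    if len(parts) == 1:
        parts = parts * 4
    assert len(parts) == 4, "expected 1 or 4 comma-separated seed values"
    return [None if p == "None" else int(p) for p in parts]

print(parse_seed_string("0,None,8,52"))  # [0, None, 8, 52] -> random, numpy, torch, fewshot
print(parse_seed_string("42"))           # [42, 42, 42, 42]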
main_quant.py
ADDED
@@ -0,0 +1,139 @@
import argparse
import datetime
import importlib
import json
import os
import sys
import traceback
import warnings
from functools import partial

import numpy as np
import yaml

warnings.simplefilter("ignore", category=DeprecationWarning)

from typing import Union

from lmms_eval.models import get_model

from qmllm.quantization.quant_wrapper import qwrapper
from qmllm.models import get_process_model
from qmllm.calibration.pileval import get_calib_dataset
from qmllm.calibration.coco_vl import get_multimodal_calib_dataset


def parse_quant_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("--config", default="", help="Path to a yaml file specifying all eval arguments, will ignore cli arguments if specified")
    parser.add_argument("--model", default="hf", help="Name of model e.g. `hf`")
    parser.add_argument(
        "--model_args",
        default="",
        help="String arguments for model, e.g. `pretrained=EleutherAI/pythia-160m,dtype=float32`",
    )
    parser.add_argument(
        "--batch_size",
        "-b",
        type=str,
        default=1,
        metavar="auto|auto:N|N",
        help="Acceptable values are 'auto', 'auto:N' or N, where N is an integer. Default 1.",
    )
    parser.add_argument(
        "--device",
        type=str,
        default=None,
        help="Device to use (e.g. cuda, cuda:0, cpu)",
    )
    # calibration parameters
    parser.add_argument("--calib_data", default="pileval", choices=["pileval", "coco", None])
    parser.add_argument("--n_samples", default=128, type=int)
    parser.add_argument("--data_path", default="", type=str)
    parser.add_argument("--image_folder", default="", type=str)
    parser.add_argument("--interleave_format", action="store_true")
    parser.add_argument("--few_shot_format", action="store_true")
    parser.add_argument("--text_data_path", default="", type=str)

    # TODO: quantization parameters
    parser.add_argument("--method", default="awq", choices=["awq", "smoothquant", "mbq", "rtn", None])
    parser.add_argument("--w_bit", default=8, type=int)
    parser.add_argument("--a_bit", default=16, type=int)
    parser.add_argument("--w_group", default=128, type=int)
    parser.add_argument("--alpha", default=0.5, type=float)  # float: type=int would reject values like 0.5
    parser.add_argument("--reweight", action="store_true")
    parser.add_argument("--distort", action="store_true")
    parser.add_argument("--loss_mode", default="mae", choices=["mae", "mse"])
    parser.add_argument("--scale_path", default=None, type=str)
    parser.add_argument("--run_process", action="store_true")
    parser.add_argument("--pseudo_quant", action="store_true")
    args = parser.parse_args()
    return args


def cli_quant(args: Union[argparse.Namespace, None] = None) -> None:
    if not args:
        args = parse_quant_args()

    args_list = []
    if args.config:
        if not os.path.exists(args.config):
            raise ValueError(f"Config file does not exist: {args.config}")

        with open(args.config, "r") as file:
            config_args = yaml.safe_load(file)
        config_args = [config_args] if type(config_args) != list else config_args
        # multiple configs, create args list first
        for config in config_args:
            args_copy = argparse.Namespace(**vars(args))
            for key, value in config.items():
                setattr(args_copy, key, value)
            args_list.append(args_copy)
    else:
        args_list.append(args)

    for args in args_list:
        cli_quant_single(args)


def cli_quant_single(args: Union[argparse.Namespace, None] = None) -> None:
    # here we load MLLMs outside of the evaluator.
    if args.model_args is None:
        args.model_args = ""

    ModelClass = get_model(args.model)
    lm = ModelClass.create_from_arg_string(
        args.model_args,
        {
            "batch_size": args.batch_size,
            "device": args.device,
        },
    )

    # Preprocess the MLLM here, use "lm._model" to get the fp16 mllm.
    Process_ModelClass = get_process_model(args.model)
    process_model = Process_ModelClass(lm._model,
                                       lm._tokenizer,
                                       lm.processor if hasattr(lm, 'processor') else None)

    # Generate the calibration tokens.
    prompt_inputs = None
    prompt_kwargs = None

    if args.calib_data == "pileval":
        prompt_inputs, prompt_kwargs = get_calib_dataset(data_path=args.data_path, tokenizer=lm._tokenizer, n_samples=args.n_samples)
    elif args.calib_data == "coco":
        prompt_inputs, prompt_kwargs = get_multimodal_calib_dataset(data_path=args.data_path,
                                                                    image_folder=args.image_folder,
                                                                    model=process_model,
                                                                    n_samples=args.n_samples,
                                                                    few_shot_format=args.few_shot_format,
                                                                    interleave_format=args.interleave_format,
                                                                    text_data_path=args.text_data_path)

    # Wrap the model with the chosen quantization method.
    qwrapper(process_model, prompt_inputs, prompt_kwargs, args)


if __name__ == "__main__":
    cli_quant()
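The YAML-override step inside cli_quant is worth seeing in isolation. Below is a minimal, self-contained sketch of the same overlay logic, with an inline YAML string standing in for a file such as the ones under configs/:

import argparse
import yaml

base = argparse.Namespace(method="awq", w_bit=8, a_bit=16)
config_args = yaml.safe_load("method: mbq\nw_bit: 4\n")
# a single mapping is wrapped into a one-element list, mirroring cli_quant
config_args = [config_args] if not isinstance(config_args, list) else config_args
for config in config_args:
    args_copy = argparse.Namespace(**vars(base))
    for key, value in config.items():
        setattr(args_copy, key, value)   # YAML values override the CLI defaults
    print(args_copy)                     # Namespace(a_bit=16, method='mbq', w_bit=4)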
qmllm/calibration/__init__.py
ADDED
File without changes
qmllm/calibration/coco_vl.py
ADDED
@@ -0,0 +1,100 @@
import os
import json
import torch
import numpy as np

from PIL import Image
from datasets import load_dataset

def load_image(image_path):
    # Load the image using PIL; we don't support tcs_loader.
    return Image.open(image_path).convert('RGB')


def get_multimodal_calib_dataset(
    data_path,
    image_folder,
    model,
    n_samples=128,
    few_shot_format=False,
    interleave_format=False,
    text_data_path=None,
    shuffle=True,
):
    if data_path.endswith(".jsonl"):
        dataset = []
        with open(data_path, "r") as json_file:
            for line in json_file:
                dataset.append(json.loads(line.strip()))
    elif data_path.endswith(".json"):
        with open(data_path, "r") as json_file:
            dataset = json.load(json_file)
    else:
        raise ValueError(f"Unsupported file type: {data_path}")

    if shuffle:
        rng = np.random.default_rng(seed=42)
        rng.shuffle(dataset)

    data_list = []
    for i in range(n_samples):
        i = i % len(dataset)
        data_item = dataset[i]
        if 'image' in data_item and len(data_item['image']) != 0:
            if type(data_item['image']) == list:
                images = []
                for image_path in data_item['image']:
                    # Merge the image path
                    full_image_path = os.path.join(image_folder, image_path)
                    image = load_image(full_image_path)
                    images.append(image)
            else:
                images = []
                image_path = data_item['image']
                full_image_path = os.path.join(image_folder, image_path)
                image = load_image(full_image_path)
                images.append(image)
        else:
            images = None

        data_dict = model.preprocess_data(images, data_item)
        data_list.append(data_dict)

    examples = model.data_collator(data_list)

    if few_shot_format and interleave_format:
        raise ValueError('You cannot specify both few_shot_format and interleave_format at the same time!')

    if few_shot_format:
        examples = model.few_shot_data_samples(examples)
    if interleave_format:
        if not text_data_path:
            dataset = load_dataset("mit-han-lab/pile-val-backup", split="validation")
        else:
            # TODO: for other datasets, maybe we should not specify the split?
            dataset = load_dataset(text_data_path, split="validation")  # load the user-specified text corpus for interleaving
        if shuffle:
            dataset = dataset.shuffle(seed=42)
        samples = []
        n_run = 0
        for data in dataset:
            line = data["text"].strip()
            line_encoded = model.tokenizer.encode(line)

            if len(line_encoded) > 512:
                sample = torch.tensor(line_encoded[:512])
                samples.append(sample)
                n_run += 1

            if n_run == 128:  # cap the interleaving text at 128 long samples
                break
        pure_text = samples

        examples = model.interleave_data_samples(examples, pure_text=pure_text)

    prompt_inputs, prompt_kwargs = model.generate_input(examples)

    return prompt_inputs, prompt_kwargs
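For reference, a sketch of a .jsonl calibration record that get_multimodal_calib_dataset can read: one JSON object per line, with an optional "image" field holding a single path or a list of paths relative to image_folder. The "conversations" layout shown here is an assumption (a LLaVA-style schema); the exact fields are whatever model.preprocess_data consumes.

import json

sample = {
    "image": ["train2017/000000000009.jpg"],   # resolved against image_folder
    "conversations": [                          # hypothetical schema, for illustration
        {"from": "human", "value": "<image>\nDescribe the image."},
        {"from": "gpt", "value": "A plate of food on a table."},
    ],
}
with open("calib.jsonl", "w") as f:
    f.write(json.dumps(sample) + "\n")          # one sample per line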
qmllm/calibration/pileval.py
ADDED
@@ -0,0 +1,44 @@
import torch
from datasets import load_dataset

def get_calib_dataset(data_path="", tokenizer=None, n_samples=512, block_size=512):
    if not data_path:
        dataset = load_dataset("mit-han-lab/pile-val-backup", split="validation")
    else:
        dataset = load_dataset(data_path, split="validation")
    dataset = dataset.shuffle(seed=42)
    samples = []
    n_run = 0
    for data in dataset:
        line = data["text"]
        line = line.strip()
        line_encoded = tokenizer.encode(line)
        if len(line_encoded) > 512:
            continue
        sample = torch.tensor([line_encoded])
        if sample.numel() == 0:
            continue
        samples.append(sample)
        n_run += 1
        if n_run == n_samples:
            break
    # now concatenate all samples and split according to block size
    cat_samples = torch.cat(samples, dim=1)
    n_split = cat_samples.shape[1] // block_size
    print(f" * Split into {n_split} blocks")

    samples = [cat_samples[:, i * block_size : (i + 1) * block_size] for i in range(n_split)]
    samples = torch.cat(samples, dim=0)

    prompt_inputs = {
        "input_ids": samples
    }

    prompt_kwargs = {
        "labels": samples,
        "attention_mask": None,
        "vision_mask": None,
        "caption_mask": None,
    }

    return prompt_inputs, prompt_kwargs
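The concat-and-split packing at the end of get_calib_dataset is the only shape-sensitive step; a minimal self-contained sketch with random token ids shows the resulting (n_split, block_size) layout. Tokens that do not fill a whole block are dropped.

import torch

lines = [torch.randint(0, 100, (1, n)) for n in (300, 450, 500)]  # three "encoded lines"
cat_samples = torch.cat(lines, dim=1)                 # shape (1, 1250)
block_size = 512
n_split = cat_samples.shape[1] // block_size          # 2 full blocks; 226 tokens dropped
blocks = torch.cat(
    [cat_samples[:, i * block_size:(i + 1) * block_size] for i in range(n_split)], dim=0
)
print(blocks.shape)                                   # torch.Size([2, 512])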
qmllm/datasets/__init__.py
ADDED
File without changes
qmllm/datasets/multimodal_dataset.py
ADDED
@@ -0,0 +1,50 @@
import os
import json
import copy
import torch
import transformers
import numpy as np
from PIL import Image
from dataclasses import dataclass, field
from typing import Dict, Optional, Sequence, List
from torch.utils.data import Dataset, DataLoader

# Import LLaVA modules
try:
    from llava.train.train import LazySupervisedDataset as LLaVA_LazySupervisedDataset
    from llava.train.train import DataCollatorForSupervisedDataset as LLaVA_DataCollatorForSupervisedDataset
except ImportError as e:
    print(f"LLaVA is not installed. Please install LLaVA to use this model.\nError: {e}")

# Import InternVL modules
try:
    from internvl.patch import concat_pad_data_collator
    from internvl.train.internvl_chat_finetune import build_datasets
except ImportError as e:
    print(f"internvl-chat is not installed. Please install internvl-chat to use this model.\nError: {e}")

def build_llava_dataloader(n_samples, tokenizer, shuffle, data_args):
    train_dataset = LLaVA_LazySupervisedDataset(tokenizer=tokenizer, data_path=data_args.data_path, data_args=data_args)
    data_collator = LLaVA_DataCollatorForSupervisedDataset(tokenizer=tokenizer)
    dataloader = DataLoader(dataset=train_dataset, batch_size=n_samples, shuffle=shuffle, collate_fn=data_collator)

    return dataloader

def build_internvl_dataloader(n_samples, model, tokenizer, shuffle, data_args):
    train_dataset = build_datasets(
        data_args=data_args,
        tokenizer=tokenizer,
        tcs_loader=None,
        model=model,
        group_by_length=True,
        dynamic_image_size=data_args.dynamic_image_size,
        use_thumbnail=data_args.use_thumbnail,
        min_dynamic_patch=data_args.min_dynamic_patch,
        max_dynamic_patch=data_args.max_dynamic_patch,
        normalize_type=data_args.normalize_type
    )
    data_collator = concat_pad_data_collator
    dataloader = DataLoader(dataset=train_dataset, batch_size=n_samples, shuffle=shuffle, collate_fn=data_collator)

    return dataloader
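A hedged usage sketch for the builders above; both return a DataLoader whose batch size is the entire calibration set, so one iteration yields a single collated batch. The data_args fields shown are placeholders, and a real tokenizer is assumed to exist.

from types import SimpleNamespace

# data_args must carry whatever the chosen builder reads: data_path (and the
# usual image options) for LLaVA, plus the InternVL patch options for
# build_internvl_dataloader. The values here are placeholders.
data_args = SimpleNamespace(data_path="calib.json")  # plus model-specific fields
# dataloader = build_llava_dataloader(n_samples=128, tokenizer=tokenizer,
#                                     shuffle=True, data_args=data_args)
# batch = next(iter(dataloader))  # one collated batch holding all 128 samples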
qmllm/methods/__init__.py
ADDED
File without changes
qmllm/methods/awq/__init__.py
ADDED
File without changes
qmllm/methods/awq/entry.py
ADDED
@@ -0,0 +1,40 @@
import torch

from qmllm.methods.awq.quantize.pre_quant import run_awq, apply_awq
from qmllm.methods.awq.quantize.quantizer import pseudo_quantize_model_weight


def awq_entry(model, prompt_tokens, prompt_mask, run_awq_process: bool, scale_path: str = None, zero_point: bool = True, q_group_size: int = 128, w_bit: int = 4):
    '''
    model: here the model is the LLM, you have to extract the LLM first!
    prompt_tokens: the prompt tokens
    prompt_mask: the prompt mask, mask the answer language tokens
    run_awq_process: whether to run the AWQ process
    '''
    q_config = {
        "zero_point": zero_point,  # by default True
        "q_group_size": q_group_size,  # whether to use group quantization
    }

    assert scale_path is not None

    # reparameterization
    if run_awq_process:
        awq_results = run_awq(
            model,
            prompt_tokens,
            prompt_mask,
            w_bit=w_bit,
            q_config=q_config,
            auto_scale=True,
        )
        torch.save(awq_results, scale_path)
        print("AWQ results saved at", scale_path)
    else:
        awq_results = torch.load(scale_path, map_location="cpu")
        apply_awq(model, awq_results)

    # weight quantization
    pseudo_quantize_model_weight(model, w_bit=w_bit, q_config=q_config)

    return model
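awq_entry runs in two modes keyed by run_awq_process; a minimal sketch of both (the model and calibration tensors are assumed to exist already, and the scale file name is a placeholder):

# First run: search per-channel scales on the calibration tokens and cache them.
# model = awq_entry(model, prompt_tokens, prompt_mask,
#                   run_awq_process=True, scale_path="awq_scales.pt", w_bit=4)
# Later runs: reload the cached scales, apply them, then pseudo-quantize the weights.
# model = awq_entry(model, prompt_tokens, prompt_mask,
#                   run_awq_process=False, scale_path="awq_scales.pt", w_bit=4)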
qmllm/methods/awq/quantize/__init__.py
ADDED
File without changes
qmllm/methods/awq/quantize/auto_scale.py
ADDED
@@ -0,0 +1,572 @@
import gc
import torch
import torch.nn as nn

from transformers.models.bloom.modeling_bloom import BloomBlock, BloomGelu
from transformers.models.opt.modeling_opt import OPTDecoderLayer
from transformers.models.llama.modeling_llama import LlamaDecoderLayer, LlamaRMSNorm
from transformers.activations import GELUActivation

from transformers.models.qwen2.modeling_qwen2 import Qwen2RMSNorm

from .qmodule import ScaledActivation
from qmllm.utils.search import get_op_by_name, get_op_name, set_op_by_name
from qmllm.quantization.quant_funcs import pseudo_quantize_tensor

__all__ = ["auto_scale_block", "apply_scale"]


@torch.no_grad()
def get_weight_scale(weight, q_group_size=-1):
    org_shape = weight.shape
    if q_group_size > 0:
        weight = weight.view(-1, q_group_size)
    scale = weight.abs() / weight.abs().amax(dim=1, keepdim=True)
    scale = scale.view(org_shape)
    scale = scale.mean(0)
    return scale


@torch.no_grad()
def get_act_scale(x):
    return x.abs().view(-1, x.shape[-1]).mean(0)


@torch.no_grad()
def scale_ln_fcs(ln, fcs, scales):
    if not isinstance(fcs, list):
        fcs = [fcs]

    scales = scales.to(ln.weight.device)

    ln.weight.div_(scales)
    if hasattr(ln, "bias") and ln.bias is not None:
        ln.bias.div_(scales)

    for fc in fcs:
        fc.weight.mul_(scales.view(1, -1))

    for p in ln.parameters():
        assert torch.isnan(p).sum() == 0
    for fc in fcs:
        for p in fc.parameters():
            assert torch.isnan(p).sum() == 0


@torch.no_grad()
def scale_fc_fc(fc1, fc2, scales):
    assert isinstance(fc1, nn.Linear)
    assert isinstance(fc2, nn.Linear)
    # assert fc1.out_features == fc2.in_features

    scales = scales.to(fc1.weight.device)

    # fc1.weight.div_(scales.view(-1, 1))
    fc1.weight[-scales.size(0) :].div_(scales.view(-1, 1))
    if fc1.bias is not None:
        fc1.bias.div_(scales.view(-1))

    fc2.weight.mul_(scales.view(1, -1))

    for p in fc1.parameters():
        assert torch.isnan(p).sum() == 0
    for p in fc2.parameters():
        assert torch.isnan(p).sum() == 0


@torch.no_grad()
def scale_gelu_fc(gelu, fc, scales):
    assert isinstance(gelu, (nn.GELU, BloomGelu, GELUActivation))
    assert isinstance(fc, nn.Linear)

    fc.weight.mul_(scales.view(1, -1).to(fc.weight.device))

    for p in fc.parameters():
        assert torch.isnan(p).sum() == 0


@torch.no_grad()
def auto_scale_block(module, module_kwargs, w_bit, q_config, input_feat, ans_mask):

    # firstly, get the weight quantize function
    if w_bit is not None:

        def w_quantize_func(p):
            return pseudo_quantize_tensor(
                p,
                n_bit=w_bit,
                **q_config,
            ).detach()

    else:

        def w_quantize_func(p):
            return p

    if "use_cache" in module_kwargs:
        module_kwargs.pop("use_cache")

    # find the best scale ratio
    def _search_module_scale(block, linears2scale: list, x, kwargs={}):
        # w: co, ci
        # x: n, ci
        x = x.to(next(block.parameters()).device)
        with torch.no_grad():
            org_out = block(x, **kwargs)
            if isinstance(org_out, tuple):
                org_out = org_out[0]

        x_max = get_act_scale(x)

        best_error = float("inf")
        best_ratio = -1
        best_scales = None

        n_grid = 20
        history = []

        org_sd = {k: v.cpu() for k, v in block.state_dict().items()}
        for ratio in range(n_grid):
            ratio = ratio * 1 / n_grid
            scales = x_max.pow(ratio).clamp(min=1e-4).view(-1)
            scales = scales / (scales.max() * scales.min()).sqrt()
            for fc in linears2scale:
                fc.weight.mul_(scales.view(1, -1).to(fc.weight.device))
                fc.weight.data = w_quantize_func(fc.weight.data) / (scales.view(1, -1))
            out = block(x, **kwargs)
            if isinstance(out, tuple):
                out = out[0]

            # loss = (
            #     (org_out - out).float().pow(2).mean().item()
            # )  # float prevents overflow

            if ans_mask is not None:
                ans_mask_expand = ans_mask.unsqueeze(-1).expand_as(out)
                masked_diff = (org_out - out).float().pow(2) * ans_mask_expand
                loss = (masked_diff.sum() / ans_mask_expand.sum()).item()  # keep a python float, like the unmasked branch
            else:
                loss = (
                    (org_out - out).float().pow(2).mean().item()
                )  # float prevents overflow
            history.append(loss)
            is_best = loss < best_error
            if is_best:
                best_error = loss
                best_ratio = ratio
                best_scales = scales
            block.load_state_dict(org_sd)
        if best_ratio == -1:
            print(history)
            raise Exception
        # print(best_ratio)
        best_scales = best_scales.view(-1)

        assert torch.isnan(best_scales).sum() == 0, best_scales
        return best_scales.detach()

    def _auto_get_scale(prev_op, layers, inp, module2inspect=None, kwargs={}):
        # module2inspect: if given, we will check the output diff of this module instead of layers
        if module2inspect is None:
            assert len(layers) == 1
            module2inspect = layers[0]

        scales = _search_module_scale(module2inspect, layers, inp, kwargs)
        scales = scales.detach().cpu()
        # prev_op_name, [layer_name], scale
        return (
            get_op_name(module, prev_op),
            tuple([get_op_name(module, m) for m in layers]),
            scales,
        )

    scales_list = []  # return the searched scales

    if isinstance(module, OPTDecoderLayer):
        # attention input
        scales_list.append(
            _auto_get_scale(
                prev_op=module.self_attn_layer_norm,
                layers=[
                    module.self_attn.q_proj,
                    module.self_attn.k_proj,
                    module.self_attn.v_proj,
                ],
                inp=input_feat["self_attn.q_proj"],
                module2inspect=module.self_attn,
                kwargs=module_kwargs,
            )
        )
        # attn out
        scales_list.append(
            _auto_get_scale(
                prev_op=module.self_attn.v_proj,
                layers=[module.self_attn.out_proj],
                inp=input_feat["self_attn.out_proj"],
            )
        )
        # fc1
        scales_list.append(
            _auto_get_scale(
                prev_op=module.final_layer_norm,
                layers=[module.fc1],
                inp=input_feat["fc1"],
            )
        )
        # fc2
        scales_list.append(
            _auto_get_scale(
                prev_op=module.fc1,
                layers=[module.fc2],
                inp=input_feat["fc2"],
            )
        )

    elif isinstance(module, LlamaDecoderLayer):
        # attention input
        scales_list.append(
            _auto_get_scale(
                prev_op=module.input_layernorm,
                layers=[
                    module.self_attn.q_proj,
                    module.self_attn.k_proj,
                    module.self_attn.v_proj,
                ],
                inp=input_feat["self_attn.q_proj"],
                module2inspect=module.self_attn,
                kwargs=module_kwargs,
            )
        )
        # attn out
        # Please refer to https://github.com/mit-han-lab/llm-awq/pull/67#issue-1850622696
        if module.self_attn.v_proj.weight.shape == module.self_attn.o_proj.weight.shape:
            scales_list.append(
                _auto_get_scale(
                    prev_op=module.self_attn.v_proj,
                    layers=[module.self_attn.o_proj],
                    inp=input_feat["self_attn.o_proj"],
                )
            )
        # fc1
        scales_list.append(
            _auto_get_scale(
                prev_op=module.post_attention_layernorm,
                layers=[module.mlp.gate_proj, module.mlp.up_proj],
                inp=input_feat["mlp.gate_proj"],
                module2inspect=module.mlp,
            )
        )
        # fc2
        scales_list.append(
            _auto_get_scale(
                prev_op=module.mlp.up_proj,
                layers=[module.mlp.down_proj],
                inp=input_feat["mlp.down_proj"],
            )
        )

    elif isinstance(module, BloomBlock):
        # attention input
        scales_list.append(
            _auto_get_scale(
                prev_op=module.input_layernorm,
                layers=[module.self_attention.query_key_value],
                inp=input_feat["self_attention.query_key_value"],
                module2inspect=module,
                kwargs=module_kwargs,
            )
        )
        # attn out
        # Please refer to https://github.com/mit-han-lab/llm-awq/issues/2#issuecomment-1606297469
        """
        scales_list.append(_auto_get_scale(
            prev_op=module.self_attention.query_key_value,
            layers=[module.self_attention.dense],
            inp=input_feat['self_attention.dense'],
        ))
        """
        # fc1
        scales_list.append(
            _auto_get_scale(
                prev_op=module.post_attention_layernorm,
                layers=[module.mlp.dense_h_to_4h],
                inp=input_feat["mlp.dense_h_to_4h"],
                module2inspect=module,
                kwargs=module_kwargs,
            )
        )
        # fc2
        scales_list.append(
            _auto_get_scale(
                prev_op=module.mlp.gelu_impl,
                layers=[module.mlp.dense_4h_to_h],
                inp=input_feat["mlp.dense_4h_to_h"],
            )
        )
    elif "mpt" in str(module.__class__).lower():
        # attention input
        scales_list.append(
            _auto_get_scale(
                prev_op=module.norm_1,
                layers=[module.attn.Wqkv],
                inp=input_feat["attn.Wqkv"],
                module2inspect=module.attn,
                kwargs=module_kwargs,
            )
        )

        # attn out
        scales_list.append(
            _auto_get_scale(
                prev_op=module.attn.Wqkv,
                layers=[module.attn.out_proj],
                inp=input_feat["attn.out_proj"],
            )
        )
        # fc1
        scales_list.append(
            _auto_get_scale(
                prev_op=module.norm_2,
                layers=[module.ffn.up_proj],
                inp=input_feat["ffn.up_proj"],
                module2inspect=module.ffn,
            )
        )
        # fc2
        scales_list.append(
            _auto_get_scale(
                prev_op=module.ffn.act,
                layers=[module.ffn.down_proj],
                inp=input_feat["ffn.down_proj"],
            )
        )

    elif "falcon" in str(module.__class__).lower():
        # attn out
        # Haotian: TBD: need to handle repeated scales for MQ
        """
        scales_list.append(_auto_get_scale(
            prev_op=module.self_attention.query_key_value,
            layers=[module.self_attention.dense],
            inp=input_feat['self_attention.dense'],
        ))
        """
        # fc1, as long as it is scaled, everything is screwed up
        if "falcon-7b" in str(module.__class__).lower():
            scales_list.append(
                _auto_get_scale(
                    prev_op=module.input_layernorm,
                    layers=[
                        module.mlp.dense_h_to_4h,
                        module.self_attention.query_key_value,
                    ],
                    inp=input_feat["self_attention.query_key_value"],
                    module2inspect=module,
                    kwargs=module_kwargs,
                )
            )
        elif "falcon-40b" in str(module.__class__).lower():
            scales_list.append(
                _auto_get_scale(
                    prev_op=module.ln_attn,
                    layers=[module.self_attention.query_key_value],
                    inp=input_feat["self_attention.query_key_value"],
                    module2inspect=module,
                    kwargs=module_kwargs,
                )
            )
            scales_list.append(
                _auto_get_scale(
                    prev_op=module.ln_mlp,
                    layers=[module.mlp.dense_h_to_4h],
                    inp=input_feat["mlp.dense_h_to_4h"],
                    module2inspect=module,
                    kwargs=module_kwargs,
                )
            )
        else:
            raise NotImplementedError(
                "Unknown Falcon architecture, currently only falcon-7b and falcon-40b are supported"
            )
        # fc2
        scales_list.append(
            _auto_get_scale(
                prev_op=module.mlp.act,
                layers=[module.mlp.dense_4h_to_h],
                inp=input_feat["mlp.dense_4h_to_h"],
            )
        )
    elif "bigcode" in str(module.__class__).lower():
        scales_list.append(
            _auto_get_scale(
                prev_op=module.ln_1,
                layers=[module.attn.c_attn],
                inp=input_feat["attn.c_attn"],
                module2inspect=module.attn,
                kwargs=module_kwargs,
            )
        )
        # fc1
        scales_list.append(
            _auto_get_scale(
                prev_op=module.ln_2,
                layers=[module.mlp.c_fc],
                inp=input_feat["mlp.c_fc"],
                module2inspect=module.mlp,
            )
        )
        # fc2
        scales_list.append(
            _auto_get_scale(
                prev_op=module.mlp.act,
                layers=[module.mlp.c_proj],
                inp=input_feat["mlp.c_proj"],
            )
        )
    elif "neox" in str(module.__class__).lower():
        scales_list.append(
            _auto_get_scale(
                prev_op=module.input_layernorm,
                layers=[module.attention.query_key_value],
                inp=input_feat["attention.query_key_value"],
                module2inspect=module.attention,
                kwargs=module_kwargs,
            )
        )
        # fc1
        scales_list.append(
            _auto_get_scale(
                prev_op=module.post_attention_layernorm,
                layers=[module.mlp.dense_h_to_4h],
                inp=input_feat["mlp.dense_h_to_4h"],
                module2inspect=module.mlp,
            )
        )
        # fc2
        scales_list.append(
            _auto_get_scale(
                prev_op=module.mlp.act,
                layers=[module.mlp.dense_4h_to_h],
                inp=input_feat["mlp.dense_4h_to_h"],
            )
        )
    elif module.__class__.__name__ == "Qwen2DecoderLayer":
        # attention input
        scales_list.append(
            _auto_get_scale(
                prev_op=module.input_layernorm,
                layers=[
                    module.self_attn.q_proj,
                    module.self_attn.k_proj,
                    module.self_attn.v_proj,
                ],
                inp=input_feat["self_attn.q_proj"],
                module2inspect=module.self_attn,
                kwargs=module_kwargs,
            )
        )
        # attn out
        # Please refer to https://github.com/mit-han-lab/llm-awq/pull/67#issue-1850622696
        if module.self_attn.v_proj.weight.shape == module.self_attn.o_proj.weight.shape:
            scales_list.append(
                _auto_get_scale(
                    prev_op=module.self_attn.v_proj,
                    layers=[module.self_attn.o_proj],
                    inp=input_feat["self_attn.o_proj"],
                )
            )
        # fc1
        scales_list.append(
            _auto_get_scale(
                prev_op=module.post_attention_layernorm,
                layers=[module.mlp.gate_proj, module.mlp.up_proj],
                inp=input_feat["mlp.gate_proj"],
                module2inspect=module.mlp,
            )
        )
        # fc2
        scales_list.append(
            _auto_get_scale(
                prev_op=module.mlp.up_proj,
                layers=[module.mlp.down_proj],
                inp=input_feat["mlp.down_proj"],
            )
        )
    elif module.__class__.__name__ == "InternLM2DecoderLayer":
        # attention input
        scales_list.append(
            _auto_get_scale(
                prev_op=module.attention_norm,
                layers=[
                    module.attention.wqkv,
                ],
                inp=input_feat["attention.wqkv"],
                module2inspect=module.attention,
                kwargs=module_kwargs,
            )
        )
        # attn out
        # Please refer to https://github.com/mit-han-lab/llm-awq/pull/67#issue-1850622696
        if module.attention.wqkv.weight.shape == module.attention.wo.weight.shape:
            scales_list.append(
                _auto_get_scale(
                    prev_op=module.attention.wqkv,
                    layers=[module.attention.wo],
                    inp=input_feat["attention.wo"],
                )
            )
        # fc1
        scales_list.append(
            _auto_get_scale(
                prev_op=module.ffn_norm,
                layers=[module.feed_forward.w1, module.feed_forward.w3],
                inp=input_feat["feed_forward.w1"],
                module2inspect=module.feed_forward,
            )
        )
        # fc2
        scales_list.append(
            _auto_get_scale(
                prev_op=module.feed_forward.w3,
                layers=[module.feed_forward.w2],
                inp=input_feat["feed_forward.w2"],
            )
        )
    else:
        raise NotImplementedError(f"{type(module)} not supported yet!")

    return scales_list


def apply_scale(module, scales_list, input_feat_dict=None):
    for prev_op_name, layer_names, scales in scales_list:
        prev_op = get_op_by_name(module, prev_op_name)
        layers = [get_op_by_name(module, name) for name in layer_names]

        prev_op.cuda()
        for layer in layers:
            layer.cuda()
        scales.cuda()

        if isinstance(prev_op, nn.Linear):
            assert len(layers) == 1
            scale_fc_fc(prev_op, layers[0], scales)
        elif isinstance(prev_op, (nn.LayerNorm, LlamaRMSNorm, Qwen2RMSNorm)) or prev_op.__class__.__name__ == "InternLM2RMSNorm":
            scale_ln_fcs(prev_op, layers, scales)
        elif isinstance(prev_op, (nn.GELU, BloomGelu, GELUActivation)):
            new_module = ScaledActivation(prev_op, scales)
            set_op_by_name(module, prev_op_name, new_module)
            scale_gelu_fc(prev_op, layers[0], scales)
        else:
            raise NotImplementedError(f"prev_op {type(prev_op)} not supported yet!")

        # apply the scaling to input feat if given
        if input_feat_dict is not None:
            for layer_name in layer_names:
                inp = input_feat_dict[layer_name]
                inp.div_(scales.view(1, -1).to(inp.device))

        prev_op.cpu()
        for layer in layers:
            layer.cpu()
        scales.cpu()
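The core of _search_module_scale is the one-line scale formula swept over a 20-point grid; below is a standalone illustration (the x_max values are made-up per-channel activation magnitudes):

import torch

x_max = torch.tensor([0.01, 0.1, 1.0, 10.0])      # per-channel activation means
for ratio in (0.0, 0.5, 0.95):
    scales = x_max.pow(ratio).clamp(min=1e-4)
    scales = scales / (scales.max() * scales.min()).sqrt()
    print(f"ratio={ratio}: {scales}")
# ratio=0 gives uniform scales (plain quantization); larger ratios shift more
# of the quantization difficulty from salient activations into the weights.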
qmllm/methods/awq/quantize/pre_quant.py
ADDED
@@ -0,0 +1,207 @@
import torch
import torch.nn as nn
import tqdm
import gc
import functools
from collections import defaultdict
from typing import List

from transformers.models.bloom.modeling_bloom import BloomForCausalLM
from transformers.models.opt.modeling_opt import OPTForCausalLM
from transformers.models.llama.modeling_llama import LlamaForCausalLM

from qmllm.calibration.pileval import get_calib_dataset
from qmllm.calibration.coco_vl import get_multimodal_calib_dataset
from qmllm.utils.search import append_str_prefix, get_op_name

from qmllm.methods.awq.quantize.auto_scale import auto_scale_block, apply_scale

__all__ = ["run_awq"]


def get_named_linears(module):
    return {name: m for name, m in module.named_modules() if isinstance(m, nn.Linear)}


def get_blocks(model):
    if model.__class__.__name__ == "LlamaForCausalLM":
        layers = model.model.layers
    elif model.__class__.__name__ == "LlavaLlamaForCausalLM":
        # layers = [model.model.layers, model.model.vision_tower.vision_tower.vision_model.encoder.layers]
        layers = model.model.layers
    elif model.__class__.__name__ == "LlavaQwenForCausalLM":
        layers = model.model.layers
    elif model.__class__.__name__ == "InternLM2ForCausalLM":
        layers = model.model.layers
    elif model.__class__.__name__ == "InternVLChatModel":
        layers = model.language_model.model.layers
    elif isinstance(model, OPTForCausalLM):
        layers = model.model.decoder.layers
    elif isinstance(model, BloomForCausalLM):
        layers = model.transformer.h
    elif "mpt" in str(model.__class__).lower():
        layers = model.transformer.blocks
    elif "falcon" in str(model.__class__).lower():
        layers = model.transformer.h
    elif "bigcode" in str(model.__class__).lower():
        layers = model.transformer.h
    elif "neox" in str(model.__class__).lower():
        layers = model.gpt_neox.layers
    else:
        raise NotImplementedError(type(model))
    return layers


def move_embed(model, device):
    if isinstance(model, LlamaForCausalLM):
        model.model.embed_tokens = model.model.embed_tokens.to(device)
        model.model.rotary_emb = model.model.rotary_emb.to(device)
    elif isinstance(model, OPTForCausalLM):
        model.model.decoder.embed_tokens = model.model.decoder.embed_tokens.to(device)
        model.model.decoder.embed_positions = model.model.decoder.embed_positions.to(
            device
        )
    elif isinstance(model, BloomForCausalLM):
        model.transformer.word_embeddings = model.transformer.word_embeddings.to(device)
        model.transformer.word_embeddings_layernorm = (
            model.transformer.word_embeddings_layernorm.to(device)
        )
    elif "mpt" in str(model.__class__).lower():
        model.transformer.wte = model.transformer.wte.to(device)
        model.transformer.emb_drop = model.transformer.emb_drop.to(device)
    elif "falcon" in str(model.__class__).lower():
        model.transformer.word_embeddings = model.transformer.word_embeddings.to(device)
    elif "bigcode" in str(model.__class__).lower():
        model.transformer.wte = model.transformer.wte.to(device)
        model.transformer.wpe = model.transformer.wpe.to(device)
        model.transformer.drop = model.transformer.drop.to(device)
    elif "neox" in str(model.__class__).lower():
        model.gpt_neox.embed_in = model.gpt_neox.embed_in.to(device)
        model.gpt_neox.emb_dropout = model.gpt_neox.emb_dropout.to(device)
        model.embed_out = model.embed_out.to(device)
    elif model.__class__.__name__ == "LlavaQwenForCausalLM":
        model.embed_tokens = model.embed_tokens.to(device)
        # model.model.rotary_emb = model.model.rotary_emb.to(device)
    elif model.__class__.__name__ == "InternLM2ForCausalLM":
        model.tok_embeddings = model.tok_embeddings.to(device)
    elif model.__class__.__name__ == "InternVLChatModel":
        model.model.tok_embeddings = model.model.tok_embeddings.to(device)
    else:
        raise NotImplementedError(type(model))


@torch.no_grad()
def run_awq(
    model,
    prompt_tokens,
    prompt_mask,
    w_bit,
    q_config,
    auto_scale=True,
):
    if "bigcode" in str(model.__class__).lower():
        # otherwise attention_mask will always be on cpu.
        model.transformer.bias = model.transformer.bias.to("cuda")

    layers = get_blocks(model)

    inps = []
    layer_kwargs = {}

    layers[0] = layers[0].cuda()
    move_embed(model, "cuda")

    # get input and kwargs to layer 0
    # with_kwargs is only supported in PyTorch 2.0
    # use this Catcher hack for now
    class Catcher(nn.Module):
        def __init__(self, module):
            super().__init__()
            self.module = module

        def forward(self, inp, **kwargs):
            inps.append(inp)
            layer_kwargs.update(kwargs)
            raise ValueError  # early exit to break later inference

    # patch layer 0 to catch input and kwargs
    layers[0] = Catcher(layers[0])
    try:
        model(prompt_tokens.to(next(model.parameters()).device), use_cache=False)
    except ValueError:
        # expected: the Catcher raises on purpose to abort the forward pass
        pass

    layers[0] = layers[0].module  # restore
    inps = inps[0]

    layers[0] = layers[0].cpu()
    move_embed(model, "cpu")

    gc.collect()
    torch.cuda.empty_cache()

    awq_results = {
        "scale": [],
    }

    # solve layer by layer
    for i in tqdm.tqdm(range(len(layers)), desc="Running AWQ..."):
        layer = layers[i]
        layer = layer.cuda()
        named_linears = get_named_linears(layer)

        # firstly, get input features of all linear layers
        def cache_input_hook(m, x, y, name, feat_dict):
            x = x[0]
            x = x.detach().cpu()
            feat_dict[name].append(x)

        input_feat = defaultdict(list)
        handles = []
        for name in named_linears:
            handles.append(
                named_linears[name].register_forward_hook(
                    functools.partial(cache_input_hook, name=name, feat_dict=input_feat)
                )
            )
        inps = inps.to(next(layer.parameters()).device)  # in case multi-gpu
        # get output as next layer's input
        inps = layer(inps, **layer_kwargs)[0]
        for h in handles:
            h.remove()
        # now solve for scaling
        input_feat = {k: torch.cat(v, dim=0) for k, v in input_feat.items()}

        # Clear GPU memory
        torch.cuda.empty_cache()

        if auto_scale:  # if it applies, we should also modify the input_feat with scales
            scales_list = auto_scale_block(
                layer,
                layer_kwargs,
                w_bit=w_bit,
                q_config=q_config,
                input_feat=input_feat,
                ans_mask=prompt_mask,
            )
            # apply_scale(layer, scales_list, input_feat_dict=input_feat)
            apply_scale(layers[i], scales_list, input_feat_dict=input_feat)
            # append prefix to make names global
            awq_results["scale"] += append_str_prefix(
                scales_list, get_op_name(model, layer) + "."
            )

        # Clear GPU memory
        torch.cuda.empty_cache()

        layer = layer.cpu()
        # Haotian: check activation replacement
        del input_feat
        gc.collect()
        torch.cuda.empty_cache()

    return awq_results


def apply_awq(model, awq_results):
    apply_scale(model, awq_results["scale"])
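For orientation, a minimal end-to-end sketch of how run_awq and apply_awq fit together. This is not from the repo: the checkpoint name, the tiny calibration batch, and the 4-bit / group-128 settings are illustrative assumptions, and a CUDA device is required since run_awq moves blocks to the GPU.

# Hypothetical usage sketch (assumptions flagged above): search AWQ scales on a
# Llama checkpoint, cache them, then fold them into a fresh copy of the model.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

from qmllm.methods.awq.quantize.pre_quant import run_awq, apply_awq

model_name = "meta-llama/Llama-2-7b-hf"  # assumption: any LlamaForCausalLM works
tok = AutoTokenizer.from_pretrained(model_name)
tok.pad_token = tok.eos_token  # Llama tokenizers ship without a pad token
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16)

# a tiny stand-in for a real calibration set
batch = tok(["The quick brown fox jumps over the lazy dog."] * 4,
            return_tensors="pt", padding=True)

awq_results = run_awq(
    model,
    prompt_tokens=batch["input_ids"],
    prompt_mask=batch["attention_mask"],  # stand-in for an answer-token mask
    w_bit=4,
    q_config={"zero_point": True, "q_group_size": 128},
)
torch.save(awq_results, "awq_scales.pt")

# later: re-apply the searched scales to another copy of the same model
model2 = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16)
apply_awq(model2, torch.load("awq_scales.pt"))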
qmllm/methods/awq/quantize/qmodule.py
ADDED
@@ -0,0 +1,31 @@
import math
import torch
import torch.nn as nn


def make_divisible(c, divisor):
    return (c + divisor - 1) // divisor


def calculate_zeros_width(in_features, group_size=128, pack_num=8):
    if group_size >= 128:
        size_multiplier = 1
    elif group_size == 64:
        size_multiplier = 2
    elif group_size == 32:
        size_multiplier = 4
    else:
        raise NotImplementedError

    base_width = make_divisible(in_features // group_size, pack_num)
    base_width = make_divisible(base_width, size_multiplier) * size_multiplier
    return base_width


class ScaledActivation(nn.Module):
    def __init__(self, module, scales):
        super().__init__()
        self.act = module
        self.scales = nn.Parameter(scales.data)

    def forward(self, x):
        return self.act(x) / self.scales.view(1, 1, -1).to(x.device)
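A quick worked example of calculate_zeros_width; the LLaMA-7B-like hidden width of 4096 is a hypothetical size chosen only for illustration. make_divisible(c, divisor) is ceil(c / divisor):

# group_size=128, pack_num=8: 4096 // 128 = 32 groups, ceil(32 / 8) = 4,
# and size_multiplier=1 leaves the width at 4
assert (4096 // 128 + 8 - 1) // 8 == 4

# group_size=64: 4096 // 64 = 64 groups, ceil(64 / 8) = 8,
# then rounded up to a multiple of size_multiplier=2 -> still 8
assert ((4096 // 64 + 8 - 1) // 8 + 2 - 1) // 2 * 2 == 8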
qmllm/methods/awq/quantize/quantizer.py
ADDED
@@ -0,0 +1,76 @@
import torch
import torch.nn as nn
from tqdm import tqdm
import gc
from qmllm.methods.awq.quantize.qmodule import ScaledActivation
from qmllm.utils.search import set_op_by_name

from transformers.models.bloom.modeling_bloom import BloomBlock
from qmllm.quantization.quant_funcs import pseudo_quantize_tensor

EMBEDDING_KEYWORDS = ["embed"]
LM_HEAD_KEYWORDS = ["lm_head", "embed_out", "output"]


def scale_activations(module):
    param = next(module.parameters())
    dtype = param.dtype
    device = param.device
    if isinstance(module, BloomBlock):
        if isinstance(module.mlp.gelu_impl, ScaledActivation):
            return
        c = module.mlp.dense_h_to_4h.out_features
        act = ScaledActivation(
            module.mlp.gelu_impl, torch.ones(c, dtype=dtype, device=device)
        )
        set_op_by_name(module, "mlp.gelu_impl", act)
    elif "mptblock" in str(module.__class__.__name__).lower():
        if isinstance(module.ffn.act, ScaledActivation):
            return
        c = module.ffn.up_proj.out_features
        act = ScaledActivation(
            module.ffn.act, torch.ones(c, dtype=dtype, device=device)
        )
        set_op_by_name(module, "ffn.act", act)
    elif "falcon" in str(module.__class__).lower():
        if isinstance(module.mlp.act, ScaledActivation):
            return
        c = module.mlp.dense_h_to_4h.out_features
        act = ScaledActivation(
            module.mlp.act, torch.ones(c, dtype=dtype, device=device)
        )
        set_op_by_name(module, "mlp.act", act)
    elif "bigcode" in str(module.__class__).lower():
        if isinstance(module.mlp.act, ScaledActivation):
            return
        c = module.mlp.c_proj.out_features
        act = ScaledActivation(
            module.mlp.act, torch.ones(c, dtype=dtype, device=device)
        )
        set_op_by_name(module, "mlp.act", act)
    elif "neox" in str(module.__class__).lower():
        if isinstance(module.mlp.act, ScaledActivation):
            return
        c = module.mlp.dense_h_to_4h.out_features
        act = ScaledActivation(
            module.mlp.act, torch.ones(c, dtype=dtype, device=device)
        )
        set_op_by_name(module, "mlp.act", act)


@torch.no_grad()
def pseudo_quantize_model_weight(
    model,
    w_bit,
    q_config,
):
    from .pre_quant import get_blocks, get_named_linears

    layers = get_blocks(model)
    for i in tqdm(range(len(layers)), desc="pseudo weight quantization..."):
        named_linears = get_named_linears(layers[i])
        for n, m in named_linears.items():
            m.cuda()
            m.weight.data = pseudo_quantize_tensor(
                m.weight.data, n_bit=w_bit, **q_config
            )
            m.cpu()
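pseudo_quantize_tensor itself lives in qmllm/quantization/quant_funcs.py and is not shown in this diff. As a rough mental model, here is a self-contained sketch of the usual group-wise asymmetric fake quantization it stands for; this is a reference implementation under those assumptions, not the repo's code.

import torch

def fake_quantize_groupwise(w: torch.Tensor, n_bit: int = 4,
                            q_group_size: int = 128,
                            zero_point: bool = True) -> torch.Tensor:
    """Quantize-dequantize `w` per group of `q_group_size` input channels."""
    org_shape = w.shape
    wg = w.reshape(-1, q_group_size)
    if zero_point:  # asymmetric: map [min, max] onto [0, 2^n - 1]
        max_v = wg.amax(dim=1, keepdim=True)
        min_v = wg.amin(dim=1, keepdim=True)
        qmax = 2 ** n_bit - 1
        scale = (max_v - min_v).clamp(min=1e-5) / qmax
        zeros = (-min_v / scale).round()
        q = (wg / scale + zeros).round().clamp(0, qmax)
        wg = (q - zeros) * scale
    else:  # symmetric: map [-absmax, absmax] onto the signed integer range
        qmax = 2 ** (n_bit - 1) - 1
        scale = wg.abs().amax(dim=1, keepdim=True).clamp(min=1e-5) / qmax
        wg = (wg / scale).round().clamp(-qmax - 1, qmax) * scale
    return wg.reshape(org_shape)

w = torch.randn(8, 256)
w_q = fake_quantize_groupwise(w, n_bit=4, q_group_size=128)
print((w - w_q).abs().mean())  # quantization error: small but nonzero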
qmllm/methods/mbq/__init__.py
ADDED
File without changes
qmllm/methods/mbq/entry.py
ADDED
@@ -0,0 +1,58 @@
import os
import torch

from qmllm.methods.mbq.quantize.pre_quant import run_mbq, apply_mbq
from qmllm.methods.mbq.quantize.quantizer import pseudo_quantize_model_weight, pseudo_quantize_model_weight_act


def mbq_entry(
    model,
    prompt_inputs,
    prompt_kwargs,
    run_mbq_process: bool,
    pseudo_quant: bool,
    scale_path: str = None,
    zero_point: bool = True,
    q_group_size: int = 128,
    w_bit: int = 4,
    a_bit: int = 16,
    wa_quant: bool = False,
    reweight: bool = False,
    distort: bool = False,
    loss_mode: str = "mae",
):
    '''
    model: here the model is the LLM; you have to extract the LLM first!
    prompt_inputs: the prompt tokens
    prompt_kwargs: extra prompt kwargs, e.g. the prompt mask that masks the answer language tokens
    run_mbq_process: whether to run the MBQ process
    '''
    q_config = {
        "zero_point": zero_point,  # by default True
        "q_group_size": q_group_size,  # group size for group-wise quantization
    }

    assert scale_path is not None

    scale_exist = os.path.exists(scale_path)
    # reparameterization
    if run_mbq_process and not scale_exist:
        model.to_cpu()
        mbq_results = run_mbq(
            model,
            prompt_inputs,
            prompt_kwargs,
            w_bit=w_bit,
            a_bit=a_bit,
            q_config=q_config,
            auto_scale=True,
            loss_mode=loss_mode,
            wa_quant=wa_quant,
            reweight=reweight,
            distort=distort,
        )

        dirpath = os.path.dirname(scale_path)
        os.makedirs(dirpath, exist_ok=True)

        torch.save(mbq_results, scale_path)
        print("MBQ results saved at", scale_path)

    if pseudo_quant:
        mbq_results = torch.load(scale_path, map_location="cpu")
        apply_mbq(model.model, mbq_results)

        if not wa_quant:
            # weight quantization
            pseudo_quantize_model_weight(model.model, w_bit=w_bit, q_config=q_config)
        else:
            # weight activation quantization
            pseudo_quantize_model_weight_act(model.model, w_bit=w_bit, a_bit=a_bit)

    model.to_cuda()
    return model
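A hedged invocation sketch for mbq_entry. The wrapper object, its calibration tensors, and the cache path are assumptions: the model is expected to expose .model (the underlying LLM), .to_cpu() and .to_cuda(), which is exactly what mbq_entry calls above.

# Hypothetical driver code, not from the repo. Callers supply `vlm` (a qmllm
# model wrapper) plus prompt_inputs / prompt_kwargs from the calibration pipeline.
from qmllm.methods.mbq.entry import mbq_entry

def quantize_vlm_w4(vlm, prompt_inputs, prompt_kwargs):
    """Search MBQ scales once, cache them, and pseudo-quantize to W4 (group 128)."""
    return mbq_entry(
        vlm,
        prompt_inputs,
        prompt_kwargs,
        run_mbq_process=True,   # the search is skipped if scale_path already exists
        pseudo_quant=True,      # fold scales in and fake-quantize the weights
        scale_path="scale_cache/mbq_w4_g128.pt",
        w_bit=4,
        q_group_size=128,
        wa_quant=False,         # weight-only; set True (and a_bit=8) for W/A quant
    )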
qmllm/methods/mbq/quantize/__init__.py
ADDED
File without changes
qmllm/methods/mbq/quantize/auto_scale.py
ADDED
@@ -0,0 +1,664 @@
import gc
import torch
import torch.nn as nn

from transformers.models.bloom.modeling_bloom import BloomBlock, BloomGelu
from transformers.models.opt.modeling_opt import OPTDecoderLayer
from transformers.models.llama.modeling_llama import LlamaDecoderLayer, LlamaRMSNorm
from transformers.activations import GELUActivation

from transformers.models.qwen2.modeling_qwen2 import Qwen2RMSNorm

from .qmodule import ScaledActivation
from qmllm.utils.search import get_op_by_name, get_op_name, set_op_by_name
from qmllm.quantization.quant_funcs import pseudo_quantize_tensor

__all__ = ["auto_scale_block", "apply_scale"]


@torch.no_grad()
def get_weight_scale(weight, q_group_size=-1):
    org_shape = weight.shape
    if q_group_size > 0:
        weight = weight.view(-1, q_group_size)
    scale = weight.abs() / weight.abs().amax(dim=1, keepdim=True)
    scale = scale.view(org_shape)
    scale = scale.mean(0)
    return scale


@torch.no_grad()
def get_act_scale(x):
    return x.abs().view(-1, x.shape[-1]).mean(0)


@torch.no_grad()
def scale_ln_fcs(ln, fcs, scales):
    if not isinstance(fcs, list):
        fcs = [fcs]

    scales = scales.to(ln.weight.device)

    ln.weight.div_(scales)
    if hasattr(ln, "bias") and ln.bias is not None:
        ln.bias.div_(scales)

    for fc in fcs:
        fc.weight.mul_(scales.view(1, -1))

    for p in ln.parameters():
        assert torch.isnan(p).sum() == 0
    for fc in fcs:
        for p in fc.parameters():
            assert torch.isnan(p).sum() == 0


@torch.no_grad()
def scale_fc_fc(fc1, fc2, scales):
    assert isinstance(fc1, nn.Linear)
    assert isinstance(fc2, nn.Linear)
    # assert fc1.out_features == fc2.in_features

    scales = scales.to(fc1.weight.device)

    # fc1.weight.div_(scales.view(-1, 1))
    fc1.weight[-scales.size(0) :].div_(scales.view(-1, 1))
    if fc1.bias is not None:
        fc1.bias.div_(scales.view(-1))

    fc2.weight.mul_(scales.view(1, -1))

    for p in fc1.parameters():
        assert torch.isnan(p).sum() == 0
    for p in fc2.parameters():
        assert torch.isnan(p).sum() == 0


@torch.no_grad()
def scale_gelu_fc(gelu, fc, scales):
    assert isinstance(gelu, (nn.GELU, BloomGelu, GELUActivation))
    assert isinstance(fc, nn.Linear)

    fc.weight.mul_(scales.view(1, -1).to(fc.weight.device))

    for p in fc.parameters():
        assert torch.isnan(p).sum() == 0


@torch.no_grad()
def auto_scale_block(module, module_kwargs, w_bit, q_config, input_feat, ans_mask, vis_mask, reweight_ratio_dict, loss_mode="mae"):

    # firstly, get the weight quantize function
    if w_bit is not None:

        def w_quantize_func(p):
            return pseudo_quantize_tensor(
                p,
                n_bits=w_bit,
                **q_config,
            ).detach()

    else:

        def w_quantize_func(p):
            return p

    if "use_cache" in module_kwargs:
        module_kwargs.pop("use_cache")

    # find the best scale ratio
    def _search_module_scale(block, linears2scale: list, x, reweight_ratio=None, kwargs={}):
        # w: co, ci
        # x: n, ci
        x = x.to(next(block.parameters()).device)
        with torch.no_grad():
            org_out = block(x, **kwargs)
            if isinstance(org_out, tuple):
                org_out = org_out[0]

        x_max = get_act_scale(x)

        best_error = float("inf")
        best_ratio = -1
        best_scales = None

        n_grid = 20
        history = []

        org_sd = {k: v.cpu() for k, v in block.state_dict().items()}
        for ratio in range(n_grid):
            ratio = ratio * 1 / n_grid
            scales = x_max.pow(ratio).clamp(min=1e-4).view(-1)
            scales = scales / (scales.max() * scales.min()).sqrt()
            for fc in linears2scale:
                fc.weight.mul_(scales.view(1, -1).to(fc.weight.device))
                fc.weight.data = w_quantize_func(fc.weight.data) / (scales.view(1, -1))
            out = block(x, **kwargs)
            if isinstance(out, tuple):
                out = out[0]

            # loss = (
            #     (org_out - out).float().pow(2).mean().item()
            # )  # float prevents overflow

            if loss_mode == "mse":
                if ans_mask is not None and vis_mask is not None:
                    ans_mask_expand = ans_mask.unsqueeze(-1).expand_as(out)
                    vis_mask_expand = vis_mask.unsqueeze(-1).expand_as(out).cuda()
                    masked_diff_ans = ((org_out - out).float().pow(2) * ans_mask_expand)
                    masked_diff_vis = ((org_out - out).float().pow(2) * vis_mask_expand)
                    if reweight_ratio is not None:
                        loss = masked_diff_ans.sum() / ans_mask_expand.sum() + reweight_ratio * (masked_diff_vis.sum() / vis_mask_expand.sum())
                    else:
                        loss = (
                            (org_out - out).float().pow(2).mean().item()
                        )
                elif ans_mask is not None and vis_mask is None:
                    ans_mask_expand = ans_mask.unsqueeze(-1).expand_as(out)
                    masked_diff = ((org_out - out).float().pow(2) * ans_mask_expand)
                    loss = masked_diff.sum() / ans_mask_expand.sum()
                else:
                    loss = (
                        (org_out - out).float().pow(2).mean().item()
                    )  # float prevents overflow
            elif loss_mode == "mae":
                if ans_mask is not None and vis_mask is not None:
                    ans_mask_expand = ans_mask.unsqueeze(-1).expand_as(out)
                    vis_mask_expand = vis_mask.unsqueeze(-1).expand_as(out).cuda()
                    masked_diff_ans = ((org_out - out).float().abs() * ans_mask_expand)
                    masked_diff_vis = ((org_out - out).float().abs() * vis_mask_expand)
                    if reweight_ratio is not None:
                        loss = (masked_diff_ans.sum() + reweight_ratio * masked_diff_vis.sum()) / (ans_mask_expand.sum() + vis_mask_expand.sum())
                    else:
                        loss = (
                            (org_out - out).float().abs().mean().item()
                        )
                elif ans_mask is not None and vis_mask is None:
                    ans_mask_expand = ans_mask.unsqueeze(-1).expand_as(out)
                    masked_diff = ((org_out - out).float().abs() * ans_mask_expand)
                    loss = masked_diff.sum() / ans_mask_expand.sum()
                else:
                    loss = (
                        (org_out - out).float().abs().mean().item()
                    )  # float prevents overflow

            history.append(loss)
            is_best = loss < best_error
            if is_best:
                best_error = loss
                best_ratio = ratio
                best_scales = scales
            block.load_state_dict(org_sd)
        if best_ratio == -1:
            print(history)
            raise Exception
        # print(best_ratio)
        best_scales = best_scales.view(-1)

        assert torch.isnan(best_scales).sum() == 0, best_scales
        return best_scales.detach()

    def _auto_get_scale(prev_op, layers, inp, reweight_ratio=None, module2inspect=None, kwargs={}):
        # module2inspect: if given, we will check the output diff of this module instead of layers
        if module2inspect is None:
            assert len(layers) == 1
            module2inspect = layers[0]

        scales = _search_module_scale(module2inspect, layers, inp, reweight_ratio, kwargs)
        scales = scales.detach().cpu()
        # prev_op_name, [layer_name], scale
        return (
            get_op_name(module, prev_op),
            tuple([get_op_name(module, m) for m in layers]),
            scales,
        )

    scales_list = []  # return the searched scales

    if isinstance(module, OPTDecoderLayer):
        # attention input
        scales_list.append(
            _auto_get_scale(
                prev_op=module.self_attn_layer_norm,
                layers=[
                    module.self_attn.q_proj,
                    module.self_attn.k_proj,
                    module.self_attn.v_proj,
                ],
                inp=input_feat["self_attn.q_proj"],
                module2inspect=module.self_attn,
                kwargs=module_kwargs,
            )
        )
        # attn out
        scales_list.append(
            _auto_get_scale(
                prev_op=module.self_attn.v_proj,
                layers=[module.self_attn.out_proj],
                inp=input_feat["self_attn.out_proj"],
            )
        )
        # fc1
        scales_list.append(
            _auto_get_scale(
                prev_op=module.final_layer_norm,
                layers=[module.fc1],
                inp=input_feat["fc1"],
            )
        )
        # fc2
        scales_list.append(
            _auto_get_scale(
                prev_op=module.fc1,
                layers=[module.fc2],
                inp=input_feat["fc2"],
            )
        )

    elif isinstance(module, LlamaDecoderLayer):
        # attention input
        scales_list.append(
            _auto_get_scale(
                prev_op=module.input_layernorm,
                layers=[
                    module.self_attn.q_proj,
                    module.self_attn.k_proj,
                    module.self_attn.v_proj,
                ],
                inp=input_feat["self_attn.q_proj"],
                reweight_ratio=reweight_ratio_dict["attn"],
                module2inspect=module.self_attn,
                kwargs=module_kwargs,
            )
        )
        # attn out
        # Please refer to https://github.com/mit-han-lab/llm-awq/pull/67#issue-1850622696
        if module.self_attn.v_proj.weight.shape == module.self_attn.o_proj.weight.shape:
            scales_list.append(
                _auto_get_scale(
                    prev_op=module.self_attn.v_proj,
                    layers=[module.self_attn.o_proj],
                    inp=input_feat["self_attn.o_proj"],
                    reweight_ratio=reweight_ratio_dict["attn"],
                )
            )
        # fc1
        scales_list.append(
            _auto_get_scale(
                prev_op=module.post_attention_layernorm,
                layers=[module.mlp.gate_proj, module.mlp.up_proj],
                inp=input_feat["mlp.gate_proj"],
                reweight_ratio=reweight_ratio_dict["mlp"],
                module2inspect=module.mlp,
            )
        )
        # fc2
        scales_list.append(
            _auto_get_scale(
                prev_op=module.mlp.up_proj,
                layers=[module.mlp.down_proj],
                inp=input_feat["mlp.down_proj"],
                reweight_ratio=reweight_ratio_dict["mlp"],
            )
        )

    elif isinstance(module, BloomBlock):
        # attention input
        scales_list.append(
            _auto_get_scale(
                prev_op=module.input_layernorm,
                layers=[module.self_attention.query_key_value],
                inp=input_feat["self_attention.query_key_value"],
                module2inspect=module,
                kwargs=module_kwargs,
            )
        )
        # attn out
        # Please refer to https://github.com/mit-han-lab/llm-awq/issues/2#issuecomment-1606297469
        """
        scales_list.append(_auto_get_scale(
            prev_op=module.self_attention.query_key_value,
            layers=[module.self_attention.dense],
            inp=input_feat['self_attention.dense'],
        ))
        """
        # fc1
        scales_list.append(
            _auto_get_scale(
                prev_op=module.post_attention_layernorm,
                layers=[module.mlp.dense_h_to_4h],
                inp=input_feat["mlp.dense_h_to_4h"],
                module2inspect=module,
                kwargs=module_kwargs,
            )
        )
        # fc2
        scales_list.append(
            _auto_get_scale(
                prev_op=module.mlp.gelu_impl,
                layers=[module.mlp.dense_4h_to_h],
                inp=input_feat["mlp.dense_4h_to_h"],
            )
        )
    elif "mpt" in str(module.__class__).lower():
        # attention input
        scales_list.append(
            _auto_get_scale(
                prev_op=module.norm_1,
                layers=[module.attn.Wqkv],
                inp=input_feat["attn.Wqkv"],
                module2inspect=module.attn,
                kwargs=module_kwargs,
            )
        )

        # attn out
        scales_list.append(
            _auto_get_scale(
                prev_op=module.attn.Wqkv,
                layers=[module.attn.out_proj],
                inp=input_feat["attn.out_proj"],
            )
        )
        # fc1
        scales_list.append(
            _auto_get_scale(
                prev_op=module.norm_2,
                layers=[module.ffn.up_proj],
                inp=input_feat["ffn.up_proj"],
                module2inspect=module.ffn,
            )
        )
        # fc2
        scales_list.append(
            _auto_get_scale(
                prev_op=module.ffn.act,
                layers=[module.ffn.down_proj],
                inp=input_feat["ffn.down_proj"],
            )
        )

    elif "falcon" in str(module.__class__).lower():
        # attn out
        # Haotian: TBD: need to handle repeated scales for MQ
        """
        scales_list.append(_auto_get_scale(
            prev_op=module.self_attention.query_key_value,
            layers=[module.self_attention.dense],
            inp=input_feat['self_attention.dense'],
        ))
        """
        # fc1, as long as it is scaled, everything is screwed up
        if "falcon-7b" in str(module.__class__).lower():
            scales_list.append(
                _auto_get_scale(
                    prev_op=module.input_layernorm,
                    layers=[
                        module.mlp.dense_h_to_4h,
                        module.self_attention.query_key_value,
                    ],
                    inp=input_feat["self_attention.query_key_value"],
                    module2inspect=module,
                    kwargs=module_kwargs,
                )
            )
        elif "falcon-40b" in str(module.__class__).lower():
            scales_list.append(
                _auto_get_scale(
                    prev_op=module.ln_attn,
                    layers=[module.self_attention.query_key_value],
                    inp=input_feat["self_attention.query_key_value"],
                    module2inspect=module,
                    kwargs=module_kwargs,
                )
            )
            scales_list.append(
                _auto_get_scale(
                    prev_op=module.ln_mlp,
                    layers=[module.mlp.dense_h_to_4h],
                    inp=input_feat["mlp.dense_h_to_4h"],
                    module2inspect=module,
                    kwargs=module_kwargs,
                )
            )
        else:
            raise NotImplementedError(
                "Unknown Falcon architecture, currently only falcon-7b and falcon-40b are supported"
            )
        # fc2
        scales_list.append(
            _auto_get_scale(
                prev_op=module.mlp.act,
                layers=[module.mlp.dense_4h_to_h],
                inp=input_feat["mlp.dense_4h_to_h"],
            )
        )
    elif "bigcode" in str(module.__class__).lower():
        scales_list.append(
            _auto_get_scale(
                prev_op=module.ln_1,
                layers=[module.attn.c_attn],
                inp=input_feat["attn.c_attn"],
                module2inspect=module.attn,
                kwargs=module_kwargs,
            )
        )
        # fc1
        scales_list.append(
            _auto_get_scale(
                prev_op=module.ln_2,
                layers=[module.mlp.c_fc],
                inp=input_feat["mlp.c_fc"],
                module2inspect=module.mlp,
            )
        )
        # fc2
        scales_list.append(
            _auto_get_scale(
                prev_op=module.mlp.act,
                layers=[module.mlp.c_proj],
                inp=input_feat["mlp.c_proj"],
            )
        )
    elif "neox" in str(module.__class__).lower():
        scales_list.append(
            _auto_get_scale(
                prev_op=module.input_layernorm,
                layers=[module.attention.query_key_value],
                inp=input_feat["attention.query_key_value"],
                module2inspect=module.attention,
                kwargs=module_kwargs,
            )
        )
        # fc1
        scales_list.append(
            _auto_get_scale(
                prev_op=module.post_attention_layernorm,
                layers=[module.mlp.dense_h_to_4h],
                inp=input_feat["mlp.dense_h_to_4h"],
                module2inspect=module.mlp,
            )
        )
        # fc2
        scales_list.append(
            _auto_get_scale(
                prev_op=module.mlp.act,
                layers=[module.mlp.dense_4h_to_h],
                inp=input_feat["mlp.dense_4h_to_h"],
            )
        )
    elif module.__class__.__name__ == "Qwen2DecoderLayer":
        # attention input
        scales_list.append(
            _auto_get_scale(
                prev_op=module.input_layernorm,
                layers=[
                    module.self_attn.q_proj,
                    module.self_attn.k_proj,
                    module.self_attn.v_proj,
                ],
                inp=input_feat["self_attn.q_proj"],
                reweight_ratio=reweight_ratio_dict["attn"],
                module2inspect=module.self_attn,
                kwargs=module_kwargs,
            )
        )
        # attn out
        # Please refer to https://github.com/mit-han-lab/llm-awq/pull/67#issue-1850622696
        if module.self_attn.v_proj.weight.shape == module.self_attn.o_proj.weight.shape:
            scales_list.append(
                _auto_get_scale(
                    prev_op=module.self_attn.v_proj,
                    layers=[module.self_attn.o_proj],
                    inp=input_feat["self_attn.o_proj"],
                    reweight_ratio=reweight_ratio_dict["attn"],
                )
            )
        # fc1
        scales_list.append(
            _auto_get_scale(
                prev_op=module.post_attention_layernorm,
                layers=[module.mlp.gate_proj, module.mlp.up_proj],
                inp=input_feat["mlp.gate_proj"],
                reweight_ratio=reweight_ratio_dict["mlp"],
                module2inspect=module.mlp,
            )
        )
        # fc2
        scales_list.append(
            _auto_get_scale(
                prev_op=module.mlp.up_proj,
                layers=[module.mlp.down_proj],
                inp=input_feat["mlp.down_proj"],
                reweight_ratio=reweight_ratio_dict["mlp"],
            )
        )
    elif module.__class__.__name__ == "InternLM2DecoderLayer":
        # attention input
        scales_list.append(
            _auto_get_scale(
                prev_op=module.attention_norm,
                layers=[
                    module.attention.wqkv,
                ],
                inp=input_feat["attention.wqkv"],
                reweight_ratio=reweight_ratio_dict["attn"],
                module2inspect=module.attention,
                kwargs=module_kwargs,
            )
        )
        # attn out
        # Please refer to https://github.com/mit-han-lab/llm-awq/pull/67#issue-1850622696
        if module.attention.wqkv.weight.shape == module.attention.wo.weight.shape:
            scales_list.append(
                _auto_get_scale(
                    prev_op=module.attention.wqkv,
                    layers=[module.attention.wo],
                    inp=input_feat["attention.wo"],
                    reweight_ratio=reweight_ratio_dict["attn"],
                )
            )
        # fc1
        scales_list.append(
            _auto_get_scale(
                prev_op=module.ffn_norm,
                layers=[module.feed_forward.w1, module.feed_forward.w3],
                inp=input_feat["feed_forward.w1"],
                reweight_ratio=reweight_ratio_dict["mlp"],
                module2inspect=module.feed_forward,
            )
        )
        # fc2
        scales_list.append(
            _auto_get_scale(
                prev_op=module.feed_forward.w3,
                layers=[module.feed_forward.w2],
                inp=input_feat["feed_forward.w2"],
                reweight_ratio=reweight_ratio_dict["mlp"],
            )
        )

    elif module.__class__.__name__ == "Qwen2VLDecoderLayer":
        # attention input
        scales_list.append(
            _auto_get_scale(
                prev_op=module.input_layernorm,
                layers=[
                    module.self_attn.q_proj,
                    module.self_attn.k_proj,
                    module.self_attn.v_proj,
                ],
                inp=input_feat["self_attn.q_proj"],
                reweight_ratio=reweight_ratio_dict["attn"],
                module2inspect=module.self_attn,
                kwargs=module_kwargs,
            )
        )
        # attn out
        # Please refer to https://github.com/mit-han-lab/llm-awq/pull/67#issue-1850622696
        if module.self_attn.v_proj.weight.shape == module.self_attn.o_proj.weight.shape:
            scales_list.append(
                _auto_get_scale(
                    prev_op=module.self_attn.v_proj,
                    layers=[module.self_attn.o_proj],
                    inp=input_feat["self_attn.o_proj"],
                    reweight_ratio=reweight_ratio_dict["attn"],
                )
            )
        # fc1
        scales_list.append(
            _auto_get_scale(
                prev_op=module.post_attention_layernorm,
                layers=[module.mlp.gate_proj, module.mlp.up_proj],
                inp=input_feat["mlp.gate_proj"],
                reweight_ratio=reweight_ratio_dict["mlp"],
                module2inspect=module.mlp,
            )
        )
        # fc2
        scales_list.append(
            _auto_get_scale(
                prev_op=module.mlp.up_proj,
                layers=[module.mlp.down_proj],
                inp=input_feat["mlp.down_proj"],
                reweight_ratio=reweight_ratio_dict["mlp"],
            )
        )
    else:
        raise NotImplementedError(f"{type(module)} not supported yet!")

    return scales_list


def apply_scale(module, scales_list, input_feat_dict=None):
    for prev_op_name, layer_names, scales in scales_list:
        prev_op = get_op_by_name(module, prev_op_name)
        layers = [get_op_by_name(module, name) for name in layer_names]

        prev_op.cuda()
        for layer in layers:
            layer.cuda()
        # the scale_* helpers below move `scales` to the right device themselves

        if isinstance(prev_op, nn.Linear):
            assert len(layers) == 1
            scale_fc_fc(prev_op, layers[0], scales)
        elif (
            isinstance(prev_op, (nn.LayerNorm, LlamaRMSNorm, Qwen2RMSNorm))
            or prev_op.__class__.__name__ == "InternLM2RMSNorm"
        ):
            scale_ln_fcs(prev_op, layers, scales)
        elif isinstance(prev_op, (nn.GELU, BloomGelu, GELUActivation)):
            new_module = ScaledActivation(prev_op, scales)
            set_op_by_name(module, prev_op_name, new_module)
            scale_gelu_fc(prev_op, layers[0], scales)
        else:
            raise NotImplementedError(f"prev_op {type(prev_op)} not supported yet!")

        # apply the scaling to input feat if given
        if input_feat_dict is not None:
            for layer_name in layer_names:
                inp = input_feat_dict[layer_name]
                inp.div_(scales.view(1, -1).to(inp.device))

        prev_op.cpu()
        for layer in layers:
            layer.cpu()
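The heart of _search_module_scale above is a 20-point grid search over an exponent alpha: per-input-channel scales are set to x_max ** alpha, folded into the weights before fake quantization, divided back out afterwards, and the alpha with the lowest output error wins. A stripped-down, self-contained sketch of that loop on a single Linear layer; the toy per-tensor quantizer and random data are stand-ins for illustration only, not the repo's pseudo_quantize_tensor.

import torch
import torch.nn as nn

def toy_quantize(w, n_bit=4):
    # stand-in quantizer (symmetric, per-tensor), NOT pseudo_quantize_tensor
    qmax = 2 ** (n_bit - 1) - 1
    scale = w.abs().max().clamp(min=1e-5) / qmax
    return (w / scale).round().clamp(-qmax - 1, qmax) * scale

torch.manual_seed(0)
fc = nn.Linear(64, 64, bias=False)
x = torch.randn(256, 64) * torch.linspace(0.1, 5.0, 64)  # a few "salient" channels
org_out = fc(x)
x_max = x.abs().mean(0)  # per-channel activation magnitude, as in get_act_scale

best_err, best_alpha = float("inf"), None
w0 = fc.weight.data.clone()
for i in range(20):
    alpha = i / 20
    s = x_max.pow(alpha).clamp(min=1e-4)
    s = s / (s.max() * s.min()).sqrt()  # same normalization as the repo
    w_q = toy_quantize(w0 * s.view(1, -1)) / s.view(1, -1)  # scale, quantize, unscale
    err = (org_out - x @ w_q.t()).abs().mean().item()       # "mae" loss mode
    if err < best_err:
        best_err, best_alpha = err, alpha
print(f"best alpha = {best_alpha:.2f}, mae = {best_err:.4f}")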
qmllm/methods/mbq/quantize/auto_scale_distort.py
ADDED
@@ -0,0 +1,784 @@
| 1 |
+
import gc
|
| 2 |
+
import torch
|
| 3 |
+
import functools
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
|
| 6 |
+
from transformers.models.bloom.modeling_bloom import BloomBlock, BloomGelu
|
| 7 |
+
from transformers.models.opt.modeling_opt import OPTDecoderLayer
|
| 8 |
+
from transformers.models.llama.modeling_llama import LlamaDecoderLayer, LlamaRMSNorm
|
| 9 |
+
from transformers.activations import GELUActivation
|
| 10 |
+
|
| 11 |
+
from collections import defaultdict
|
| 12 |
+
from transformers.models.qwen2.modeling_qwen2 import Qwen2RMSNorm
|
| 13 |
+
|
| 14 |
+
from .qmodule import ScaledActivation
|
| 15 |
+
from qmllm.utils.search import get_op_by_name, get_op_name, set_op_by_name
|
| 16 |
+
from qmllm.quantization.quant_funcs import pseudo_quantize_tensor
|
| 17 |
+
|
| 18 |
+
__all__ = ["auto_scale_block_distort", "apply_scale"]
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
@torch.no_grad()
|
| 22 |
+
def get_weight_scale(weight, q_group_size=-1):
|
| 23 |
+
org_shape = weight.shape
|
| 24 |
+
if q_group_size > 0:
|
| 25 |
+
weight = weight.view(-1, q_group_size)
|
| 26 |
+
scale = weight.abs() / weight.abs().amax(dim=1, keepdim=True)
|
| 27 |
+
scale = scale.view(org_shape)
|
| 28 |
+
scale = scale.mean(0)
|
| 29 |
+
return scale
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
@torch.no_grad()
|
| 33 |
+
def get_act_scale(x):
|
| 34 |
+
return x.abs().view(-1, x.shape[-1]).mean(0)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
@torch.no_grad()
|
| 38 |
+
def scale_ln_fcs(ln, fcs, scales):
|
| 39 |
+
if not isinstance(fcs, list):
|
| 40 |
+
fcs = [fcs]
|
| 41 |
+
|
| 42 |
+
scales = scales.to(ln.weight.device)
|
| 43 |
+
|
| 44 |
+
ln.weight.div_(scales)
|
| 45 |
+
if hasattr(ln, "bias") and ln.bias is not None:
|
| 46 |
+
ln.bias.div_(scales)
|
| 47 |
+
|
| 48 |
+
for fc in fcs:
|
| 49 |
+
fc.weight.mul_(scales.view(1, -1))
|
| 50 |
+
|
| 51 |
+
for p in ln.parameters():
|
| 52 |
+
assert torch.isnan(p).sum() == 0
|
| 53 |
+
for fc in fcs:
|
| 54 |
+
for p in fc.parameters():
|
| 55 |
+
assert torch.isnan(p).sum() == 0
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
@torch.no_grad()
|
| 59 |
+
def scale_fc_fc(fc1, fc2, scales):
|
| 60 |
+
assert isinstance(fc1, nn.Linear)
|
| 61 |
+
assert isinstance(fc2, nn.Linear)
|
| 62 |
+
# assert fc1.out_features == fc2.in_features
|
| 63 |
+
|
| 64 |
+
scales = scales.to(fc1.weight.device)
|
| 65 |
+
|
| 66 |
+
# fc1.weight.div_(scales.view(-1, 1))
|
| 67 |
+
fc1.weight[-scales.size(0) :].div_(scales.view(-1, 1))
|
| 68 |
+
if fc1.bias is not None:
|
| 69 |
+
fc1.bias.div_(scales.view(-1))
|
| 70 |
+
|
| 71 |
+
fc2.weight.mul_(scales.view(1, -1))
|
| 72 |
+
|
| 73 |
+
for p in fc1.parameters():
|
| 74 |
+
assert torch.isnan(p).sum() == 0
|
| 75 |
+
for p in fc2.parameters():
|
| 76 |
+
assert torch.isnan(p).sum() == 0
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
@torch.no_grad()
|
| 80 |
+
def scale_gelu_fc(gelu, fc, scales):
|
| 81 |
+
assert isinstance(gelu, (nn.GELU, BloomGelu, GELUActivation))
|
| 82 |
+
assert isinstance(fc, nn.Linear)
|
| 83 |
+
|
| 84 |
+
fc.weight.mul_(scales.view(1, -1).to(fc.weight.device))
|
| 85 |
+
|
| 86 |
+
for p in fc.parameters():
|
| 87 |
+
assert torch.isnan(p).sum() == 0
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
@torch.no_grad()
|
| 91 |
+
def auto_scale_block_distort(module, module_kwargs, w_bit, q_config, input_feat, ans_mask, vis_mask, reweight_ratio_dict, q_input, loss_mode="mae"):
|
| 92 |
+
|
| 93 |
+
# firstly, get the weight quantize function
|
| 94 |
+
if w_bit is not None:
|
| 95 |
+
|
| 96 |
+
def w_quantize_func(p):
|
| 97 |
+
return pseudo_quantize_tensor(
|
| 98 |
+
p,
|
| 99 |
+
n_bits=w_bit,
|
| 100 |
+
**q_config,
|
| 101 |
+
).detach()
|
| 102 |
+
|
| 103 |
+
else:
|
| 104 |
+
|
| 105 |
+
def w_quantize_func(p):
|
| 106 |
+
return p
|
| 107 |
+
|
| 108 |
+
if "use_cache" in module_kwargs:
|
| 109 |
+
module_kwargs.pop("use_cache")
|
| 110 |
+
|
| 111 |
+
# find the best scale ratio
|
| 112 |
+
def _search_module_scale_distort(block, linears2scale: list, x, x_q, reweight_ratio=None, kwargs={}):
|
| 113 |
+
# w: co, ci
|
| 114 |
+
# x: n, ci
|
| 115 |
+
x = x.to(next(block.parameters()).device)
|
| 116 |
+
x_q = x_q.to(next(block.parameters()).device)
|
| 117 |
+
with torch.no_grad():
|
| 118 |
+
org_out = block(x, **kwargs)
|
| 119 |
+
if isinstance(org_out, tuple):
|
| 120 |
+
org_out = org_out[0]
|
| 121 |
+
|
| 122 |
+
x_max = get_act_scale(x_q)
|
| 123 |
+
|
| 124 |
+
best_error = float("inf")
|
| 125 |
+
best_ratio = -1
|
| 126 |
+
best_scales = None
|
| 127 |
+
|
| 128 |
+
n_grid = 20
|
| 129 |
+
history = []
|
| 130 |
+
|
| 131 |
+
org_sd = {k: v.cpu() for k, v in block.state_dict().items()}
|
| 132 |
+
for ratio in range(n_grid):
|
| 133 |
+
ratio = ratio * 1 / n_grid
|
| 134 |
+
scales = x_max.pow(ratio).clamp(min=1e-4).view(-1)
|
| 135 |
+
scales = scales / (scales.max() * scales.min()).sqrt()
|
| 136 |
+
for fc in linears2scale:
|
| 137 |
+
fc.weight.mul_(scales.view(1, -1).to(fc.weight.device))
|
| 138 |
+
fc.weight.data = w_quantize_func(fc.weight.data) / (scales.view(1, -1))
|
| 139 |
+
out = block(x_q, **kwargs)
|
| 140 |
+
if isinstance(out, tuple):
|
| 141 |
+
out = out[0]
|
| 142 |
+
|
| 143 |
+
# loss = (
|
| 144 |
+
# (org_out - out).float().pow(2).mean().item()
|
| 145 |
+
# ) # float prevents overflow
|
| 146 |
+
|
| 147 |
+
if loss_mode == "mse":
|
| 148 |
+
if ans_mask is not None and vis_mask is not None:
|
| 149 |
+
ans_mask_expand = ans_mask.unsqueeze(-1).expand_as(out)
|
| 150 |
+
vis_mask_expand = vis_mask.unsqueeze(-1).expand_as(out).cuda()
|
| 151 |
+
masked_diff_ans = ((org_out - out).float().pow(2) * ans_mask_expand)
|
| 152 |
+
masked_diff_vis = ((org_out - out).float().pow(2) * vis_mask_expand)
|
| 153 |
+
if reweight_ratio is not None:
|
| 154 |
+
loss = masked_diff_ans.sum() / ans_mask_expand.sum() + reweight_ratio * (masked_diff_vis.sum() / vis_mask_expand.sum())
|
| 155 |
+
else:
|
| 156 |
+
loss = (
|
| 157 |
+
(org_out - out).float().pow(2).mean().item()
|
| 158 |
+
)
|
| 159 |
+
elif ans_mask is not None and vis_mask is None:
|
| 160 |
+
ans_mask_expand = ans_mask.unsqueeze(-1).expand_as(out)
|
| 161 |
+
masked_diff = ((org_out - out).float().pow(2) * ans_mask_expand)
|
| 162 |
+
loss = masked_diff.sum() / ans_mask_expand.sum()
|
| 163 |
+
else:
|
| 164 |
+
loss = (
|
| 165 |
+
(org_out - out).float().pow(2).mean().item()
|
| 166 |
+
) # float prevents overflow
|
| 167 |
+
elif loss_mode == "mae":
|
| 168 |
+
if ans_mask is not None and vis_mask is not None:
|
| 169 |
+
ans_mask_expand = ans_mask.unsqueeze(-1).expand_as(out)
|
| 170 |
+
vis_mask_expand = vis_mask.unsqueeze(-1).expand_as(out).cuda()
|
| 171 |
+
masked_diff_ans = ((org_out - out).float().abs() * ans_mask_expand)
|
| 172 |
+
masked_diff_vis = ((org_out - out).float().abs() * vis_mask_expand)
|
| 173 |
+
if reweight_ratio is not None:
|
| 174 |
+
loss = (masked_diff_ans.sum() + reweight_ratio * masked_diff_vis.sum()) / (ans_mask_expand.sum() + vis_mask_expand.sum())
|
| 175 |
+
else:
|
| 176 |
+
loss = (
|
| 177 |
+
(org_out - out).float().abs().mean().item()
|
| 178 |
+
)
|
| 179 |
+
elif ans_mask is not None and vis_mask is None:
|
| 180 |
+
ans_mask_expand = ans_mask.unsqueeze(-1).expand_as(out)
|
| 181 |
+
masked_diff = ((org_out - out).float().abs() * ans_mask_expand)
|
| 182 |
+
loss = masked_diff.sum() / ans_mask_expand.sum()
|
| 183 |
+
else:
|
| 184 |
+
loss = (
|
| 185 |
+
(org_out - out).float().abs().mean().item()
|
| 186 |
+
) # float prevents overflow
|
| 187 |
+
|
| 188 |
+
history.append(loss)
|
| 189 |
+
is_best = loss < best_error
|
| 190 |
+
if is_best:
|
| 191 |
+
best_error = loss
|
| 192 |
+
best_ratio = ratio
|
| 193 |
+
best_scales = scales
|
| 194 |
+
block.load_state_dict(org_sd)
|
| 195 |
+
if best_ratio == -1:
|
| 196 |
+
print(history)
|
| 197 |
+
raise Exception
|
| 198 |
+
# print(best_ratio)
|
| 199 |
+
best_scales = best_scales.view(-1)
|
| 200 |
+
|
| 201 |
+
assert torch.isnan(best_scales).sum() == 0, best_scales
|
| 202 |
+
return best_scales.detach()
|
| 203 |
+
|
| 204 |
+
def _auto_get_scale_distort(prev_op, layers, inp, inp_q, reweight_ratio=None, module2inspect=None, kwargs={}):
|
| 205 |
+
# module2inspect: if given, we will check the output diff of this module instead of layers
|
| 206 |
+
if module2inspect is None:
|
| 207 |
+
assert len(layers) == 1
|
| 208 |
+
module2inspect = layers[0]
|
| 209 |
+
|
| 210 |
+
scales = _search_module_scale_distort(module2inspect, layers, inp, inp_q, reweight_ratio, kwargs)
|
| 211 |
+
scales = scales.detach().cpu()
|
| 212 |
+
# prev_op_name, [layer_name], scale
|
| 213 |
+
return (
|
| 214 |
+
get_op_name(module, prev_op),
|
| 215 |
+
tuple([get_op_name(module, m) for m in layers]),
|
| 216 |
+
scales,
|
| 217 |
+
)
|
| 218 |
+
|
| 219 |
+
scales_list = [] # return the searched scales
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
def _auto_get_input_feat_distort(inps_q, scales_list=None):
|
| 223 |
+
|
| 224 |
+
org_sd = {k: v.cpu() for k, v in module.state_dict().items()}
|
| 225 |
+
|
| 226 |
+
named_linears = {name: m for name, m in module.named_modules() if isinstance(m, nn.Linear)}
|
| 227 |
+
|
| 228 |
+
if scales_list is not None:
|
| 229 |
+
apply_scale(module, scales_list)
|
| 230 |
+
module.cuda()
|
| 231 |
+
for name in named_linears:
|
| 232 |
+
named_linears[name].weight.data = w_quantize_func(named_linears[name].weight.data).cuda()
|
| 233 |
+
|
| 234 |
+
def cache_input_hook(m, x, y, name, feat_dict):
|
| 235 |
+
x = x[0]
|
| 236 |
+
x = x.detach().cpu()
|
| 237 |
+
feat_dict[name].append(x)
|
| 238 |
+
|
| 239 |
+
input_feat_q = defaultdict(list)
|
| 240 |
+
handles = []
|
| 241 |
+
for name in named_linears:
|
| 242 |
+
handles.append(
|
| 243 |
+
named_linears[name].register_forward_hook(
|
| 244 |
+
functools.partial(cache_input_hook, name=name, feat_dict=input_feat_q)
|
| 245 |
+
)
|
| 246 |
+
)
|
| 247 |
+
|
| 248 |
+
inps_q = inps_q.to(next(module.parameters()).device)
|
| 249 |
+
module(inps_q, **module_kwargs)
|
| 250 |
+
for h in handles:
|
| 251 |
+
h.remove()
|
| 252 |
+
|
| 253 |
+
input_feat_q = {k: torch.cat(v, dim=0) for k, v in input_feat_q.items()}
|
| 254 |
+
|
| 255 |
+
torch.cuda.empty_cache()
|
| 256 |
+
|
| 257 |
+
module.load_state_dict(org_sd)
|
| 258 |
+
|
| 259 |
+
return input_feat_q
|
| 260 |
+
|
| 261 |
+
|
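`_auto_get_input_feat_distort` above is the standard forward-hook caching pattern: register a hook on every `nn.Linear`, run one forward pass with the (quantized) inputs, and concatenate what each hook captured. A self-contained toy version of the same pattern, independent of this repo's modules:

    import functools
    import torch
    import torch.nn as nn
    from collections import defaultdict

    def collect_linear_inputs(model: nn.Module, x: torch.Tensor) -> dict:
        # Cache the input tensor seen by every nn.Linear during one forward pass.
        feats = defaultdict(list)

        def cache_input(m, inp, out, name, feat_dict):
            feat_dict[name].append(inp[0].detach().cpu())

        handles = [
            m.register_forward_hook(functools.partial(cache_input, name=n, feat_dict=feats))
            for n, m in model.named_modules() if isinstance(m, nn.Linear)
        ]
        with torch.no_grad():
            model(x)
        for h in handles:
            h.remove()
        return {k: torch.cat(v, dim=0) for k, v in feats.items()}

    # e.g. collect_linear_inputs(nn.Sequential(nn.Linear(8, 4)), torch.randn(2, 8))

The real helper additionally applies the scales found so far (`apply_scale`) and fake-quantizes the weights before the pass, so that each subsequent search sees activations that already carry upstream quantization error.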
| 262 |
+
if isinstance(module, OPTDecoderLayer):
|
| 263 |
+
# attention input
|
| 264 |
+
scales_list.append(
|
| 265 |
+
_auto_get_scale(
|
| 266 |
+
prev_op=module.self_attn_layer_norm,
|
| 267 |
+
layers=[
|
| 268 |
+
module.self_attn.q_proj,
|
| 269 |
+
module.self_attn.k_proj,
|
| 270 |
+
module.self_attn.v_proj,
|
| 271 |
+
],
|
| 272 |
+
inp=input_feat["self_attn.q_proj"],
|
| 273 |
+
module2inspect=module.self_attn,
|
| 274 |
+
kwargs=module_kwargs,
|
| 275 |
+
)
|
| 276 |
+
)
|
| 277 |
+
# attn out
|
| 278 |
+
scales_list.append(
|
| 279 |
+
_auto_get_scale(
|
| 280 |
+
prev_op=module.self_attn.v_proj,
|
| 281 |
+
layers=[module.self_attn.out_proj],
|
| 282 |
+
inp=input_feat["self_attn.out_proj"],
|
| 283 |
+
)
|
| 284 |
+
)
|
| 285 |
+
# fc1
|
| 286 |
+
scales_list.append(
|
| 287 |
+
_auto_get_scale(
|
| 288 |
+
prev_op=module.final_layer_norm,
|
| 289 |
+
layers=[module.fc1],
|
| 290 |
+
inp=input_feat["fc1"],
|
| 291 |
+
)
|
| 292 |
+
)
|
| 293 |
+
# fc2
|
| 294 |
+
scales_list.append(
|
| 295 |
+
_auto_get_scale(
|
| 296 |
+
prev_op=module.fc1,
|
| 297 |
+
layers=[module.fc2],
|
| 298 |
+
inp=input_feat["fc2"],
|
| 299 |
+
)
|
| 300 |
+
)
|
| 301 |
+
|
| 302 |
+
elif isinstance(module, LlamaDecoderLayer):
|
| 303 |
+
# attention input
|
| 304 |
+
input_feat_q = _auto_get_input_feat_distort(
|
| 305 |
+
inps_q=q_input,
|
| 306 |
+
)
|
| 307 |
+
scales_list.append(
|
| 308 |
+
_auto_get_scale_distort(
|
| 309 |
+
prev_op=module.input_layernorm,
|
| 310 |
+
layers=[
|
| 311 |
+
module.self_attn.q_proj,
|
| 312 |
+
module.self_attn.k_proj,
|
| 313 |
+
module.self_attn.v_proj,
|
| 314 |
+
],
|
| 315 |
+
inp=input_feat["self_attn.q_proj"],
|
| 316 |
+
inp_q=input_feat_q["self_attn.q_proj"],
|
| 317 |
+
reweight_ratio=reweight_ratio_dict["attn"],
|
| 318 |
+
module2inspect=module.self_attn,
|
| 319 |
+
kwargs=module_kwargs,
|
| 320 |
+
)
|
| 321 |
+
)
|
| 322 |
+
# attn out
|
| 323 |
+
# Please refer to https://github.com/mit-han-lab/llm-awq/pull/67#issue-1850622696
|
| 324 |
+
input_feat_q = _auto_get_input_feat_distort(
|
| 325 |
+
inps_q=q_input,
|
| 326 |
+
scales_list=scales_list,
|
| 327 |
+
)
|
| 328 |
+
if module.self_attn.v_proj.weight.shape == module.self_attn.o_proj.weight.shape:
|
| 329 |
+
scales_list.append(
|
| 330 |
+
_auto_get_scale_distort(
|
| 331 |
+
prev_op=module.self_attn.v_proj,
|
| 332 |
+
layers=[module.self_attn.o_proj],
|
| 333 |
+
inp=input_feat["self_attn.o_proj"],
|
| 334 |
+
inp_q=input_feat_q["self_attn.o_proj"],
|
| 335 |
+
reweight_ratio=reweight_ratio_dict["attn"],
|
| 336 |
+
)
|
| 337 |
+
)
|
| 338 |
+
# fc1
|
| 339 |
+
input_feat_q = _auto_get_input_feat_distort(
|
| 340 |
+
inps_q=q_input,
|
| 341 |
+
scales_list=scales_list,
|
| 342 |
+
)
|
| 343 |
+
scales_list.append(
|
| 344 |
+
_auto_get_scale_distort(
|
| 345 |
+
prev_op=module.post_attention_layernorm,
|
| 346 |
+
layers=[module.mlp.gate_proj, module.mlp.up_proj],
|
| 347 |
+
inp=input_feat["mlp.gate_proj"],
|
| 348 |
+
inp_q=input_feat_q["mlp.gate_proj"],
|
| 349 |
+
reweight_ratio=reweight_ratio_dict["mlp"],
|
| 350 |
+
module2inspect=module.mlp,
|
| 351 |
+
)
|
| 352 |
+
)
|
| 353 |
+
# fc2
|
| 354 |
+
input_feat_q = _auto_get_input_feat_distort(
|
| 355 |
+
inps_q=q_input,
|
| 356 |
+
scales_list=scales_list,
|
| 357 |
+
)
|
| 358 |
+
scales_list.append(
|
| 359 |
+
_auto_get_scale_distort(
|
| 360 |
+
prev_op=module.mlp.up_proj,
|
| 361 |
+
layers=[module.mlp.down_proj],
|
| 362 |
+
inp=input_feat["mlp.down_proj"],
|
| 363 |
+
inp_q=input_feat_q["mlp.down_proj"],
|
| 364 |
+
reweight_ratio=reweight_ratio_dict["mlp"],
|
| 365 |
+
)
|
| 366 |
+
)
|
| 367 |
+
|
| 368 |
+
elif isinstance(module, BloomBlock):
|
| 369 |
+
# attention input
|
| 370 |
+
scales_list.append(
|
| 371 |
+
_auto_get_scale(
|
| 372 |
+
prev_op=module.input_layernorm,
|
| 373 |
+
layers=[module.self_attention.query_key_value],
|
| 374 |
+
inp=input_feat["self_attention.query_key_value"],
|
| 375 |
+
module2inspect=module,
|
| 376 |
+
kwargs=module_kwargs,
|
| 377 |
+
)
|
| 378 |
+
)
|
| 379 |
+
# attn out
|
| 380 |
+
# Please refer to https://github.com/mit-han-lab/llm-awq/issues/2#issuecomment-1606297469
|
| 381 |
+
"""
|
| 382 |
+
scales_list.append(_auto_get_scale(
|
| 383 |
+
prev_op=module.self_attention.query_key_value,
|
| 384 |
+
layers=[module.self_attention.dense],
|
| 385 |
+
inp=input_feat['self_attention.dense'],
|
| 386 |
+
))
|
| 387 |
+
"""
|
| 388 |
+
# fc1
|
| 389 |
+
scales_list.append(
|
| 390 |
+
_auto_get_scale(
|
| 391 |
+
prev_op=module.post_attention_layernorm,
|
| 392 |
+
layers=[module.mlp.dense_h_to_4h],
|
| 393 |
+
inp=input_feat["mlp.dense_h_to_4h"],
|
| 394 |
+
module2inspect=module,
|
| 395 |
+
kwargs=module_kwargs,
|
| 396 |
+
)
|
| 397 |
+
)
|
| 398 |
+
# fc2
|
| 399 |
+
scales_list.append(
|
| 400 |
+
_auto_get_scale(
|
| 401 |
+
prev_op=module.mlp.gelu_impl,
|
| 402 |
+
layers=[module.mlp.dense_4h_to_h],
|
| 403 |
+
inp=input_feat["mlp.dense_4h_to_h"],
|
| 404 |
+
)
|
| 405 |
+
)
|
| 406 |
+
elif "mpt" in str(module.__class__).lower():
|
| 407 |
+
# attention input
|
| 408 |
+
scales_list.append(
|
| 409 |
+
_auto_get_scale(
|
| 410 |
+
prev_op=module.norm_1,
|
| 411 |
+
layers=[module.attn.Wqkv],
|
| 412 |
+
inp=input_feat["attn.Wqkv"],
|
| 413 |
+
module2inspect=module.attn,
|
| 414 |
+
kwargs=module_kwargs,
|
| 415 |
+
)
|
| 416 |
+
)
|
| 417 |
+
|
| 418 |
+
# attn out
|
| 419 |
+
scales_list.append(
|
| 420 |
+
_auto_get_scale(
|
| 421 |
+
prev_op=module.attn.Wqkv,
|
| 422 |
+
layers=[module.attn.out_proj],
|
| 423 |
+
inp=input_feat["attn.out_proj"],
|
| 424 |
+
)
|
| 425 |
+
)
|
| 426 |
+
# fc1
|
| 427 |
+
scales_list.append(
|
| 428 |
+
_auto_get_scale(
|
| 429 |
+
prev_op=module.norm_2,
|
| 430 |
+
layers=[module.ffn.up_proj],
|
| 431 |
+
inp=input_feat["ffn.up_proj"],
|
| 432 |
+
module2inspect=module.ffn,
|
| 433 |
+
)
|
| 434 |
+
)
|
| 435 |
+
# fc2
|
| 436 |
+
scales_list.append(
|
| 437 |
+
_auto_get_scale(
|
| 438 |
+
prev_op=module.ffn.act,
|
| 439 |
+
layers=[module.ffn.down_proj],
|
| 440 |
+
inp=input_feat["ffn.down_proj"],
|
| 441 |
+
)
|
| 442 |
+
)
|
| 443 |
+
|
| 444 |
+
elif "falcon" in str(module.__class__).lower():
|
| 445 |
+
# attn out
|
| 446 |
+
# Haotian: TBD: need to handle repeated scales for MQ
|
| 447 |
+
"""
|
| 448 |
+
scales_list.append(_auto_get_scale(
|
| 449 |
+
prev_op=module.self_attention.query_key_value,
|
| 450 |
+
layers=[module.self_attention.dense],
|
| 451 |
+
inp=input_feat['self_attention.dense'],
|
| 452 |
+
))
|
| 453 |
+
"""
|
| 454 |
+
# fc1, as long as it is scaled, everything is screwed up
|
| 455 |
+
if "falcon-7b" in str(module.__class__).lower():
|
| 456 |
+
scales_list.append(
|
| 457 |
+
_auto_get_scale(
|
| 458 |
+
prev_op=module.input_layernorm,
|
| 459 |
+
layers=[
|
| 460 |
+
module.mlp.dense_h_to_4h,
|
| 461 |
+
module.self_attention.query_key_value,
|
| 462 |
+
],
|
| 463 |
+
inp=input_feat["self_attention.query_key_value"],
|
| 464 |
+
module2inspect=module,
|
| 465 |
+
kwargs=module_kwargs,
|
| 466 |
+
)
|
| 467 |
+
)
|
| 468 |
+
elif "falcon-40b" in str(module.__class__).lower():
|
| 469 |
+
scales_list.append(
|
| 470 |
+
_auto_get_scale(
|
| 471 |
+
prev_op=module.ln_attn,
|
| 472 |
+
layers=[module.self_attention.query_key_value],
|
| 473 |
+
inp=input_feat["self_attention.query_key_value"],
|
| 474 |
+
module2inspect=module,
|
| 475 |
+
kwargs=module_kwargs,
|
| 476 |
+
)
|
| 477 |
+
)
|
| 478 |
+
scales_list.append(
|
| 479 |
+
_auto_get_scale(
|
| 480 |
+
prev_op=module.ln_mlp,
|
| 481 |
+
layers=[module.mlp.dense_h_to_4h],
|
| 482 |
+
inp=input_feat["mlp.dense_h_to_4h"],
|
| 483 |
+
module2inspect=module,
|
| 484 |
+
kwargs=module_kwargs,
|
| 485 |
+
)
|
| 486 |
+
)
|
| 487 |
+
else:
|
| 488 |
+
raise NotImplementedError(
|
| 489 |
+
"Unknown Falcon architecture, currently only falcon-7b and falcon-40b are supported"
|
| 490 |
+
)
|
| 491 |
+
# fc2
|
| 492 |
+
scales_list.append(
|
| 493 |
+
_auto_get_scale(
|
| 494 |
+
prev_op=module.mlp.act,
|
| 495 |
+
layers=[module.mlp.dense_4h_to_h],
|
| 496 |
+
inp=input_feat["mlp.dense_4h_to_h"],
|
| 497 |
+
)
|
| 498 |
+
)
|
| 499 |
+
elif "bigcode" in str(module.__class__).lower():
|
| 500 |
+
scales_list.append(
|
| 501 |
+
_auto_get_scale(
|
| 502 |
+
prev_op=module.ln_1,
|
| 503 |
+
layers=[module.attn.c_attn],
|
| 504 |
+
inp=input_feat["attn.c_attn"],
|
| 505 |
+
module2inspect=module.attn,
|
| 506 |
+
kwargs=module_kwargs,
|
| 507 |
+
)
|
| 508 |
+
)
|
| 509 |
+
# fc1
|
| 510 |
+
scales_list.append(
|
| 511 |
+
_auto_get_scale(
|
| 512 |
+
prev_op=module.ln_2,
|
| 513 |
+
layers=[module.mlp.c_fc],
|
| 514 |
+
inp=input_feat["mlp.c_fc"],
|
| 515 |
+
module2inspect=module.mlp,
|
| 516 |
+
)
|
| 517 |
+
)
|
| 518 |
+
# fc2
|
| 519 |
+
scales_list.append(
|
| 520 |
+
_auto_get_scale(
|
| 521 |
+
prev_op=module.mlp.act,
|
| 522 |
+
layers=[module.mlp.c_proj],
|
| 523 |
+
inp=input_feat["mlp.c_proj"],
|
| 524 |
+
)
|
| 525 |
+
)
|
| 526 |
+
elif "neox" in str(module.__class__).lower():
|
| 527 |
+
scales_list.append(
|
| 528 |
+
_auto_get_scale(
|
| 529 |
+
prev_op=module.input_layernorm,
|
| 530 |
+
layers=[module.attention.query_key_value],
|
| 531 |
+
inp=input_feat["attention.query_key_value"],
|
| 532 |
+
module2inspect=module.attention,
|
| 533 |
+
kwargs=module_kwargs,
|
| 534 |
+
)
|
| 535 |
+
)
|
| 536 |
+
# fc1
|
| 537 |
+
scales_list.append(
|
| 538 |
+
_auto_get_scale(
|
| 539 |
+
prev_op=module.post_attention_layernorm,
|
| 540 |
+
layers=[module.mlp.dense_h_to_4h],
|
| 541 |
+
inp=input_feat["mlp.dense_h_to_4h"],
|
| 542 |
+
module2inspect=module.mlp,
|
| 543 |
+
)
|
| 544 |
+
)
|
| 545 |
+
# fc2
|
| 546 |
+
scales_list.append(
|
| 547 |
+
_auto_get_scale(
|
| 548 |
+
prev_op=module.mlp.act,
|
| 549 |
+
layers=[module.mlp.dense_4h_to_h],
|
| 550 |
+
inp=input_feat["mlp.dense_4h_to_h"],
|
| 551 |
+
)
|
| 552 |
+
)
|
| 553 |
+
elif module.__class__.__name__ == "Qwen2DecoderLayer":
|
| 554 |
+
# attention input
|
| 555 |
+
input_feat_q = _auto_get_input_feat_distort(
|
| 556 |
+
inps_q=q_input,
|
| 557 |
+
)
|
| 558 |
+
scales_list.append(
|
| 559 |
+
_auto_get_scale_distort(
|
| 560 |
+
prev_op=module.input_layernorm,
|
| 561 |
+
layers=[
|
| 562 |
+
module.self_attn.q_proj,
|
| 563 |
+
module.self_attn.k_proj,
|
| 564 |
+
module.self_attn.v_proj,
|
| 565 |
+
],
|
| 566 |
+
inp=input_feat["self_attn.q_proj"],
|
| 567 |
+
inp_q=input_feat_q["self_attn.q_proj"],
|
| 568 |
+
reweight_ratio=reweight_ratio_dict["attn"],
|
| 569 |
+
module2inspect=module.self_attn,
|
| 570 |
+
kwargs=module_kwargs,
|
| 571 |
+
)
|
| 572 |
+
)
|
| 573 |
+
# attn out
|
| 574 |
+
# Please refer to https://github.com/mit-han-lab/llm-awq/pull/67#issue-1850622696
|
| 575 |
+
input_feat_q = _auto_get_input_feat_distort(
|
| 576 |
+
inps_q=q_input,
|
| 577 |
+
scales_list=scales_list,
|
| 578 |
+
)
|
| 579 |
+
if module.self_attn.v_proj.weight.shape == module.self_attn.o_proj.weight.shape:
|
| 580 |
+
scales_list.append(
|
| 581 |
+
_auto_get_scale_distort(
|
| 582 |
+
prev_op=module.self_attn.v_proj,
|
| 583 |
+
layers=[module.self_attn.o_proj],
|
| 584 |
+
inp=input_feat["self_attn.o_proj"],
|
| 585 |
+
inp_q=input_feat_q["self_attn.o_proj"],
|
| 586 |
+
reweight_ratio=reweight_ratio_dict["attn"],
|
| 587 |
+
)
|
| 588 |
+
)
|
| 589 |
+
# fc1
|
| 590 |
+
input_feat_q = _auto_get_input_feat_distort(
|
| 591 |
+
inps_q=q_input,
|
| 592 |
+
scales_list=scales_list,
|
| 593 |
+
)
|
| 594 |
+
scales_list.append(
|
| 595 |
+
_auto_get_scale_distort(
|
| 596 |
+
prev_op=module.post_attention_layernorm,
|
| 597 |
+
layers=[module.mlp.gate_proj, module.mlp.up_proj],
|
| 598 |
+
inp=input_feat["mlp.gate_proj"],
|
| 599 |
+
inp_q=input_feat_q["mlp.gate_proj"],
|
| 600 |
+
reweight_ratio=reweight_ratio_dict["mlp"],
|
| 601 |
+
module2inspect=module.mlp,
|
| 602 |
+
)
|
| 603 |
+
)
|
| 604 |
+
# fc2
|
| 605 |
+
input_feat_q = _auto_get_input_feat_distort(
|
| 606 |
+
inps_q=q_input,
|
| 607 |
+
scales_list=scales_list,
|
| 608 |
+
)
|
| 609 |
+
scales_list.append(
|
| 610 |
+
_auto_get_scale_distort(
|
| 611 |
+
prev_op=module.mlp.up_proj,
|
| 612 |
+
layers=[module.mlp.down_proj],
|
| 613 |
+
inp=input_feat["mlp.down_proj"],
|
| 614 |
+
inp_q=input_feat_q["mlp.down_proj"],
|
| 615 |
+
reweight_ratio=reweight_ratio_dict["mlp"],
|
| 616 |
+
)
|
| 617 |
+
)
|
| 618 |
+
elif module.__class__.__name__ == "InternLM2DecoderLayer":
|
| 619 |
+
# attention input
|
| 620 |
+
input_feat_q = _auto_get_input_feat_distort(
|
| 621 |
+
inps_q=q_input,
|
| 622 |
+
)
|
| 623 |
+
scales_list.append(
|
| 624 |
+
_auto_get_scale_distort(
|
| 625 |
+
prev_op=module.attention_norm,
|
| 626 |
+
layers=[
|
| 627 |
+
module.attention.wqkv,
|
| 628 |
+
],
|
| 629 |
+
inp=input_feat["attention.wqkv"],
|
| 630 |
+
inp_q=input_feat_q["attention.wqkv"],
|
| 631 |
+
reweight_ratio=reweight_ratio_dict["attn"],
|
| 632 |
+
module2inspect=module.attention,
|
| 633 |
+
kwargs=module_kwargs,
|
| 634 |
+
)
|
| 635 |
+
)
|
| 636 |
+
# attn out
|
| 637 |
+
# Please refer to https://github.com/mit-han-lab/llm-awq/pull/67#issue-1850622696
|
| 638 |
+
input_feat_q = _auto_get_input_feat_distort(
|
| 639 |
+
inps_q=q_input,
|
| 640 |
+
scales_list=scales_list,
|
| 641 |
+
)
|
| 642 |
+
if module.attention.wqkv.weight.shape == module.attention.wo.weight.shape:
|
| 643 |
+
scales_list.append(
|
| 644 |
+
_auto_get_scale_distort(
|
| 645 |
+
prev_op=module.attention.wqkv,
|
| 646 |
+
layers=[module.attention.wo],
|
| 647 |
+
inp=input_feat["attention.wo"],
|
| 648 |
+
inp_q=input_feat_q["attention.wo"],
|
| 649 |
+
reweight_ratio=reweight_ratio_dict["attn"],
|
| 650 |
+
)
|
| 651 |
+
)
|
| 652 |
+
# fc1
|
| 653 |
+
input_feat_q = _auto_get_input_feat_distort(
|
| 654 |
+
inps_q=q_input,
|
| 655 |
+
scales_list=scales_list,
|
| 656 |
+
)
|
| 657 |
+
scales_list.append(
|
| 658 |
+
_auto_get_scale_distort(
|
| 659 |
+
prev_op=module.ffn_norm,
|
| 660 |
+
layers=[module.feed_forward.w1, module.feed_forward.w3],
|
| 661 |
+
inp=input_feat["feed_forward.w1"],
|
| 662 |
+
inp_q=input_feat_q["feed_forward.w1"],
|
| 663 |
+
reweight_ratio=reweight_ratio_dict["mlp"],
|
| 664 |
+
module2inspect=module.feed_forward,
|
| 665 |
+
)
|
| 666 |
+
)
|
| 667 |
+
# fc2
|
| 668 |
+
input_feat_q = _auto_get_input_feat_distort(
|
| 669 |
+
inps_q=q_input,
|
| 670 |
+
scales_list=scales_list,
|
| 671 |
+
)
|
| 672 |
+
scales_list.append(
|
| 673 |
+
_auto_get_scale_distort(
|
| 674 |
+
prev_op=module.feed_forward.w3,
|
| 675 |
+
layers=[module.feed_forward.w2],
|
| 676 |
+
inp=input_feat["feed_forward.w2"],
|
| 677 |
+
inp_q=input_feat_q["feed_forward.w2"],
|
| 678 |
+
reweight_ratio=reweight_ratio_dict["mlp"],
|
| 679 |
+
)
|
| 680 |
+
)
|
| 681 |
+
|
| 682 |
+
elif module.__class__.__name__ == "Qwen2VLDecoderLayer":
|
| 683 |
+
# attention input
|
| 684 |
+
input_feat_q = _auto_get_input_feat_distort(
|
| 685 |
+
inps_q=q_input,
|
| 686 |
+
)
|
| 687 |
+
scales_list.append(
|
| 688 |
+
_auto_get_scale_distort(
|
| 689 |
+
prev_op=module.input_layernorm,
|
| 690 |
+
layers=[
|
| 691 |
+
module.self_attn.q_proj,
|
| 692 |
+
module.self_attn.k_proj,
|
| 693 |
+
module.self_attn.v_proj,
|
| 694 |
+
],
|
| 695 |
+
inp=input_feat["self_attn.q_proj"],
|
| 696 |
+
inp_q=input_feat_q["self_attn.q_proj"],
|
| 697 |
+
reweight_ratio=reweight_ratio_dict["attn"],
|
| 698 |
+
module2inspect=module.self_attn,
|
| 699 |
+
kwargs=module_kwargs,
|
| 700 |
+
)
|
| 701 |
+
)
|
| 702 |
+
# attn out
|
| 703 |
+
# Please refer to https://github.com/mit-han-lab/llm-awq/pull/67#issue-1850622696
|
| 704 |
+
input_feat_q = _auto_get_input_feat_distort(
|
| 705 |
+
inps_q=q_input,
|
| 706 |
+
scales_list=scales_list,
|
| 707 |
+
)
|
| 708 |
+
if module.self_attn.v_proj.weight.shape == module.self_attn.o_proj.weight.shape:
|
| 709 |
+
scales_list.append(
|
| 710 |
+
_auto_get_scale_distort(
|
| 711 |
+
prev_op=module.self_attn.v_proj,
|
| 712 |
+
layers=[module.self_attn.o_proj],
|
| 713 |
+
inp=input_feat["self_attn.o_proj"],
|
| 714 |
+
inp_q=input_feat_q["self_attn.o_proj"],
|
| 715 |
+
reweight_ratio=reweight_ratio_dict["attn"],
|
| 716 |
+
)
|
| 717 |
+
)
|
| 718 |
+
# fc1
|
| 719 |
+
input_feat_q = _auto_get_input_feat_distort(
|
| 720 |
+
inps_q=q_input,
|
| 721 |
+
scales_list=scales_list,
|
| 722 |
+
)
|
| 723 |
+
scales_list.append(
|
| 724 |
+
_auto_get_scale_distort(
|
| 725 |
+
prev_op=module.post_attention_layernorm,
|
| 726 |
+
layers=[module.mlp.gate_proj, module.mlp.up_proj],
|
| 727 |
+
inp=input_feat["mlp.gate_proj"],
|
| 728 |
+
inp_q=input_feat_q["mlp.gate_proj"],
|
| 729 |
+
reweight_ratio=reweight_ratio_dict["mlp"],
|
| 730 |
+
module2inspect=module.mlp,
|
| 731 |
+
)
|
| 732 |
+
)
|
| 733 |
+
# fc2
|
| 734 |
+
input_feat_q = _auto_get_input_feat_distort(
|
| 735 |
+
inps_q=q_input,
|
| 736 |
+
scales_list=scales_list,
|
| 737 |
+
)
|
| 738 |
+
scales_list.append(
|
| 739 |
+
_auto_get_scale_distort(
|
| 740 |
+
prev_op=module.mlp.up_proj,
|
| 741 |
+
layers=[module.mlp.down_proj],
|
| 742 |
+
inp=input_feat["mlp.down_proj"],
|
| 743 |
+
inp_q=input_feat_q["mlp.down_proj"],
|
| 744 |
+
reweight_ratio=reweight_ratio_dict["mlp"],
|
| 745 |
+
)
|
| 746 |
+
)
|
| 747 |
+
else:
|
| 748 |
+
raise NotImplementedError(f"{type(module)} not supported yet!")
|
| 749 |
+
|
| 750 |
+
return scales_list
|
| 751 |
+
|
| 752 |
+
|
| 753 |
+
def apply_scale(module, scales_list, input_feat_dict=None):
|
| 754 |
+
for prev_op_name, layer_names, scales in scales_list:
|
| 755 |
+
prev_op = get_op_by_name(module, prev_op_name)
|
| 756 |
+
layers = [get_op_by_name(module, name) for name in layer_names]
|
| 757 |
+
|
| 758 |
+
prev_op.cuda()
|
| 759 |
+
for layer in layers:
|
| 760 |
+
layer.cuda()
|
| 761 |
+
scales = scales.cuda()
|
| 762 |
+
|
| 763 |
+
if isinstance(prev_op, nn.Linear):
|
| 764 |
+
assert len(layers) == 1
|
| 765 |
+
scale_fc_fc(prev_op, layers[0], scales)
|
| 766 |
+
elif isinstance(prev_op, (nn.LayerNorm, LlamaRMSNorm)) or prev_op.__class__.__name__ == "InternLM2RMSNorm" or prev_op.__class__.__name__ == "Qwen2RMSNorm":
|
| 767 |
+
scale_ln_fcs(prev_op, layers, scales)
|
| 768 |
+
elif isinstance(prev_op, (nn.GELU, BloomGelu, GELUActivation)):
|
| 769 |
+
new_module = ScaledActivation(prev_op, scales)
|
| 770 |
+
set_op_by_name(module, prev_op_name, new_module)
|
| 771 |
+
scale_gelu_fc(prev_op, layers[0], scales)
|
| 772 |
+
else:
|
| 773 |
+
raise NotImplementedError(f"prev_op {type(prev_op)} not supported yet!")
|
| 774 |
+
|
| 775 |
+
# apply the scaling to the input features, if given
|
| 776 |
+
if input_feat_dict is not None:
|
| 777 |
+
for layer_name in layer_names:
|
| 778 |
+
inp = input_feat_dict[layer_name]
|
| 779 |
+
inp.div_(scales.view(1, -1).to(inp.device))
|
| 780 |
+
|
| 781 |
+
prev_op.cpu()
|
| 782 |
+
for layer in layers:
|
| 783 |
+
layer.cpu()
|
| 784 |
+
scales = scales.cpu()
|
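This completes the distortion-aware pipeline for one decoder block: the dispatch above builds `scales_list` as (prev_op_name, (layer_name, ...), scales) tuples, and `apply_scale` folds each scale into the preceding norm/linear/activation. A minimal sketch of the calling pattern follows; the enclosing search function's exact name and signature are not shown in this hunk, so the driver below is an assumption, not the repo's actual entry point:

    # Hypothetical per-block driver (names and argument order are assumptions):
    for block in decoder_blocks:
        scales_list = auto_scale_block_distort(
            block, module_kwargs, w_bit, q_config, input_feat,
            ans_mask, vis_mask, reweight_ratio_dict, q_input,
        )
        # Fold scales into the model and, optionally, rescale cached activations.
        apply_scale(block, scales_list, input_feat_dict=input_feat)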
qmllm/methods/mbq/quantize/auto_scale_wa.py
ADDED
|
@@ -0,0 +1,681 @@
|
| 1 |
+
import gc
|
| 2 |
+
import copy
|
| 3 |
+
import torch
|
| 4 |
+
import functools
|
| 5 |
+
import torch.nn as nn
|
| 6 |
+
|
| 7 |
+
from transformers.models.bloom.modeling_bloom import BloomBlock, BloomGelu
|
| 8 |
+
from transformers.models.opt.modeling_opt import OPTDecoderLayer
|
| 9 |
+
from transformers.models.llama.modeling_llama import LlamaDecoderLayer, LlamaRMSNorm
|
| 10 |
+
from transformers.activations import GELUActivation
|
| 11 |
+
|
| 12 |
+
from collections import defaultdict
|
| 13 |
+
from transformers.models.qwen2.modeling_qwen2 import Qwen2RMSNorm
|
| 14 |
+
|
| 15 |
+
from .qmodule import ScaledActivation
|
| 16 |
+
from qmllm.utils.search import get_op_by_name, get_op_name, set_op_by_name
|
| 17 |
+
from qmllm.quantization.quant_funcs import pseudo_quantize_tensor
|
| 18 |
+
from qmllm.quantization.qlinear import WALinear
|
| 19 |
+
|
| 20 |
+
__all__ = ["auto_scale_block_wa"]
|
| 21 |
+
|
| 22 |
+
|
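The search below simulates weight-activation quantization by swapping each `nn.Linear` for a fake-quant `WALinear` (per-channel weights, per-token activations). As a rough illustration of what such a fake-quant pass computes, here is a self-contained symmetric per-channel weight sketch; the function name and rounding details are assumptions, not this repo's `pseudo_quantize_tensor`/`WALinear` API:

    import torch

    def fake_quant_weight_per_channel(w: torch.Tensor, n_bit: int = 4) -> torch.Tensor:
        # w: (out_features, in_features); one symmetric scale per output channel.
        qmax = 2 ** (n_bit - 1) - 1
        scale = w.abs().amax(dim=1, keepdim=True).clamp(min=1e-8) / qmax
        return (w / scale).round().clamp(-qmax - 1, qmax) * scale

Per-token activation quantization is analogous, with the scale taken over the last dimension of each token instead of each weight row.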
| 23 |
+
@torch.no_grad()
|
| 24 |
+
def get_weight_scale(weight, q_group_size=-1):
|
| 25 |
+
org_shape = weight.shape
|
| 26 |
+
if q_group_size > 0:
|
| 27 |
+
weight = weight.view(-1, q_group_size)
|
| 28 |
+
scale = weight.abs() / weight.abs().amax(dim=1, keepdim=True)
|
| 29 |
+
scale = scale.view(org_shape)
|
| 30 |
+
scale = scale.mean(0)
|
| 31 |
+
return scale
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
@torch.no_grad()
|
| 35 |
+
def get_act_scale(x):
|
| 36 |
+
return x.abs().view(-1, x.shape[-1]).mean(0)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
@torch.no_grad()
|
| 40 |
+
def scale_ln_fcs(ln, fcs, scales):
|
| 41 |
+
if not isinstance(fcs, list):
|
| 42 |
+
fcs = [fcs]
|
| 43 |
+
|
| 44 |
+
scales = scales.to(ln.weight.device)
|
| 45 |
+
|
| 46 |
+
ln.weight.div_(scales)
|
| 47 |
+
if hasattr(ln, "bias") and ln.bias is not None:
|
| 48 |
+
ln.bias.div_(scales)
|
| 49 |
+
|
| 50 |
+
for fc in fcs:
|
| 51 |
+
fc.weight.mul_(scales.view(1, -1))
|
| 52 |
+
|
| 53 |
+
for p in ln.parameters():
|
| 54 |
+
assert torch.isnan(p).sum() == 0
|
| 55 |
+
for fc in fcs:
|
| 56 |
+
for p in fc.parameters():
|
| 57 |
+
assert torch.isnan(p).sum() == 0
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
@torch.no_grad()
|
| 61 |
+
def scale_fc_fc(fc1, fc2, scales):
|
| 62 |
+
assert isinstance(fc1, nn.Linear)
|
| 63 |
+
assert isinstance(fc2, nn.Linear)
|
| 64 |
+
# assert fc1.out_features == fc2.in_features
|
| 65 |
+
|
| 66 |
+
scales = scales.to(fc1.weight.device)
|
| 67 |
+
|
| 68 |
+
# fc1.weight.div_(scales.view(-1, 1))
|
| 69 |
+
fc1.weight[-scales.size(0) :].div_(scales.view(-1, 1))
|
| 70 |
+
if fc1.bias is not None:
|
| 71 |
+
fc1.bias.div_(scales.view(-1))
|
| 72 |
+
|
| 73 |
+
fc2.weight.mul_(scales.view(1, -1))
|
| 74 |
+
|
| 75 |
+
for p in fc1.parameters():
|
| 76 |
+
assert torch.isnan(p).sum() == 0
|
| 77 |
+
for p in fc2.parameters():
|
| 78 |
+
assert torch.isnan(p).sum() == 0
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
@torch.no_grad()
|
| 82 |
+
def scale_gelu_fc(gelu, fc, scales):
|
| 83 |
+
assert isinstance(gelu, (nn.GELU, BloomGelu, GELUActivation))
|
| 84 |
+
assert isinstance(fc, nn.Linear)
|
| 85 |
+
|
| 86 |
+
fc.weight.mul_(scales.view(1, -1).to(fc.weight.device))
|
| 87 |
+
|
| 88 |
+
for p in fc.parameters():
|
| 89 |
+
assert torch.isnan(p).sum() == 0
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
@torch.no_grad()
|
| 93 |
+
def auto_scale_block_wa(module, module_kwargs, w_bit, a_bit, q_config, input_feat, ans_mask, vis_mask, reweight_ratio_dict, loss_mode="mae"):
|
| 94 |
+
|
| 95 |
+
if "use_cache" in module_kwargs:
|
| 96 |
+
module_kwargs.pop("use_cache")
|
| 97 |
+
|
| 98 |
+
def _search_module_scale_wa(block, linears2scale: list, layers_name, x, reweight_ratio=None, kwargs={}):
|
| 99 |
+
# w: co, ci
|
| 100 |
+
# x: n, ci
|
| 101 |
+
x = x.to(next(block.parameters()).device)
|
| 102 |
+
with torch.no_grad():
|
| 103 |
+
org_out = block(x, **kwargs)
|
| 104 |
+
if isinstance(org_out, tuple):
|
| 105 |
+
org_out = org_out[0]
|
| 106 |
+
|
| 107 |
+
x_max = get_act_scale(x)
|
| 108 |
+
|
| 109 |
+
best_error = float("inf")
|
| 110 |
+
best_ratio = -1
|
| 111 |
+
best_scales = None
|
| 112 |
+
|
| 113 |
+
n_grid = 20
|
| 114 |
+
history = []
|
| 115 |
+
|
| 116 |
+
org_sd = {k: v.cpu() for k, v in block.state_dict().items()}
|
| 117 |
+
for ratio in range(n_grid):
|
| 118 |
+
ratio = ratio * 1 / n_grid
|
| 119 |
+
scales = x_max.pow(ratio).clamp(min=1e-4).view(-1)
|
| 120 |
+
scales = scales / (scales.max() * scales.min()).sqrt()
|
| 121 |
+
|
| 122 |
+
if isinstance(block, nn.Linear):
|
| 123 |
+
new_block = None
|
| 124 |
+
for fc, fc_name in zip(linears2scale, layers_name):
|
| 125 |
+
# fc.weight.mul_(scales.view(1, -1).to(fc.weight.device))
|
| 126 |
+
# fc.weight.data = w_quantize_func(fc.weight.data) / (scales.view(1, -1))
|
| 127 |
+
fc.weight.mul_(scales.view(1, -1).to(fc.weight.device))
|
| 128 |
+
new_fc = WALinear.from_float(fc, weight_quant="per_channel", act_quant="per_token", w_bit=w_bit, a_bit=a_bit)
|
| 129 |
+
|
| 130 |
+
if isinstance(block, nn.Linear):
|
| 131 |
+
new_block = copy.deepcopy(new_fc)
|
| 132 |
+
else:
|
| 133 |
+
setattr(block, fc_name, new_fc)
|
| 134 |
+
|
| 135 |
+
del new_fc
|
| 136 |
+
torch.cuda.empty_cache()
|
| 137 |
+
|
| 138 |
+
x_scale = x / (scales.view(1, 1, -1))
|
| 139 |
+
|
| 140 |
+
if isinstance(block, nn.Linear):
|
| 141 |
+
out = new_block(x_scale, **kwargs)
|
| 142 |
+
else:
|
| 143 |
+
out = block(x_scale, **kwargs)
|
| 144 |
+
|
| 145 |
+
if isinstance(out, tuple):
|
| 146 |
+
out = out[0]
|
| 147 |
+
|
| 148 |
+
# loss = (
|
| 149 |
+
# (org_out - out).float().pow(2).mean().item()
|
| 150 |
+
# ) # float prevents overflow
|
| 151 |
+
|
| 152 |
+
if loss_mode == "mse":
|
| 153 |
+
if ans_mask is not None and vis_mask is not None:
|
| 154 |
+
ans_mask_expand = ans_mask.unsqueeze(-1).expand_as(out)
|
| 155 |
+
vis_mask_expand = vis_mask.unsqueeze(-1).expand_as(out).cuda()
|
| 156 |
+
masked_diff_ans = ((org_out - out).float().pow(2) * ans_mask_expand)
|
| 157 |
+
masked_diff_vis = ((org_out - out).float().pow(2) * vis_mask_expand)
|
| 158 |
+
if reweight_ratio is not None:
|
| 159 |
+
loss = masked_diff_ans.sum() / ans_mask_expand.sum() + reweight_ratio * (masked_diff_vis.sum() / vis_mask_expand.sum())
|
| 160 |
+
else:
|
| 161 |
+
loss = (
|
| 162 |
+
(org_out - out).float().pow(2).mean().item()
|
| 163 |
+
)
|
| 164 |
+
elif ans_mask is not None and vis_mask is None:
|
| 165 |
+
ans_mask_expand = ans_mask.unsqueeze(-1).expand_as(out)
|
| 166 |
+
masked_diff = ((org_out - out).float().pow(2) * ans_mask_expand)
|
| 167 |
+
loss = masked_diff.sum() / ans_mask_expand.sum()
|
| 168 |
+
else:
|
| 169 |
+
loss = (
|
| 170 |
+
(org_out - out).float().pow(2).mean().item()
|
| 171 |
+
) # float prevents overflow
|
| 172 |
+
elif loss_mode == "mae":
|
| 173 |
+
if ans_mask is not None and vis_mask is not None:
|
| 174 |
+
ans_mask_expand = ans_mask.unsqueeze(-1).expand_as(out)
|
| 175 |
+
vis_mask_expand = vis_mask.unsqueeze(-1).expand_as(out).cuda()
|
| 176 |
+
masked_diff_ans = ((org_out - out).float().abs() * ans_mask_expand)
|
| 177 |
+
masked_diff_vis = ((org_out - out).float().abs() * vis_mask_expand)
|
| 178 |
+
if reweight_ratio is not None:
|
| 179 |
+
loss = (masked_diff_ans.sum() + reweight_ratio * masked_diff_vis.sum()) / (ans_mask_expand.sum() + vis_mask_expand.sum())
|
| 180 |
+
else:
|
| 181 |
+
loss = (
|
| 182 |
+
(org_out - out).float().abs().mean().item()
|
| 183 |
+
)
|
| 184 |
+
elif ans_mask is not None and vis_mask is None:
|
| 185 |
+
ans_mask_expand = ans_mask.unsqueeze(-1).expand_as(out)
|
| 186 |
+
masked_diff = ((org_out - out).float().abs() * ans_mask_expand)
|
| 187 |
+
loss = masked_diff.sum() / ans_mask_expand.sum()
|
| 188 |
+
else:
|
| 189 |
+
loss = (
|
| 190 |
+
(org_out - out).float().abs().mean().item()
|
| 191 |
+
) # float prevents overflow
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
history.append(loss)
|
| 195 |
+
is_best = loss < best_error
|
| 196 |
+
if is_best:
|
| 197 |
+
best_error = loss
|
| 198 |
+
best_ratio = ratio
|
| 199 |
+
best_scales = scales
|
| 200 |
+
|
| 201 |
+
# restore the block
|
| 202 |
+
for fc, fc_name in zip(linears2scale, layers_name):
|
| 203 |
+
if isinstance(block, nn.Linear):
|
| 204 |
+
continue
|
| 205 |
+
else:
|
| 206 |
+
setattr(block, fc_name, fc)
|
| 207 |
+
|
| 208 |
+
if isinstance(block, nn.Linear):
|
| 209 |
+
del new_block
|
| 210 |
+
torch.cuda.empty_cache()
|
| 211 |
+
block.load_state_dict(org_sd)
|
| 212 |
+
if best_ratio == -1:
|
| 213 |
+
print(history)
|
| 214 |
+
raise Exception("grid search failed: no valid scale ratio found")
|
| 215 |
+
# print(best_ratio)
|
| 216 |
+
best_scales = best_scales.view(-1)
|
| 217 |
+
|
| 218 |
+
assert torch.isnan(best_scales).sum() == 0, best_scales
|
| 219 |
+
return best_scales.detach()
|
| 220 |
+
|
| 221 |
+
def _auto_get_scale_wa(prev_op, layers, layers_name, inp, reweight_ratio=None, module2inspect=None, kwargs={}):
|
| 222 |
+
# module2inspect: if given, we will check the output diff of this module instead of layers
|
| 223 |
+
if module2inspect is None:
|
| 224 |
+
assert len(layers) == 1
|
| 225 |
+
module2inspect = layers[0]
|
| 226 |
+
|
| 227 |
+
scales = _search_module_scale_wa(module2inspect, layers, layers_name, inp, reweight_ratio, kwargs)
|
| 228 |
+
scales = scales.detach().cpu()
|
| 229 |
+
# prev_op_name, [layer_name], scale
|
| 230 |
+
return (
|
| 231 |
+
get_op_name(module, prev_op),
|
| 232 |
+
tuple([get_op_name(module, m) for m in layers]),
|
| 233 |
+
scales,
|
| 234 |
+
)
|
| 235 |
+
|
| 236 |
+
scales_list = [] # return the searched scales
|
| 237 |
+
|
| 238 |
+
if isinstance(module, OPTDecoderLayer):
|
| 239 |
+
# attention input
|
| 240 |
+
scales_list.append(
|
| 241 |
+
_auto_get_scale(
|
| 242 |
+
prev_op=module.self_attn_layer_norm,
|
| 243 |
+
layers=[
|
| 244 |
+
module.self_attn.q_proj,
|
| 245 |
+
module.self_attn.k_proj,
|
| 246 |
+
module.self_attn.v_proj,
|
| 247 |
+
],
|
| 248 |
+
inp=input_feat["self_attn.q_proj"],
|
| 249 |
+
module2inspect=module.self_attn,
|
| 250 |
+
kwargs=module_kwargs,
|
| 251 |
+
)
|
| 252 |
+
)
|
| 253 |
+
# attn out
|
| 254 |
+
scales_list.append(
|
| 255 |
+
_auto_get_scale(
|
| 256 |
+
prev_op=module.self_attn.v_proj,
|
| 257 |
+
layers=[module.self_attn.out_proj],
|
| 258 |
+
inp=input_feat["self_attn.out_proj"],
|
| 259 |
+
)
|
| 260 |
+
)
|
| 261 |
+
# fc1
|
| 262 |
+
scales_list.append(
|
| 263 |
+
_auto_get_scale(
|
| 264 |
+
prev_op=module.final_layer_norm,
|
| 265 |
+
layers=[module.fc1],
|
| 266 |
+
inp=input_feat["fc1"],
|
| 267 |
+
)
|
| 268 |
+
)
|
| 269 |
+
# fc2
|
| 270 |
+
scales_list.append(
|
| 271 |
+
_auto_get_scale(
|
| 272 |
+
prev_op=module.fc1,
|
| 273 |
+
layers=[module.fc2],
|
| 274 |
+
inp=input_feat["fc2"],
|
| 275 |
+
)
|
| 276 |
+
)
|
| 277 |
+
|
| 278 |
+
elif isinstance(module, LlamaDecoderLayer):
|
| 279 |
+
# attention input
|
| 280 |
+
scales_list.append(
|
| 281 |
+
_auto_get_scale_wa(
|
| 282 |
+
prev_op=module.input_layernorm,
|
| 283 |
+
layers=[
|
| 284 |
+
module.self_attn.q_proj,
|
| 285 |
+
module.self_attn.k_proj,
|
| 286 |
+
module.self_attn.v_proj,
|
| 287 |
+
],
|
| 288 |
+
layers_name=[
|
| 289 |
+
"q_proj",
|
| 290 |
+
"k_proj",
|
| 291 |
+
"v_proj",
|
| 292 |
+
],
|
| 293 |
+
inp=input_feat["self_attn.q_proj"],
|
| 294 |
+
reweight_ratio=reweight_ratio_dict["attn"],
|
| 295 |
+
module2inspect=module.self_attn,
|
| 296 |
+
kwargs=module_kwargs,
|
| 297 |
+
)
|
| 298 |
+
)
|
| 299 |
+
# attn out
|
| 300 |
+
# Please refer to https://github.com/mit-han-lab/llm-awq/pull/67#issue-1850622696
|
| 301 |
+
if module.self_attn.v_proj.weight.shape == module.self_attn.o_proj.weight.shape:
|
| 302 |
+
scales_list.append(
|
| 303 |
+
_auto_get_scale_wa(
|
| 304 |
+
prev_op=module.self_attn.v_proj,
|
| 305 |
+
layers=[module.self_attn.o_proj],
|
| 306 |
+
layers_name=["o_proj"],
|
| 307 |
+
inp=input_feat["self_attn.o_proj"],
|
| 308 |
+
reweight_ratio=reweight_ratio_dict["attn"],
|
| 309 |
+
)
|
| 310 |
+
)
|
| 311 |
+
# fc1
|
| 312 |
+
scales_list.append(
|
| 313 |
+
_auto_get_scale_wa(
|
| 314 |
+
prev_op=module.post_attention_layernorm,
|
| 315 |
+
layers=[module.mlp.gate_proj, module.mlp.up_proj],
|
| 316 |
+
layers_name=["gate_proj", "up_proj"],
|
| 317 |
+
inp=input_feat["mlp.gate_proj"],
|
| 318 |
+
reweight_ratio=reweight_ratio_dict["mlp"],
|
| 319 |
+
module2inspect=module.mlp,
|
| 320 |
+
)
|
| 321 |
+
)
|
| 322 |
+
# fc2
|
| 323 |
+
scales_list.append(
|
| 324 |
+
_auto_get_scale_wa(
|
| 325 |
+
prev_op=module.mlp.up_proj,
|
| 326 |
+
layers=[module.mlp.down_proj],
|
| 327 |
+
layers_name=["down_proj"],
|
| 328 |
+
inp=input_feat["mlp.down_proj"],
|
| 329 |
+
reweight_ratio=reweight_ratio_dict["mlp"],
|
| 330 |
+
)
|
| 331 |
+
)
|
| 332 |
+
|
| 333 |
+
elif isinstance(module, BloomBlock):
|
| 334 |
+
# attention input
|
| 335 |
+
scales_list.append(
|
| 336 |
+
_auto_get_scale(
|
| 337 |
+
prev_op=module.input_layernorm,
|
| 338 |
+
layers=[module.self_attention.query_key_value],
|
| 339 |
+
inp=input_feat["self_attention.query_key_value"],
|
| 340 |
+
module2inspect=module,
|
| 341 |
+
kwargs=module_kwargs,
|
| 342 |
+
)
|
| 343 |
+
)
|
| 344 |
+
# attn out
|
| 345 |
+
# Please refer to https://github.com/mit-han-lab/llm-awq/issues/2#issuecomment-1606297469
|
| 346 |
+
"""
|
| 347 |
+
scales_list.append(_auto_get_scale(
|
| 348 |
+
prev_op=module.self_attention.query_key_value,
|
| 349 |
+
layers=[module.self_attention.dense],
|
| 350 |
+
inp=input_feat['self_attention.dense'],
|
| 351 |
+
))
|
| 352 |
+
"""
|
| 353 |
+
# fc1
|
| 354 |
+
scales_list.append(
|
| 355 |
+
_auto_get_scale(
|
| 356 |
+
prev_op=module.post_attention_layernorm,
|
| 357 |
+
layers=[module.mlp.dense_h_to_4h],
|
| 358 |
+
inp=input_feat["mlp.dense_h_to_4h"],
|
| 359 |
+
module2inspect=module,
|
| 360 |
+
kwargs=module_kwargs,
|
| 361 |
+
)
|
| 362 |
+
)
|
| 363 |
+
# fc2
|
| 364 |
+
scales_list.append(
|
| 365 |
+
_auto_get_scale(
|
| 366 |
+
prev_op=module.mlp.gelu_impl,
|
| 367 |
+
layers=[module.mlp.dense_4h_to_h],
|
| 368 |
+
inp=input_feat["mlp.dense_4h_to_h"],
|
| 369 |
+
)
|
| 370 |
+
)
|
| 371 |
+
elif "mpt" in str(module.__class__).lower():
|
| 372 |
+
# attention input
|
| 373 |
+
scales_list.append(
|
| 374 |
+
_auto_get_scale(
|
| 375 |
+
prev_op=module.norm_1,
|
| 376 |
+
layers=[module.attn.Wqkv],
|
| 377 |
+
inp=input_feat["attn.Wqkv"],
|
| 378 |
+
module2inspect=module.attn,
|
| 379 |
+
kwargs=module_kwargs,
|
| 380 |
+
)
|
| 381 |
+
)
|
| 382 |
+
|
| 383 |
+
# attn out
|
| 384 |
+
scales_list.append(
|
| 385 |
+
_auto_get_scale(
|
| 386 |
+
prev_op=module.attn.Wqkv,
|
| 387 |
+
layers=[module.attn.out_proj],
|
| 388 |
+
inp=input_feat["attn.out_proj"],
|
| 389 |
+
)
|
| 390 |
+
)
|
| 391 |
+
# fc1
|
| 392 |
+
scales_list.append(
|
| 393 |
+
_auto_get_scale(
|
| 394 |
+
prev_op=module.norm_2,
|
| 395 |
+
layers=[module.ffn.up_proj],
|
| 396 |
+
inp=input_feat["ffn.up_proj"],
|
| 397 |
+
module2inspect=module.ffn,
|
| 398 |
+
)
|
| 399 |
+
)
|
| 400 |
+
# fc2
|
| 401 |
+
scales_list.append(
|
| 402 |
+
_auto_get_scale(
|
| 403 |
+
prev_op=module.ffn.act,
|
| 404 |
+
layers=[module.ffn.down_proj],
|
| 405 |
+
inp=input_feat["ffn.down_proj"],
|
| 406 |
+
)
|
| 407 |
+
)
|
| 408 |
+
|
| 409 |
+
elif "falcon" in str(module.__class__).lower():
|
| 410 |
+
# attn out
|
| 411 |
+
# Haotian: TBD: need to handle repeated scales for MQ
|
| 412 |
+
"""
|
| 413 |
+
scales_list.append(_auto_get_scale(
|
| 414 |
+
prev_op=module.self_attention.query_key_value,
|
| 415 |
+
layers=[module.self_attention.dense],
|
| 416 |
+
inp=input_feat['self_attention.dense'],
|
| 417 |
+
))
|
| 418 |
+
"""
|
| 419 |
+
# fc1, as long as it is scaled, everything is screwed up
|
| 420 |
+
if "falcon-7b" in str(module.__class__).lower():
|
| 421 |
+
scales_list.append(
|
| 422 |
+
_auto_get_scale(
|
| 423 |
+
prev_op=module.input_layernorm,
|
| 424 |
+
layers=[
|
| 425 |
+
module.mlp.dense_h_to_4h,
|
| 426 |
+
module.self_attention.query_key_value,
|
| 427 |
+
],
|
| 428 |
+
inp=input_feat["self_attention.query_key_value"],
|
| 429 |
+
module2inspect=module,
|
| 430 |
+
kwargs=module_kwargs,
|
| 431 |
+
)
|
| 432 |
+
)
|
| 433 |
+
elif "falcon-40b" in str(module.__class__).lower():
|
| 434 |
+
scales_list.append(
|
| 435 |
+
_auto_get_scale(
|
| 436 |
+
prev_op=module.ln_attn,
|
| 437 |
+
layers=[module.self_attention.query_key_value],
|
| 438 |
+
inp=input_feat["self_attention.query_key_value"],
|
| 439 |
+
module2inspect=module,
|
| 440 |
+
kwargs=module_kwargs,
|
| 441 |
+
)
|
| 442 |
+
)
|
| 443 |
+
scales_list.append(
|
| 444 |
+
_auto_get_scale(
|
| 445 |
+
prev_op=module.ln_mlp,
|
| 446 |
+
layers=[module.mlp.dense_h_to_4h],
|
| 447 |
+
inp=input_feat["mlp.dense_h_to_4h"],
|
| 448 |
+
module2inspect=module,
|
| 449 |
+
kwargs=module_kwargs,
|
| 450 |
+
)
|
| 451 |
+
)
|
| 452 |
+
else:
|
| 453 |
+
raise NotImplementedError(
|
| 454 |
+
"Unknown Falcon architecture, currently only falcon-7b and falcon-40b are supported"
|
| 455 |
+
)
|
| 456 |
+
# fc2
|
| 457 |
+
scales_list.append(
|
| 458 |
+
_auto_get_scale(
|
| 459 |
+
prev_op=module.mlp.act,
|
| 460 |
+
layers=[module.mlp.dense_4h_to_h],
|
| 461 |
+
inp=input_feat["mlp.dense_4h_to_h"],
|
| 462 |
+
)
|
| 463 |
+
)
|
| 464 |
+
elif "bigcode" in str(module.__class__).lower():
|
| 465 |
+
scales_list.append(
|
| 466 |
+
_auto_get_scale(
|
| 467 |
+
prev_op=module.ln_1,
|
| 468 |
+
layers=[module.attn.c_attn],
|
| 469 |
+
inp=input_feat["attn.c_attn"],
|
| 470 |
+
module2inspect=module.attn,
|
| 471 |
+
kwargs=module_kwargs,
|
| 472 |
+
)
|
| 473 |
+
)
|
| 474 |
+
# fc1
|
| 475 |
+
scales_list.append(
|
| 476 |
+
_auto_get_scale(
|
| 477 |
+
prev_op=module.ln_2,
|
| 478 |
+
layers=[module.mlp.c_fc],
|
| 479 |
+
inp=input_feat["mlp.c_fc"],
|
| 480 |
+
module2inspect=module.mlp,
|
| 481 |
+
)
|
| 482 |
+
)
|
| 483 |
+
# fc2
|
| 484 |
+
scales_list.append(
|
| 485 |
+
_auto_get_scale(
|
| 486 |
+
prev_op=module.mlp.act,
|
| 487 |
+
layers=[module.mlp.c_proj],
|
| 488 |
+
inp=input_feat["mlp.c_proj"],
|
| 489 |
+
)
|
| 490 |
+
)
|
| 491 |
+
elif "neox" in str(module.__class__).lower():
|
| 492 |
+
scales_list.append(
|
| 493 |
+
_auto_get_scale(
|
| 494 |
+
prev_op=module.input_layernorm,
|
| 495 |
+
layers=[module.attention.query_key_value],
|
| 496 |
+
inp=input_feat["attention.query_key_value"],
|
| 497 |
+
module2inspect=module.attention,
|
| 498 |
+
kwargs=module_kwargs,
|
| 499 |
+
)
|
| 500 |
+
)
|
| 501 |
+
# fc1
|
| 502 |
+
scales_list.append(
|
| 503 |
+
_auto_get_scale(
|
| 504 |
+
prev_op=module.post_attention_layernorm,
|
| 505 |
+
layers=[module.mlp.dense_h_to_4h],
|
| 506 |
+
inp=input_feat["mlp.dense_h_to_4h"],
|
| 507 |
+
module2inspect=module.mlp,
|
| 508 |
+
)
|
| 509 |
+
)
|
| 510 |
+
# fc2
|
| 511 |
+
scales_list.append(
|
| 512 |
+
_auto_get_scale(
|
| 513 |
+
prev_op=module.mlp.act,
|
| 514 |
+
layers=[module.mlp.dense_4h_to_h],
|
| 515 |
+
inp=input_feat["mlp.dense_4h_to_h"],
|
| 516 |
+
)
|
| 517 |
+
)
|
| 518 |
+
elif module.__class__.__name__ == "Qwen2DecoderLayer":
|
| 519 |
+
# attention input
|
| 520 |
+
scales_list.append(
|
| 521 |
+
_auto_get_scale_wa(
|
| 522 |
+
prev_op=module.input_layernorm,
|
| 523 |
+
layers=[
|
| 524 |
+
module.self_attn.q_proj,
|
| 525 |
+
module.self_attn.k_proj,
|
| 526 |
+
module.self_attn.v_proj,
|
| 527 |
+
],
|
| 528 |
+
layers_name=[
|
| 529 |
+
"q_proj",
|
| 530 |
+
"k_proj",
|
| 531 |
+
"v_proj",
|
| 532 |
+
],
|
| 533 |
+
inp=input_feat["self_attn.q_proj"],
|
| 534 |
+
reweight_ratio=reweight_ratio_dict["attn"],
|
| 535 |
+
module2inspect=module.self_attn,
|
| 536 |
+
kwargs=module_kwargs,
|
| 537 |
+
)
|
| 538 |
+
)
|
| 539 |
+
# attn out
|
| 540 |
+
# Please refer to https://github.com/mit-han-lab/llm-awq/pull/67#issue-1850622696
|
| 541 |
+
if module.self_attn.v_proj.weight.shape == module.self_attn.o_proj.weight.shape:
|
| 542 |
+
scales_list.append(
|
| 543 |
+
_auto_get_scale_wa(
|
| 544 |
+
prev_op=module.self_attn.v_proj,
|
| 545 |
+
layers=[module.self_attn.o_proj],
|
| 546 |
+
layers_name=["o_proj"],
|
| 547 |
+
inp=input_feat["self_attn.o_proj"],
|
| 548 |
+
reweight_ratio=reweight_ratio_dict["attn"],
|
| 549 |
+
)
|
| 550 |
+
)
|
| 551 |
+
# fc1
|
| 552 |
+
scales_list.append(
|
| 553 |
+
_auto_get_scale_wa(
|
| 554 |
+
prev_op=module.post_attention_layernorm,
|
| 555 |
+
layers=[module.mlp.gate_proj, module.mlp.up_proj],
|
| 556 |
+
layers_name=["gate_proj", "up_proj"],
|
| 557 |
+
inp=input_feat["mlp.gate_proj"],
|
| 558 |
+
reweight_ratio=reweight_ratio_dict["mlp"],
|
| 559 |
+
module2inspect=module.mlp,
|
| 560 |
+
)
|
| 561 |
+
)
|
| 562 |
+
# fc2
|
| 563 |
+
scales_list.append(
|
| 564 |
+
_auto_get_scale_wa(
|
| 565 |
+
prev_op=module.mlp.up_proj,
|
| 566 |
+
layers=[module.mlp.down_proj],
|
| 567 |
+
layers_name=["down_proj"],
|
| 568 |
+
inp=input_feat["mlp.down_proj"],
|
| 569 |
+
reweight_ratio=reweight_ratio_dict["mlp"],
|
| 570 |
+
)
|
| 571 |
+
)
|
| 572 |
+
elif module.__class__.__name__ == "InternLM2DecoderLayer":
|
| 573 |
+
# attention input
|
| 574 |
+
scales_list.append(
|
| 575 |
+
_auto_get_scale_wa(
|
| 576 |
+
prev_op=module.attention_norm,
|
| 577 |
+
layers=[
|
| 578 |
+
module.attention.wqkv,
|
| 579 |
+
],
|
| 580 |
+
layers_name=[
|
| 581 |
+
"wqkv",
|
| 582 |
+
],
|
| 583 |
+
inp=input_feat["attention.wqkv"],
|
| 584 |
+
reweight_ratio=reweight_ratio_dict["attn"],
|
| 585 |
+
module2inspect=module.attention,
|
| 586 |
+
kwargs=module_kwargs,
|
| 587 |
+
)
|
| 588 |
+
)
|
| 589 |
+
# attn out
|
| 590 |
+
# Please refer to https://github.com/mit-han-lab/llm-awq/pull/67#issue-1850622696
|
| 591 |
+
if module.attention.wqkv.weight.shape == module.attention.wo.weight.shape:
|
| 592 |
+
scales_list.append(
|
| 593 |
+
_auto_get_scale_wa(
|
| 594 |
+
prev_op=module.attention.wqkv,
|
| 595 |
+
layers=[module.attention.wo],
|
| 596 |
+
layers_name=["wo"],
|
| 597 |
+
inp=input_feat["attention.wo"],
|
| 598 |
+
reweight_ratio=reweight_ratio_dict["attn"],
|
| 599 |
+
)
|
| 600 |
+
)
|
| 601 |
+
# fc1
|
| 602 |
+
scales_list.append(
|
| 603 |
+
_auto_get_scale_wa(
|
| 604 |
+
prev_op=module.ffn_norm,
|
| 605 |
+
layers=[module.feed_forward.w1, module.feed_forward.w3],
|
| 606 |
+
layers_name=["w1", "w3"],
|
| 607 |
+
inp=input_feat["feed_forward.w1"],
|
| 608 |
+
reweight_ratio=reweight_ratio_dict["mlp"],
|
| 609 |
+
module2inspect=module.feed_forward,
|
| 610 |
+
)
|
| 611 |
+
)
|
| 612 |
+
# fc2
|
| 613 |
+
scales_list.append(
|
| 614 |
+
_auto_get_scale_wa(
|
| 615 |
+
prev_op=module.feed_forward.w3,
|
| 616 |
+
layers=[module.feed_forward.w2],
|
| 617 |
+
layers_name=["w2"],
|
| 618 |
+
inp=input_feat["feed_forward.w2"],
|
| 619 |
+
reweight_ratio=reweight_ratio_dict["mlp"],
|
| 620 |
+
)
|
| 621 |
+
)
|
| 622 |
+
|
| 623 |
+
elif module.__class__.__name__ == "Qwen2VLDecoderLayer":
|
| 624 |
+
# attention input
|
| 625 |
+
scales_list.append(
|
| 626 |
+
_auto_get_scale_wa(
|
| 627 |
+
prev_op=module.input_layernorm,
|
| 628 |
+
layers=[
|
| 629 |
+
module.self_attn.q_proj,
|
| 630 |
+
module.self_attn.k_proj,
|
| 631 |
+
module.self_attn.v_proj,
|
| 632 |
+
],
|
| 633 |
+
layers_name=[
|
| 634 |
+
"q_proj",
|
| 635 |
+
"k_proj",
|
| 636 |
+
"v_proj",
|
| 637 |
+
],
|
| 638 |
+
inp=input_feat["self_attn.q_proj"],
|
| 639 |
+
reweight_ratio=reweight_ratio_dict["attn"],
|
| 640 |
+
module2inspect=module.self_attn,
|
| 641 |
+
kwargs=module_kwargs,
|
| 642 |
+
)
|
| 643 |
+
)
|
| 644 |
+
# attn out
|
| 645 |
+
# Please refer to https://github.com/mit-han-lab/llm-awq/pull/67#issue-1850622696
|
| 646 |
+
if module.self_attn.v_proj.weight.shape == module.self_attn.o_proj.weight.shape:
|
| 647 |
+
scales_list.append(
|
| 648 |
+
_auto_get_scale_wa(
|
| 649 |
+
prev_op=module.self_attn.v_proj,
|
| 650 |
+
layers=[module.self_attn.o_proj],
|
| 651 |
+
layers_name=["o_proj"],
|
| 652 |
+
inp=input_feat["self_attn.o_proj"],
|
| 653 |
+
reweight_ratio=reweight_ratio_dict["attn"],
|
| 654 |
+
)
|
| 655 |
+
)
|
| 656 |
+
# fc1
|
| 657 |
+
scales_list.append(
|
| 658 |
+
_auto_get_scale_wa(
|
| 659 |
+
prev_op=module.post_attention_layernorm,
|
| 660 |
+
layers=[module.mlp.gate_proj, module.mlp.up_proj],
|
| 661 |
+
layers_name=["gate_proj", "up_proj"],
|
| 662 |
+
inp=input_feat["mlp.gate_proj"],
|
| 663 |
+
reweight_ratio=reweight_ratio_dict["mlp"],
|
| 664 |
+
module2inspect=module.mlp,
|
| 665 |
+
)
|
| 666 |
+
)
|
| 667 |
+
# fc2
|
| 668 |
+
scales_list.append(
|
| 669 |
+
_auto_get_scale_wa(
|
| 670 |
+
prev_op=module.mlp.up_proj,
|
| 671 |
+
layers=[module.mlp.down_proj],
|
| 672 |
+
layers_name=["down_proj"],
|
| 673 |
+
inp=input_feat["mlp.down_proj"],
|
| 674 |
+
reweight_ratio=reweight_ratio_dict["mlp"],
|
| 675 |
+
)
|
| 676 |
+
)
|
| 677 |
+
else:
|
| 678 |
+
raise NotImplementedError(f"{type(module)} not supported yet!")
|
| 679 |
+
|
| 680 |
+
return scales_list
|
| 681 |
+
|
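The core trick in `_search_module_scale_wa` above is the temporary module swap: for each candidate ratio, every target linear is scaled in place, replaced by a fake-quant `WALinear`, evaluated on the inversely scaled input, and then restored. One grid step, condensed from the code above into a linear sketch (masked losses and the bare `nn.Linear` special case elided):

    # One grid-search step (condensed sketch of _search_module_scale_wa):
    scales = x_max.pow(ratio).clamp(min=1e-4).view(-1)
    scales = scales / (scales.max() * scales.min()).sqrt()       # center the dynamic range
    for fc, fc_name in zip(linears2scale, layers_name):
        fc.weight.mul_(scales.view(1, -1).to(fc.weight.device))  # fold scale into W
        setattr(block, fc_name, WALinear.from_float(             # fake-quant W and A
            fc, weight_quant="per_channel", act_quant="per_token",
            w_bit=w_bit, a_bit=a_bit))
    out = block(x / scales.view(1, 1, -1), **kwargs)             # undo the scale on the input
    loss = (org_out - out).float().abs().mean().item()           # unmasked MAE fallback
    for fc, fc_name in zip(linears2scale, layers_name):
        setattr(block, fc_name, fc)                              # put the originals back
    block.load_state_dict(org_sd)                                # undo the in-place weight scaling

The per-ratio `load_state_dict(org_sd)` is what reverses the in-place `weight.mul_`, so every candidate ratio is evaluated from the same floating-point starting weights.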
qmllm/methods/mbq/quantize/auto_scale_wa_distort.py
ADDED
|
@@ -0,0 +1,840 @@
|
| 1 |
+
import gc
|
| 2 |
+
import copy
|
| 3 |
+
import torch
|
| 4 |
+
import functools
|
| 5 |
+
import torch.nn as nn
|
| 6 |
+
|
| 7 |
+
from transformers.models.bloom.modeling_bloom import BloomBlock, BloomGelu
|
| 8 |
+
from transformers.models.opt.modeling_opt import OPTDecoderLayer
|
| 9 |
+
from transformers.models.llama.modeling_llama import LlamaDecoderLayer, LlamaRMSNorm
|
| 10 |
+
from transformers.activations import GELUActivation

from collections import defaultdict
from transformers.models.qwen2.modeling_qwen2 import Qwen2RMSNorm

from .qmodule import ScaledActivation
from .quantizer import get_module_by_name_suffix
from qmllm.utils.search import get_op_by_name, get_op_name, set_op_by_name
from qmllm.quantization.quant_funcs import pseudo_quantize_tensor
from qmllm.quantization.qlinear import WALinear

__all__ = ["auto_scale_block_wa_distort"]


@torch.no_grad()
def get_weight_scale(weight, q_group_size=-1):
    org_shape = weight.shape
    if q_group_size > 0:
        weight = weight.view(-1, q_group_size)
    scale = weight.abs() / weight.abs().amax(dim=1, keepdim=True)
    scale = scale.view(org_shape)
    scale = scale.mean(0)
    return scale


@torch.no_grad()
def get_act_scale(x):
    return x.abs().view(-1, x.shape[-1]).mean(0)


@torch.no_grad()
def scale_ln_fcs(ln, fcs, scales):
    # fold 1/scales into the norm and scales into the following linears
    if not isinstance(fcs, list):
        fcs = [fcs]

    scales = scales.to(ln.weight.device)

    ln.weight.div_(scales)
    if hasattr(ln, "bias") and ln.bias is not None:
        ln.bias.div_(scales)

    for fc in fcs:
        fc.weight.mul_(scales.view(1, -1))

    for p in ln.parameters():
        assert torch.isnan(p).sum() == 0
    for fc in fcs:
        for p in fc.parameters():
            assert torch.isnan(p).sum() == 0


@torch.no_grad()
def scale_fc_fc(fc1, fc2, scales):
    assert isinstance(fc1, nn.Linear)
    assert isinstance(fc2, nn.Linear)
    # assert fc1.out_features == fc2.in_features

    scales = scales.to(fc1.weight.device)

    # fc1.weight.div_(scales.view(-1, 1))
    fc1.weight[-scales.size(0):].div_(scales.view(-1, 1))
    if fc1.bias is not None:
        fc1.bias.div_(scales.view(-1))

    fc2.weight.mul_(scales.view(1, -1))

    for p in fc1.parameters():
        assert torch.isnan(p).sum() == 0
    for p in fc2.parameters():
        assert torch.isnan(p).sum() == 0


@torch.no_grad()
def scale_gelu_fc(gelu, fc, scales):
    assert isinstance(gelu, (nn.GELU, BloomGelu, GELUActivation))
    assert isinstance(fc, nn.Linear)

    fc.weight.mul_(scales.view(1, -1).to(fc.weight.device))

    for p in fc.parameters():
        assert torch.isnan(p).sum() == 0


@torch.no_grad()
def auto_scale_block_wa_distort(module, module_kwargs, w_bit, a_bit, q_config, input_feat, ans_mask, vis_mask, reweight_ratio_dict, q_input, loss_mode="mae"):

    if "use_cache" in module_kwargs:
        module_kwargs.pop("use_cache")

    def _search_module_scale_wa_distort(block, linears2scale: list, layers_name, x, x_q, reweight_ratio, kwargs={}):
        # w: co, ci
        # x: n, ci
        x = x.to(next(block.parameters()).device)
        x_q = x_q.to(next(block.parameters()).device)
        with torch.no_grad():
            org_out = block(x, **kwargs)
            if isinstance(org_out, tuple):
                org_out = org_out[0]

        x_max = get_act_scale(x_q)

        best_error = float("inf")
        best_ratio = -1
        best_scales = None

        n_grid = 20
        history = []

        org_sd = {k: v.cpu() for k, v in block.state_dict().items()}
        for ratio in range(n_grid):
            ratio = ratio * 1 / n_grid
            scales = x_max.pow(ratio).clamp(min=1e-4).view(-1)
            scales = scales / (scales.max() * scales.min()).sqrt()

            if isinstance(block, nn.Linear):
                new_block = None
            for fc, fc_name in zip(linears2scale, layers_name):
                # fc.weight.mul_(scales.view(1, -1).to(fc.weight.device))
                # fc.weight.data = w_quantize_func(fc.weight.data) / (scales.view(1, -1))
                fc.weight.mul_(scales.view(1, -1).to(fc.weight.device))
                new_fc = WALinear.from_float(fc, weight_quant="per_channel", act_quant="per_token", w_bit=w_bit, a_bit=a_bit)

                if isinstance(block, nn.Linear):
                    new_block = copy.deepcopy(new_fc)
                else:
                    setattr(block, fc_name, new_fc)

                del new_fc
                torch.cuda.empty_cache()

            x_scale = x_q / (scales.view(1, 1, -1))

            if isinstance(block, nn.Linear):
                out = new_block(x_scale, **kwargs)
            else:
                out = block(x_scale, **kwargs)

            if isinstance(out, tuple):
                out = out[0]

            # loss = (
            #     (org_out - out).float().pow(2).mean().item()
            # )  # float prevents overflow

            if loss_mode == "mse":
                if ans_mask is not None and vis_mask is not None:
                    ans_mask_expand = ans_mask.unsqueeze(-1).expand_as(out)
                    vis_mask_expand = vis_mask.unsqueeze(-1).expand_as(out).cuda()
                    masked_diff_ans = ((org_out - out).float().pow(2) * ans_mask_expand)
                    masked_diff_vis = ((org_out - out).float().pow(2) * vis_mask_expand)
                    if reweight_ratio is not None:
                        loss = masked_diff_ans.sum() / ans_mask_expand.sum() + reweight_ratio * (masked_diff_vis.sum() / vis_mask_expand.sum())
                    else:
                        loss = (
                            (org_out - out).float().pow(2).mean().item()
                        )
                elif ans_mask is not None and vis_mask is None:
                    ans_mask_expand = ans_mask.unsqueeze(-1).expand_as(out)
                    masked_diff = ((org_out - out).float().pow(2) * ans_mask_expand)
                    loss = masked_diff.sum() / ans_mask_expand.sum()
                else:
                    loss = (
                        (org_out - out).float().pow(2).mean().item()
                    )  # float prevents overflow
            elif loss_mode == "mae":
                if ans_mask is not None and vis_mask is not None:
                    ans_mask_expand = ans_mask.unsqueeze(-1).expand_as(out)
                    vis_mask_expand = vis_mask.unsqueeze(-1).expand_as(out).cuda()
                    masked_diff_ans = ((org_out - out).float().abs() * ans_mask_expand)
                    masked_diff_vis = ((org_out - out).float().abs() * vis_mask_expand)
                    if reweight_ratio is not None:
                        loss = (masked_diff_ans.sum() + reweight_ratio * masked_diff_vis.sum()) / (ans_mask_expand.sum() + vis_mask_expand.sum())
                    else:
                        loss = (
                            (org_out - out).float().abs().mean().item()
                        )
                elif ans_mask is not None and vis_mask is None:
                    ans_mask_expand = ans_mask.unsqueeze(-1).expand_as(out)
                    masked_diff = ((org_out - out).float().abs() * ans_mask_expand)
                    loss = masked_diff.sum() / ans_mask_expand.sum()
                else:
                    loss = (
                        (org_out - out).float().abs().mean().item()
                    )  # float prevents overflow

            history.append(loss)
            is_best = loss < best_error
            if is_best:
                best_error = loss
                best_ratio = ratio
                best_scales = scales

            # restore the block
            for fc, fc_name in zip(linears2scale, layers_name):
                if isinstance(block, nn.Linear):
                    continue
                else:
                    setattr(block, fc_name, fc)

            if isinstance(block, nn.Linear):
                del new_block
                torch.cuda.empty_cache()
            block.load_state_dict(org_sd)
        if best_ratio == -1:
            print(history)
            raise Exception
        # print(best_ratio)
        best_scales = best_scales.view(-1)

        assert torch.isnan(best_scales).sum() == 0, best_scales
        return best_scales.detach()

    def _auto_get_scale_wa_distort(prev_op, layers, layers_name, inp, inp_q, reweight_ratio, module2inspect=None, kwargs={}):
        # module2inspect: if given, we will check the output diff of this module instead of layers
        if module2inspect is None:
            assert len(layers) == 1
            module2inspect = layers[0]

        scales = _search_module_scale_wa_distort(module2inspect, layers, layers_name, inp, inp_q, reweight_ratio, kwargs)
        scales = scales.detach().cpu()
        # prev_op_name, [layer_name], scale
        return (
            get_op_name(module, prev_op),
            tuple([get_op_name(module, m) for m in layers]),
            scales,
        )

    scales_list = []  # return the searched scales

    def _auto_get_input_feat_distort(inps_q, scales_list=None):

        # org_sd = {k: v.cpu() for k, v in module.state_dict().items()}

        new_module = copy.deepcopy(module)

        named_linears = {name: m for name, m in new_module.named_modules() if isinstance(m, nn.Linear)}

        if scales_list is not None:
            apply_scale(new_module, scales_list)
        new_module.cuda()
        for n, m in named_linears.items():
            new_linear = WALinear.from_float(m, weight_quant="per_channel", act_quant="per_token", w_bit=w_bit, a_bit=a_bit)
            father_module = get_module_by_name_suffix(new_module, '.'.join(n.split(".")[:-1]))
            setattr(father_module, n.split('.')[-1], new_linear)
            del new_linear, m
            torch.cuda.empty_cache()

        named_linears = {name: m for name, m in new_module.named_modules() if isinstance(m, WALinear)}

        def cache_input_hook(m, x, y, name, feat_dict):
            x = x[0]
            x = x.detach().cpu()
            feat_dict[name].append(x)

        input_feat_q = defaultdict(list)
        handles = []
        for name in named_linears:
            handles.append(
                named_linears[name].register_forward_hook(
                    functools.partial(cache_input_hook, name=name, feat_dict=input_feat_q)
                )
            )

        inps_q = inps_q.to(next(new_module.parameters()).device)
        new_module(inps_q, **module_kwargs)
        for h in handles:
            h.remove()

        input_feat_q = {k: torch.cat(v, dim=0) for k, v in input_feat_q.items()}

        del new_module
        torch.cuda.empty_cache()

        # module.load_state_dict(org_sd)

        return input_feat_q

    # NOTE: the OPT/Bloom/MPT/Falcon/BigCode/NeoX branches below still call
    # _auto_get_scale from the plain AWQ auto_scale flow; it is not defined in
    # this file, so those paths would raise NameError if reached. Only the
    # distort-aware branches (Llama/Qwen2/InternLM2/Qwen2-VL) are exercised here.
    if isinstance(module, OPTDecoderLayer):
        # attention input
        scales_list.append(
            _auto_get_scale(
                prev_op=module.self_attn_layer_norm,
                layers=[
                    module.self_attn.q_proj,
                    module.self_attn.k_proj,
                    module.self_attn.v_proj,
                ],
                inp=input_feat["self_attn.q_proj"],
                module2inspect=module.self_attn,
                kwargs=module_kwargs,
            )
        )
        # attn out
        scales_list.append(
            _auto_get_scale(
                prev_op=module.self_attn.v_proj,
                layers=[module.self_attn.out_proj],
                inp=input_feat["self_attn.out_proj"],
            )
        )
        # fc1
        scales_list.append(
            _auto_get_scale(
                prev_op=module.final_layer_norm,
                layers=[module.fc1],
                inp=input_feat["fc1"],
            )
        )
        # fc2
        scales_list.append(
            _auto_get_scale(
                prev_op=module.fc1,
                layers=[module.fc2],
                inp=input_feat["fc2"],
            )
        )

    elif isinstance(module, LlamaDecoderLayer):
        # attention input
        input_feat_q = _auto_get_input_feat_distort(
            inps_q=q_input,
        )
        scales_list.append(
            _auto_get_scale_wa_distort(
                prev_op=module.input_layernorm,
                layers=[
                    module.self_attn.q_proj,
                    module.self_attn.k_proj,
                    module.self_attn.v_proj,
                ],
                layers_name=[
                    "q_proj",
                    "k_proj",
                    "v_proj",
                ],
                inp=input_feat["self_attn.q_proj"],
                inp_q=input_feat_q["self_attn.q_proj"],
                reweight_ratio=reweight_ratio_dict["attn"],
                module2inspect=module.self_attn,
                kwargs=module_kwargs,
            )
        )
        # attn out
        # Please refer to https://github.com/mit-han-lab/llm-awq/pull/67#issue-1850622696
        input_feat_q = _auto_get_input_feat_distort(
            inps_q=q_input,
            scales_list=scales_list,
        )
        if module.self_attn.v_proj.weight.shape == module.self_attn.o_proj.weight.shape:
            scales_list.append(
                _auto_get_scale_wa_distort(
                    prev_op=module.self_attn.v_proj,
                    layers=[module.self_attn.o_proj],
                    layers_name=["o_proj"],
                    inp=input_feat["self_attn.o_proj"],
                    inp_q=input_feat_q["self_attn.o_proj"],
                    reweight_ratio=reweight_ratio_dict["attn"],
                )
            )
        # fc1
        input_feat_q = _auto_get_input_feat_distort(
            inps_q=q_input,
            scales_list=scales_list,
        )
        scales_list.append(
            _auto_get_scale_wa_distort(
                prev_op=module.post_attention_layernorm,
                layers=[module.mlp.gate_proj, module.mlp.up_proj],
                layers_name=["gate_proj", "up_proj"],
                inp=input_feat["mlp.gate_proj"],
                inp_q=input_feat_q["mlp.gate_proj"],
                reweight_ratio=reweight_ratio_dict["mlp"],
                module2inspect=module.mlp,
            )
        )
        # fc2
        input_feat_q = _auto_get_input_feat_distort(
            inps_q=q_input,
            scales_list=scales_list,
        )
        scales_list.append(
            _auto_get_scale_wa_distort(
                prev_op=module.mlp.up_proj,
                layers=[module.mlp.down_proj],
                layers_name=["down_proj"],
                inp=input_feat["mlp.down_proj"],
                inp_q=input_feat_q["mlp.down_proj"],
                reweight_ratio=reweight_ratio_dict["mlp"],
            )
        )

    elif isinstance(module, BloomBlock):
        # attention input
        scales_list.append(
            _auto_get_scale(
                prev_op=module.input_layernorm,
                layers=[module.self_attention.query_key_value],
                inp=input_feat["self_attention.query_key_value"],
                module2inspect=module,
                kwargs=module_kwargs,
            )
        )
        # attn out
        # Please refer to https://github.com/mit-han-lab/llm-awq/issues/2#issuecomment-1606297469
        """
        scales_list.append(_auto_get_scale(
            prev_op=module.self_attention.query_key_value,
            layers=[module.self_attention.dense],
            inp=input_feat['self_attention.dense'],
        ))
        """
        # fc1
        scales_list.append(
            _auto_get_scale(
                prev_op=module.post_attention_layernorm,
                layers=[module.mlp.dense_h_to_4h],
                inp=input_feat["mlp.dense_h_to_4h"],
                module2inspect=module,
                kwargs=module_kwargs,
            )
        )
        # fc2
        scales_list.append(
            _auto_get_scale(
                prev_op=module.mlp.gelu_impl,
                layers=[module.mlp.dense_4h_to_h],
                inp=input_feat["mlp.dense_4h_to_h"],
            )
        )
    elif "mpt" in str(module.__class__).lower():
        # attention input
        scales_list.append(
            _auto_get_scale(
                prev_op=module.norm_1,
                layers=[module.attn.Wqkv],
                inp=input_feat["attn.Wqkv"],
                module2inspect=module.attn,
                kwargs=module_kwargs,
            )
        )

        # attn out
        scales_list.append(
            _auto_get_scale(
                prev_op=module.attn.Wqkv,
                layers=[module.attn.out_proj],
                inp=input_feat["attn.out_proj"],
            )
        )
        # fc1
        scales_list.append(
            _auto_get_scale(
                prev_op=module.norm_2,
                layers=[module.ffn.up_proj],
                inp=input_feat["ffn.up_proj"],
                module2inspect=module.ffn,
            )
        )
        # fc2
        scales_list.append(
            _auto_get_scale(
                prev_op=module.ffn.act,
                layers=[module.ffn.down_proj],
                inp=input_feat["ffn.down_proj"],
            )
        )

    elif "falcon" in str(module.__class__).lower():
        # attn out
        # Haotian: TBD: need to handle repeated scales for MQ
        """
        scales_list.append(_auto_get_scale(
            prev_op=module.self_attention.query_key_value,
            layers=[module.self_attention.dense],
            inp=input_feat['self_attention.dense'],
        ))
        """
        # fc1, as long as it is scaled, everything is screwed up
        if "falcon-7b" in str(module.__class__).lower():
            scales_list.append(
                _auto_get_scale(
                    prev_op=module.input_layernorm,
                    layers=[
                        module.mlp.dense_h_to_4h,
                        module.self_attention.query_key_value,
                    ],
                    inp=input_feat["self_attention.query_key_value"],
                    module2inspect=module,
                    kwargs=module_kwargs,
                )
            )
        elif "falcon-40b" in str(module.__class__).lower():
            scales_list.append(
                _auto_get_scale(
                    prev_op=module.ln_attn,
                    layers=[module.self_attention.query_key_value],
                    inp=input_feat["self_attention.query_key_value"],
                    module2inspect=module,
                    kwargs=module_kwargs,
                )
            )
            scales_list.append(
                _auto_get_scale(
                    prev_op=module.ln_mlp,
                    layers=[module.mlp.dense_h_to_4h],
                    inp=input_feat["mlp.dense_h_to_4h"],
                    module2inspect=module,
                    kwargs=module_kwargs,
                )
            )
        else:
            raise NotImplementedError(
                "Unknown Falcon architecture, currently only falcon-7b and falcon-40b are supported"
            )
        # fc2
        scales_list.append(
            _auto_get_scale(
                prev_op=module.mlp.act,
                layers=[module.mlp.dense_4h_to_h],
                inp=input_feat["mlp.dense_4h_to_h"],
            )
        )
    elif "bigcode" in str(module.__class__).lower():
        scales_list.append(
            _auto_get_scale(
                prev_op=module.ln_1,
                layers=[module.attn.c_attn],
                inp=input_feat["attn.c_attn"],
                module2inspect=module.attn,
                kwargs=module_kwargs,
            )
        )
        # fc1
        scales_list.append(
            _auto_get_scale(
                prev_op=module.ln_2,
                layers=[module.mlp.c_fc],
                inp=input_feat["mlp.c_fc"],
                module2inspect=module.mlp,
            )
        )
        # fc2
        scales_list.append(
            _auto_get_scale(
                prev_op=module.mlp.act,
                layers=[module.mlp.c_proj],
                inp=input_feat["mlp.c_proj"],
            )
        )
    elif "neox" in str(module.__class__).lower():
        scales_list.append(
            _auto_get_scale(
                prev_op=module.input_layernorm,
                layers=[module.attention.query_key_value],
                inp=input_feat["attention.query_key_value"],
                module2inspect=module.attention,
                kwargs=module_kwargs,
            )
        )
        # fc1
        scales_list.append(
            _auto_get_scale(
                prev_op=module.post_attention_layernorm,
                layers=[module.mlp.dense_h_to_4h],
                inp=input_feat["mlp.dense_h_to_4h"],
                module2inspect=module.mlp,
            )
        )
        # fc2
        scales_list.append(
            _auto_get_scale(
                prev_op=module.mlp.act,
                layers=[module.mlp.dense_4h_to_h],
                inp=input_feat["mlp.dense_4h_to_h"],
            )
        )
    elif module.__class__.__name__ == "Qwen2DecoderLayer":
        # attention input
        input_feat_q = _auto_get_input_feat_distort(
            inps_q=q_input,
        )
        scales_list.append(
            _auto_get_scale_wa_distort(
                prev_op=module.input_layernorm,
                layers=[
                    module.self_attn.q_proj,
                    module.self_attn.k_proj,
                    module.self_attn.v_proj,
                ],
                layers_name=[
                    "q_proj",
                    "k_proj",
                    "v_proj",
                ],
                inp=input_feat["self_attn.q_proj"],
                inp_q=input_feat_q["self_attn.q_proj"],
                reweight_ratio=reweight_ratio_dict["attn"],
                module2inspect=module.self_attn,
                kwargs=module_kwargs,
            )
        )
        # attn out
        # Please refer to https://github.com/mit-han-lab/llm-awq/pull/67#issue-1850622696
        input_feat_q = _auto_get_input_feat_distort(
            inps_q=q_input,
            scales_list=scales_list,
        )
        if module.self_attn.v_proj.weight.shape == module.self_attn.o_proj.weight.shape:
            scales_list.append(
                _auto_get_scale_wa_distort(
                    prev_op=module.self_attn.v_proj,
                    layers=[module.self_attn.o_proj],
                    layers_name=["o_proj"],
                    inp=input_feat["self_attn.o_proj"],
                    inp_q=input_feat_q["self_attn.o_proj"],
                    reweight_ratio=reweight_ratio_dict["attn"],
                )
            )
        # fc1
        input_feat_q = _auto_get_input_feat_distort(
            inps_q=q_input,
            scales_list=scales_list,
        )
        scales_list.append(
            _auto_get_scale_wa_distort(
                prev_op=module.post_attention_layernorm,
                layers=[module.mlp.gate_proj, module.mlp.up_proj],
                layers_name=["gate_proj", "up_proj"],
                inp=input_feat["mlp.gate_proj"],
                inp_q=input_feat_q["mlp.gate_proj"],
                reweight_ratio=reweight_ratio_dict["mlp"],
                module2inspect=module.mlp,
            )
        )
        # fc2
        input_feat_q = _auto_get_input_feat_distort(
            inps_q=q_input,
            scales_list=scales_list,
        )
        scales_list.append(
            _auto_get_scale_wa_distort(
                prev_op=module.mlp.up_proj,
                layers=[module.mlp.down_proj],
                layers_name=["down_proj"],
                inp=input_feat["mlp.down_proj"],
                inp_q=input_feat_q["mlp.down_proj"],
                reweight_ratio=reweight_ratio_dict["mlp"],
            )
        )
    elif module.__class__.__name__ == "InternLM2DecoderLayer":
        # attention input
        input_feat_q = _auto_get_input_feat_distort(
            inps_q=q_input,
        )
        scales_list.append(
            _auto_get_scale_wa_distort(
                prev_op=module.attention_norm,
                layers=[
                    module.attention.wqkv,
                ],
                layers_name=[
                    "wqkv",
                ],
                inp=input_feat["attention.wqkv"],
                inp_q=input_feat_q["attention.wqkv"],
                reweight_ratio=reweight_ratio_dict["attn"],
                module2inspect=module.attention,
                kwargs=module_kwargs,
            )
        )
        # attn out
        # Please refer to https://github.com/mit-han-lab/llm-awq/pull/67#issue-1850622696
        input_feat_q = _auto_get_input_feat_distort(
            inps_q=q_input,
            scales_list=scales_list,
        )
        if module.attention.wqkv.weight.shape == module.attention.wo.weight.shape:
            scales_list.append(
                _auto_get_scale_wa_distort(
                    prev_op=module.attention.wqkv,
                    layers=[module.attention.wo],
                    layers_name=["wo"],
                    inp=input_feat["attention.wo"],
                    inp_q=input_feat_q["attention.wo"],
                    reweight_ratio=reweight_ratio_dict["attn"],
                )
            )
        # fc1
        input_feat_q = _auto_get_input_feat_distort(
            inps_q=q_input,
            scales_list=scales_list,
        )
        scales_list.append(
            _auto_get_scale_wa_distort(
                prev_op=module.ffn_norm,
                layers=[module.feed_forward.w1, module.feed_forward.w3],
                layers_name=["w1", "w3"],
                inp=input_feat["feed_forward.w1"],
                inp_q=input_feat_q["feed_forward.w1"],
                reweight_ratio=reweight_ratio_dict["mlp"],
                module2inspect=module.feed_forward,
            )
        )
        # fc2
        input_feat_q = _auto_get_input_feat_distort(
            inps_q=q_input,
            scales_list=scales_list,
        )
        scales_list.append(
            _auto_get_scale_wa_distort(
                prev_op=module.feed_forward.w3,
                layers=[module.feed_forward.w2],
                layers_name=["w2"],
                inp=input_feat["feed_forward.w2"],
                inp_q=input_feat_q["feed_forward.w2"],
                reweight_ratio=reweight_ratio_dict["mlp"],
            )
        )
    elif module.__class__.__name__ == "Qwen2VLDecoderLayer":
        # attention input
        input_feat_q = _auto_get_input_feat_distort(
            inps_q=q_input,
        )
        scales_list.append(
            _auto_get_scale_wa_distort(
                prev_op=module.input_layernorm,
                layers=[
                    module.self_attn.q_proj,
                    module.self_attn.k_proj,
                    module.self_attn.v_proj,
                ],
                layers_name=[
                    "q_proj",
                    "k_proj",
                    "v_proj",
                ],
                inp=input_feat["self_attn.q_proj"],
                inp_q=input_feat_q["self_attn.q_proj"],
                reweight_ratio=reweight_ratio_dict["attn"],
                module2inspect=module.self_attn,
                kwargs=module_kwargs,
            )
        )
        # attn out
        # Please refer to https://github.com/mit-han-lab/llm-awq/pull/67#issue-1850622696
        input_feat_q = _auto_get_input_feat_distort(
            inps_q=q_input,
            scales_list=scales_list,
        )
        if module.self_attn.v_proj.weight.shape == module.self_attn.o_proj.weight.shape:
            scales_list.append(
                _auto_get_scale_wa_distort(
                    prev_op=module.self_attn.v_proj,
                    layers=[module.self_attn.o_proj],
                    layers_name=["o_proj"],
                    inp=input_feat["self_attn.o_proj"],
                    inp_q=input_feat_q["self_attn.o_proj"],
                    reweight_ratio=reweight_ratio_dict["attn"],
                )
            )
        # fc1
        input_feat_q = _auto_get_input_feat_distort(
            inps_q=q_input,
            scales_list=scales_list,
        )
        scales_list.append(
            _auto_get_scale_wa_distort(
                prev_op=module.post_attention_layernorm,
                layers=[module.mlp.gate_proj, module.mlp.up_proj],
                layers_name=["gate_proj", "up_proj"],
                inp=input_feat["mlp.gate_proj"],
                inp_q=input_feat_q["mlp.gate_proj"],
                reweight_ratio=reweight_ratio_dict["mlp"],
                module2inspect=module.mlp,
            )
        )
        # fc2
        input_feat_q = _auto_get_input_feat_distort(
            inps_q=q_input,
            scales_list=scales_list,
        )
        scales_list.append(
            _auto_get_scale_wa_distort(
                prev_op=module.mlp.up_proj,
                layers=[module.mlp.down_proj],
                layers_name=["down_proj"],
                inp=input_feat["mlp.down_proj"],
                inp_q=input_feat_q["mlp.down_proj"],
                reweight_ratio=reweight_ratio_dict["mlp"],
            )
        )
    else:
        raise NotImplementedError(f"{type(module)} not supported yet!")

    return scales_list


def apply_scale(module, scales_list, input_feat_dict=None):
    for prev_op_name, layer_names, scales in scales_list:
        prev_op = get_op_by_name(module, prev_op_name)
        layers = [get_op_by_name(module, name) for name in layer_names]

        prev_op.cuda()
        for layer in layers:
            layer.cuda()
        scales.cuda()

        if isinstance(prev_op, nn.Linear):
            assert len(layers) == 1
            scale_fc_fc(prev_op, layers[0], scales)
        elif isinstance(prev_op, (nn.LayerNorm, LlamaRMSNorm)) or prev_op.__class__.__name__ == "InternLM2RMSNorm" or prev_op.__class__.__name__ == "Qwen2RMSNorm":
            scale_ln_fcs(prev_op, layers, scales)
        elif isinstance(prev_op, (nn.GELU, BloomGelu, GELUActivation)):
            new_module = ScaledActivation(prev_op, scales)
            set_op_by_name(module, prev_op_name, new_module)
            scale_gelu_fc(prev_op, layers[0], scales)
        else:
            raise NotImplementedError(f"prev_op {type(prev_op)} not supported yet!")

        # apply the scaling to input feat if given
        if input_feat_dict is not None:
            for layer_name in layer_names:
                inp = input_feat_dict[layer_name]
                inp.div_(scales.view(1, -1).to(inp.device))

        prev_op.cpu()
        for layer in layers:
            layer.cpu()
        scales.cpu()
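The grid search inside _search_module_scale_wa_distort is easier to see in isolation. Below is a minimal standalone sketch with a toy linear layer and a plain MSE loss; the real code quantizes the scaled weights through WALinear and applies the masked/reweighted losses before comparing, and all shapes here are illustrative only.

import torch

torch.manual_seed(0)
x = torch.randn(64, 128)                       # activations: n, ci
w = torch.randn(256, 128)                      # weight: co, ci
org_out = x @ w.t()

x_max = x.abs().view(-1, x.shape[-1]).mean(0)  # same reduction as get_act_scale

best_error, best_scales = float("inf"), None
n_grid = 20
for i in range(n_grid):
    ratio = i / n_grid
    scales = x_max.pow(ratio).clamp(min=1e-4)
    scales = scales / (scales.max() * scales.min()).sqrt()
    # fold scales into the weight and divide them out of the activation;
    # the real search would quantize (w * scales) here before comparing
    out = (x / scales) @ (w * scales).t()
    loss = (org_out - out).float().pow(2).mean().item()
    if loss < best_error:
        best_error, best_scales = loss, scales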
qmllm/methods/mbq/quantize/pre_quant.py
ADDED
@@ -0,0 +1,481 @@
import torch
import torch.nn as nn
import tqdm
import copy
import gc
import functools
from collections import defaultdict
from typing import List

import numpy as np
from torch.nn import CrossEntropyLoss
from transformers.models.bloom.modeling_bloom import BloomForCausalLM
from transformers.models.opt.modeling_opt import OPTForCausalLM
from transformers.models.llama.modeling_llama import LlamaForCausalLM

from qmllm.utils.search import append_str_prefix, get_op_name

from qmllm.methods.mbq.quantize.auto_scale_wa_distort import auto_scale_block_wa_distort
from qmllm.methods.mbq.quantize.auto_scale_wa import auto_scale_block_wa
from qmllm.methods.mbq.quantize.auto_scale_distort import auto_scale_block_distort
from qmllm.methods.mbq.quantize.auto_scale import auto_scale_block, apply_scale
from qmllm.quantization.qlinear import WALinear
from qmllm.quantization.quant_funcs import pseudo_quantize_tensor
from .quantizer import get_module_by_name_suffix


__all__ = ["run_mbq"]


class GradCacheHook:
    def __init__(self, vis_masks, cap_masks):
        if vis_masks is None or cap_masks is None:
            raise ValueError
        self.hooks = []
        self.vis_masks = vis_masks.cpu()
        self.cap_masks = cap_masks.cpu()
        self.steps = {}
        self.grad_dict = {}

    def cache_grad_hook(self, module, inp, out, name):
        # initialize step counter, we use step counter to find the right mask for the grad
        if name not in self.steps:
            self.steps[name] = 0

        if name not in self.grad_dict:
            self.grad_dict[name] = {"vis_grad": [], "cap_grad": []}

        output_grad = out[0].float()
        step = self.steps[name]

        B, N, C = output_grad.shape

        for batch_idx in range(B):
            vis_mask = self.vis_masks[step]
            cap_mask = self.cap_masks[step]

            vis_grad = output_grad[batch_idx][vis_mask]
            cap_grad = output_grad[batch_idx][cap_mask]

            vis_grad_avg = vis_grad.abs().mean()
            cap_grad_avg = cap_grad.abs().mean()

            self.grad_dict[name]["vis_grad"].append(vis_grad_avg.detach().cpu())
            self.grad_dict[name]["cap_grad"].append(cap_grad_avg.detach().cpu())

            step = step + 1

        self.steps[name] = step

    def register_hooks(self, layers):
        for n, m in layers.named_modules():
            if isinstance(m, nn.Linear) and any([_ in n for _ in ["wo", "w2", "down_proj", "o_proj", "v_proj", "gate_proj", "up_proj", "w1", "w3"]]):
                # print(f"Registering hook for layer.{n}")
                self.hooks.append(
                    m.register_full_backward_hook(
                        functools.partial(self.cache_grad_hook, name=f"layers.{n}")
                    )
                )

    def remove_hooks(self):
        for h in self.hooks:
            h.remove()
        self.hooks.clear()

    def get_grad_dict(self):
        return self.grad_dict

    def get_avg_grad_dict(self):
        avg_grad_dict = {}

        for name, grad_values in self.grad_dict.items():
            mean_vis = torch.mean(torch.stack(grad_values["vis_grad"]))
            mean_cap = torch.mean(torch.stack(grad_values["cap_grad"]))

            avg_grad_dict[name] = {
                "vis_avg_grad": mean_vis.item(),
                "cap_avg_grad": mean_cap.item()
            }

        return avg_grad_dict
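For reference, run_mbq below turns these cached averages into per-layer reweight ratios as max(vis_avg_grad / cap_avg_grad, median-over-layers). A toy sketch with made-up numbers; the layer names and values are illustrative, not from the repo:

import numpy as np

grad_avg_dict = {
    "layers.0.self_attn.o_proj": {"vis_avg_grad": 0.8, "cap_avg_grad": 0.4},
    "layers.1.self_attn.o_proj": {"vis_avg_grad": 0.3, "cap_avg_grad": 0.6},
}
attn_list = [v["vis_avg_grad"] / v["cap_avg_grad"] for v in grad_avg_dict.values()]
attn_median = np.median(attn_list)  # 1.25 for these numbers
reweight_ratios = {
    k: max(v["vis_avg_grad"] / v["cap_avg_grad"], attn_median)
    for k, v in grad_avg_dict.items()
}  # layer 0 -> 2.0, layer 1 floored to 1.25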
def get_named_linears(module):
    return {name: m for name, m in module.named_modules() if isinstance(m, nn.Linear)}


def get_blocks(model):
    if model.__class__.__name__ == "LlamaForCausalLM":
        layers = model.model.layers
    elif model.__class__.__name__ == "LlavaLlamaForCausalLM":
        # layers = [model.model.layers, model.model.vision_tower.vision_tower.vision_model.encoder.layers]
        layers = model.model.layers
    elif model.__class__.__name__ == "LlavaQwenForCausalLM":
        layers = model.model.layers
    elif model.__class__.__name__ == "InternLM2ForCausalLM":
        layers = model.model.layers
    elif model.__class__.__name__ == "InternVLChatModel":
        layers = model.language_model.model.layers
    elif model.__class__.__name__ == "Qwen2VLForConditionalGeneration":
        layers = model.model.layers
    elif model.__class__.__name__ == "LlavaLlamaModel":
        layers = model.llm.model.layers
    elif isinstance(model, OPTForCausalLM):
        layers = model.model.decoder.layers
    elif isinstance(model, BloomForCausalLM):
        layers = model.transformer.h
    elif "mpt" in str(model.__class__).lower():
        layers = model.transformer.blocks
    elif "falcon" in str(model.__class__).lower():
        layers = model.transformer.h
    elif "bigcode" in str(model.__class__).lower():
        layers = model.transformer.h
    elif "neox" in str(model.__class__).lower():
        layers = model.gpt_neox.layers
    else:
        raise NotImplementedError(type(model))
    return layers


def move_embed(model, device):
    if isinstance(model, LlamaForCausalLM):
        model.model.embed_tokens = model.model.embed_tokens.to(device)
        model.model.rotary_emb = model.model.rotary_emb.to(device)
    elif isinstance(model, OPTForCausalLM):
        model.model.decoder.embed_tokens = model.model.decoder.embed_tokens.to(device)
        model.model.decoder.embed_positions = model.model.decoder.embed_positions.to(
            device
        )
    elif isinstance(model, BloomForCausalLM):
        model.transformer.word_embeddings = model.transformer.word_embeddings.to(device)
        model.transformer.word_embeddings_layernorm = (
            model.transformer.word_embeddings_layernorm.to(device)
        )
    elif "mpt" in str(model.__class__).lower():
        model.transformer.wte = model.transformer.wte.to(device)
        model.transformer.emb_drop = model.transformer.emb_drop.to(device)
    elif "falcon" in str(model.__class__).lower():
        model.transformer.word_embeddings = model.transformer.word_embeddings.to(device)
    elif "bigcode" in str(model.__class__).lower():
        model.transformer.wte = model.transformer.wte.to(device)
        model.transformer.wpe = model.transformer.wpe.to(device)
        model.transformer.drop = model.transformer.drop.to(device)
    elif "neox" in str(model.__class__).lower():
        model.gpt_neox.embed_in = model.gpt_neox.embed_in.to(device)
        model.gpt_neox.emb_dropout = model.gpt_neox.emb_dropout.to(device)
        model.embed_out = model.embed_out.to(device)
    elif model.__class__.__name__ == "LlavaLlamaForCausalLM":
        model.model.embed_tokens = model.model.embed_tokens.to(device)
        model.model.vision_tower.vision_tower.vision_model.embeddings.to(device)
    elif model.__class__.__name__ == "LlavaQwenForCausalLM":
        model.model.embed_tokens = model.model.embed_tokens.to(device)
        # model.model.rotary_emb = model.model.rotary_emb.to(device)
    elif model.__class__.__name__ == "InternLM2ForCausalLM":
        model.model.tok_embeddings = model.model.tok_embeddings.to(device)
    elif model.__class__.__name__ == "InternVLChatModel":
        model.language_model.model.tok_embeddings = model.language_model.model.tok_embeddings.to(device)
    elif model.__class__.__name__ == "Qwen2VLForConditionalGeneration":
        model.model.embed_tokens = model.model.embed_tokens.to(device)
    elif model.__class__.__name__ == "LlavaLlamaModel":
        model.llm.model.embed_tokens = model.llm.model.embed_tokens.to(device)
    else:
        raise NotImplementedError(type(model))


def process_input(prompt_inputs, prompt_kwargs):
    inputs = {**prompt_inputs, **prompt_kwargs}
    inputs["use_cache"] = False
    vision_mask = inputs.pop("vision_mask", None)
    caption_mask = inputs.pop("caption_mask", None)

    return inputs, vision_mask, caption_mask


@torch.no_grad()
def run_mbq(
    model,
    prompt_inputs,
    prompt_kwargs,
    w_bit,
    a_bit,
    q_config,
    auto_scale=True,
    loss_mode="mae",
    wa_quant=False,
    reweight=False,
    distort=False
):
    if "bigcode" in str(model.model.__class__).lower():
        # otherwise attention_mask will always be on cpu.
        model.transformer.bias = model.transformer.bias.to("cuda")

    layers = get_blocks(model.model)

    inps = []
    layer_kwargs = {}

    layers[0] = layers[0].cuda()
    move_embed(model.model, "cuda")

    # get input and kwargs to layer 0
    # with_kwargs is only supported in PyTorch 2.0
    # use this Catcher hack for now
    class Catcher(nn.Module):
        def __init__(self, module):
            super().__init__()
            self.module = module

        def forward(self, inp, **kwargs):
            inps.append(inp)
            layer_kwargs.update(kwargs)
            raise ValueError  # early exit to break later inference

    # patch layer 0 to catch input and kwargs
    layers[0] = Catcher(layers[0])

    inputs, vision_mask, caption_mask = process_input(prompt_inputs, prompt_kwargs)

    model.to_cuda()
    try:
        model(**inputs)
    except ValueError:  # work with early exit
        pass

    model.to_cpu()
    layers[0] = layers[0].module  # restore
    inps = inps[0]
    layer_kwargs["use_cache"] = False

    layers[0] = layers[0].cpu()
    move_embed(model.model, "cpu")

    gc.collect()
    torch.cuda.empty_cache()

    mbq_results = {
        "scale": [],
    }

    if reweight:
        model.to_cuda()
        print("Save gradient...")
        # save gradient
        grad_cache = GradCacheHook(vis_masks=vision_mask, cap_masks=caption_mask)
        grad_cache.register_hooks(layers=layers)

        with torch.enable_grad():
            mini_batch = 1
            total_samples = next(iter(prompt_inputs.values())).shape[0]
            accum_steps = int(total_samples / mini_batch)

            for i in tqdm.tqdm(range(0, total_samples, mini_batch), desc="Running gradient calculation..."):
                mini_inputs = {}
                for k in inputs:
                    if isinstance(inputs[k], torch.Tensor):
                        mini_inputs[k] = inputs[k][i:i+mini_batch]

                outputs = model(**mini_inputs)

                loss = outputs[0]

                loss = loss / accum_steps
                loss.backward()

        model.to_cpu()
        grad_avg_dict = grad_cache.get_avg_grad_dict()
        grad_cache.remove_hooks()
        del grad_cache

        attn_list = []
        mlp_list = []

        for key_name in grad_avg_dict:
            if "down_" in key_name or "w2" in key_name:
                mlp_list.append(grad_avg_dict[key_name]["vis_avg_grad"] / grad_avg_dict[key_name]["cap_avg_grad"])
            if "o_proj" in key_name or "wo" in key_name:
                attn_list.append(grad_avg_dict[key_name]["vis_avg_grad"] / grad_avg_dict[key_name]["cap_avg_grad"])

        attn_median = np.median(attn_list)
        mlp_median = np.median(mlp_list)

    if distort:
        # assert wa_quant, "We only support distort input in weight-activation quantization!!!"
        print("Use distort input...")
        inps_distort = copy.deepcopy(inps)

    gc.collect()
    torch.cuda.empty_cache()

    # solve layer by layer
    for i in tqdm.tqdm(range(len(layers)), desc="Running MBQ..."):
        layer = layers[i]
        layer = layer.cuda()
        named_linears = get_named_linears(layer)

        # firstly, get input features of all linear layers
        def cache_input_hook(m, x, y, name, feat_dict):
            x = x[0]
            x = x.detach().cpu()
            feat_dict[name].append(x)

        input_feat = defaultdict(list)
        handles = []
        for name in named_linears:
            handles.append(
                named_linears[name].register_forward_hook(
                    functools.partial(cache_input_hook, name=name, feat_dict=input_feat)
                )
            )
        inps = inps.to(next(layer.parameters()).device)  # in case multi-gpu
        # get output as next layer's input

        for k in layer_kwargs:
            if isinstance(layer_kwargs[k], torch.Tensor):
                layer_kwargs[k] = layer_kwargs[k].to(next(layer.parameters()).device)

        inps = layer(inps, **layer_kwargs)[0]
        for h in handles:
            h.remove()
        # now solve for scaling
        input_feat = {k: torch.cat(v, dim=0) for k, v in input_feat.items()}

        # Clear GPU memory
        torch.cuda.empty_cache()

        if reweight:
            scale_reweight_ratio_dict = {}
            for key, value in grad_avg_dict.items():
                item_list = key.split(".")
                if str(i) in item_list:
                    if "wo" in item_list or "o_proj" in item_list:
                        scale_reweight_ratio_dict["attn"] = max((value["vis_avg_grad"] / value["cap_avg_grad"]), attn_median)
                    elif "w2" in item_list or "down_proj" in item_list:
                        scale_reweight_ratio_dict["mlp"] = max((value["vis_avg_grad"] / value["cap_avg_grad"]), mlp_median)
        else:
            scale_reweight_ratio_dict = {
                "attn": None,
                "mlp": None
            }

        if (
            auto_scale
        ):  # if it applies, we should also modify the input_feat with scales
            if not reweight:
                ans_mask = None
                vis_mask = None
            else:
                ans_mask = caption_mask
                vis_mask = vision_mask

            if wa_quant:
                if distort:
                    scales_list = auto_scale_block_wa_distort(
                        layer,
                        layer_kwargs,
                        w_bit=w_bit,
                        a_bit=a_bit,
                        q_config=q_config,
                        input_feat=input_feat,
                        ans_mask=ans_mask,
                        vis_mask=vis_mask,
                        reweight_ratio_dict=scale_reweight_ratio_dict,
                        q_input=inps_distort,
                        loss_mode=loss_mode
                    )
                else:
                    scales_list = auto_scale_block_wa(
                        layer,
                        layer_kwargs,
                        w_bit=w_bit,
                        a_bit=a_bit,
                        q_config=q_config,
                        input_feat=input_feat,
                        ans_mask=ans_mask,
                        vis_mask=vis_mask,
                        reweight_ratio_dict=scale_reweight_ratio_dict,
                        loss_mode=loss_mode
                    )
            else:
                if distort:
                    scales_list = auto_scale_block_distort(
                        layer,
                        layer_kwargs,
                        w_bit=w_bit,
                        q_config=q_config,
                        input_feat=input_feat,
                        ans_mask=ans_mask,
                        vis_mask=vis_mask,
                        reweight_ratio_dict=scale_reweight_ratio_dict,
                        q_input=inps_distort,
                        loss_mode=loss_mode
                    )
                else:
                    scales_list = auto_scale_block(
                        layer,
                        layer_kwargs,
                        w_bit=w_bit,
                        q_config=q_config,
                        input_feat=input_feat,
                        ans_mask=ans_mask,
                        vis_mask=vis_mask,
                        reweight_ratio_dict=scale_reweight_ratio_dict,
                        loss_mode=loss_mode
                    )

            # apply_scale(layer, scales_list, input_feat_dict=input_feat)
            apply_scale(layers[i], scales_list, input_feat_dict=input_feat)

            if distort:
                # get distort output as next layer's input
                if wa_quant:
                    layer_q = copy.deepcopy(layer)
                    layer_q = layer_q.cuda()
                    named_linears_q = get_named_linears(layer_q)
                    for n, m in named_linears_q.items():
                        new_linear = WALinear.from_float(m, weight_quant="per_channel", act_quant="per_token", w_bit=w_bit, a_bit=a_bit)
                        father_module = get_module_by_name_suffix(layer_q, '.'.join(n.split(".")[:-1]))
                        setattr(father_module, n.split('.')[-1], new_linear)
                        del new_linear, m
                        torch.cuda.empty_cache()

                    inps_distort = inps_distort.to(next(layer_q.parameters()).device)  # in case multi-gpu
                    inps_distort = layer_q(inps_distort, **layer_kwargs)[0]
                    del layer_q
                else:
                    layer_q = copy.deepcopy(layer)
                    layer_q = layer_q.cuda()
                    named_linears_q = get_named_linears(layer_q)
                    for n, m in named_linears_q.items():
                        m.weight.data = pseudo_quantize_tensor(m.weight.data, n_bits=w_bit, **q_config)
                    torch.cuda.empty_cache()

                    inps_distort = inps_distort.to(next(layer_q.parameters()).device)  # in case multi-gpu
                    inps_distort = layer_q(inps_distort, **layer_kwargs)[0]
                    del layer_q

            # append prefix to make names global
            mbq_results["scale"] += append_str_prefix(
                scales_list, get_op_name(model.model, layer) + "."
            )

        # Clear GPU memory
        torch.cuda.empty_cache()

        layer = layer.cpu()
        # Haotian: check activation replacement
        del input_feat
        gc.collect()
        torch.cuda.empty_cache()

    return mbq_results


def apply_mbq(model, mbq_results):
    apply_scale(model, mbq_results["scale"])
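Taken together, a plausible end-to-end invocation of this module might look as follows. This is a sketch, not a documented API: the model wrapper (exposing .model, .to_cuda()/.to_cpu()), the calibration tensors, and the q_config keys are assumptions read off the signatures above.

q_config = {"zero_point": True, "q_group_size": 128}
mbq_results = run_mbq(
    model, prompt_inputs, prompt_kwargs,
    w_bit=4, a_bit=8, q_config=q_config,
    auto_scale=True, loss_mode="mae",
    wa_quant=True, reweight=True, distort=True,
)
# scale names are prefixed relative to model.model inside run_mbq,
# so the scales are applied against that module here (an assumption)
apply_mbq(model.model, mbq_results)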
qmllm/methods/mbq/quantize/qmodule.py
ADDED
@@ -0,0 +1,31 @@
import math
import torch
import torch.nn as nn

def make_divisible(c, divisor):
    # ceil(c / divisor)
    return (c + divisor - 1) // divisor


def calculate_zeros_width(in_features, group_size=128, pack_num=8):
    if group_size >= 128:
        size_multiplier = 1
    elif group_size == 64:
        size_multiplier = 2
    elif group_size == 32:
        size_multiplier = 4
    else:
        raise NotImplementedError

    base_width = make_divisible(in_features // group_size, pack_num)
    base_width = make_divisible(base_width, size_multiplier) * size_multiplier
    return base_width


class ScaledActivation(nn.Module):
    def __init__(self, module, scales):
        super().__init__()
        self.act = module
        self.scales = nn.Parameter(scales.data)

    def forward(self, x):
        # divide the activation output by the per-channel scales
        return self.act(x) / self.scales.view(1, 1, -1).to(x.device)
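A quick sanity check of ScaledActivation, assuming nothing beyond the class above: with all scales equal to 2, the output is exactly the plain activation halved, matching how apply_scale pairs it with a scaled-up following linear.

import torch
import torch.nn as nn

act = ScaledActivation(nn.GELU(), torch.full((16,), 2.0))
x = torch.randn(1, 4, 16)
assert torch.allclose(act(x), nn.GELU()(x) / 2.0)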
qmllm/methods/mbq/quantize/quantizer.py
ADDED
@@ -0,0 +1,102 @@
import torch
import torch.nn as nn
from tqdm import tqdm
import gc
from qmllm.methods.mbq.quantize.qmodule import ScaledActivation
from qmllm.utils.search import set_op_by_name

from transformers.models.bloom.modeling_bloom import BloomBlock
from qmllm.quantization.quant_funcs import pseudo_quantize_tensor
from qmllm.quantization.qlinear import WALinear

EMBEDDING_KEYWORDS = ["embed"]
LM_HEAD_KEYWORDS = ["lm_head", "embed_out", "output"]


def scale_activations(module):
    param = next(module.parameters())
    dtype = param.dtype
    device = param.device
    if isinstance(module, BloomBlock):
        if isinstance(module.mlp.gelu_impl, ScaledActivation):
            return
        c = module.mlp.dense_h_to_4h.out_features
        act = ScaledActivation(
            module.mlp.gelu_impl, torch.ones(c, dtype=dtype, device=device)
        )
        set_op_by_name(module, "mlp.gelu_impl", act)
    elif "mptblock" in str(module.__class__.__name__).lower():
        if isinstance(module.ffn.act, ScaledActivation):
            return
        c = module.ffn.up_proj.out_features
        act = ScaledActivation(
            module.ffn.act, torch.ones(c, dtype=dtype, device=device)
        )
        set_op_by_name(module, "ffn.act", act)
    elif "falcon" in str(module.__class__).lower():
        if isinstance(module.mlp.act, ScaledActivation):
            return
        c = module.mlp.dense_h_to_4h.out_features
        act = ScaledActivation(
            module.mlp.act, torch.ones(c, dtype=dtype, device=device)
        )
        set_op_by_name(module, "mlp.act", act)
    elif "bigcode" in str(module.__class__).lower():
        if isinstance(module.mlp.act, ScaledActivation):
            return
        c = module.mlp.c_proj.out_features
        act = ScaledActivation(
            module.mlp.act, torch.ones(c, dtype=dtype, device=device)
        )
        set_op_by_name(module, "mlp.act", act)
    elif "neox" in str(module.__class__).lower():
        if isinstance(module.mlp.act, ScaledActivation):
            return
        c = module.mlp.dense_h_to_4h.out_features
        act = ScaledActivation(
            module.mlp.act, torch.ones(c, dtype=dtype, device=device)
        )
        set_op_by_name(module, "mlp.act", act)

@torch.no_grad()
def pseudo_quantize_model_weight(
    model,
    w_bit,
    q_config,
):
    from .pre_quant import get_blocks, get_named_linears

    layers = get_blocks(model)
    for i in tqdm(range(len(layers)), desc="pseudo weight quantization..."):
        named_linears = get_named_linears(layers[i])
        for n, m in named_linears.items():
            # m.cuda()
            m.weight.data = pseudo_quantize_tensor(
                m.weight.data, n_bits=w_bit, **q_config
            )
            # m.cpu()


def get_module_by_name_suffix(model, module_name: str):
    for name, module in model.named_modules():
        if name.endswith(module_name):
            return module


@torch.no_grad()
def pseudo_quantize_model_weight_act(
    model,
    w_bit,
    a_bit,
):
    from .pre_quant import get_blocks, get_named_linears

    layers = get_blocks(model)
    for i in tqdm(range(len(layers)), desc="pseudo weight activation quantization..."):
        named_linears = get_named_linears(layers[i])
        for n, m in named_linears.items():
            new_linear = WALinear.from_float(m, weight_quant="per_channel", act_quant="per_token", w_bit=w_bit, a_bit=a_bit)
            father_module = get_module_by_name_suffix(layers[i], '.'.join(n.split(".")[:-1]))
            setattr(father_module, n.split('.')[-1], new_linear)
            del new_linear, m
            torch.cuda.empty_cache()
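The parent-lookup-and-setattr swap used by pseudo_quantize_model_weight_act above is a generic pattern; here it is in isolation on a throwaway module. SmallMLP and the nn.Identity stand-in are illustrative only.

import torch.nn as nn

class SmallMLP(nn.Module):
    def __init__(self):
        super().__init__()
        self.mlp = nn.Sequential(nn.Linear(8, 8))

m = SmallMLP()
name = "mlp.0"  # qualified name of the child to replace
father = get_module_by_name_suffix(m, ".".join(name.split(".")[:-1]))  # -> m.mlp
setattr(father, name.split(".")[-1], nn.Identity())  # swap the child in place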
qmllm/methods/rtn/__init__.py
ADDED
File without changes
qmllm/methods/rtn/entry.py
ADDED
@@ -0,0 +1,21 @@
import os
import torch
from qmllm.methods.rtn.quantizer import pseudo_quantize_model_weight, pseudo_quantize_model_weight_act

def rtn_entry(model, pseudo_quant: bool, wa_quant: bool, zero_point: bool = True, q_group_size: int = 128, w_bit: int = 4, a_bit: int = 16):
    q_config = {
        "zero_point": zero_point,  # by default True
        "q_group_size": q_group_size,  # whether to use group quantization
    }

    if pseudo_quant:
        if not wa_quant:
            # weight quantization
            pseudo_quantize_model_weight(model.model, w_bit=w_bit, q_config=q_config)
        else:
            # weight activation quantization
            pseudo_quantize_model_weight_act(model.model, w_bit=w_bit, a_bit=a_bit)

    model.to_cuda()

    return model
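A hypothetical call into this entry point; the wrapper model (exposing .model and .to_cuda()) is assumed from the signature above.

model = rtn_entry(
    model,
    pseudo_quant=True,
    wa_quant=False,      # weight-only RTN
    zero_point=True,
    q_group_size=128,
    w_bit=4,
)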
qmllm/methods/rtn/quantizer.py
ADDED
|
@@ -0,0 +1,85 @@
import torch
import torch.nn as nn
from tqdm import tqdm
from qmllm.quantization.quant_funcs import pseudo_quantize_tensor
from qmllm.quantization.qlinear import WALinear
from transformers.models.bloom.modeling_bloom import BloomForCausalLM
from transformers.models.opt.modeling_opt import OPTForCausalLM

def get_named_linears(module):
    return {name: m for name, m in module.named_modules() if isinstance(m, nn.Linear)}


def get_blocks(model):
    if model.__class__.__name__ == "LlamaForCausalLM":
        layers = model.model.layers
    elif model.__class__.__name__ == "LlavaLlamaForCausalLM":
        # layers = [model.model.layers, model.model.vision_tower.vision_tower.vision_model.encoder.layers]
        layers = model.model.layers
    elif model.__class__.__name__ == "LlavaQwenForCausalLM":
        layers = model.model.layers
    elif model.__class__.__name__ == "InternLM2ForCausalLM":
        layers = model.model.layers
    elif model.__class__.__name__ == "InternVLChatModel":
        layers = model.language_model.model.layers
    elif model.__class__.__name__ == "Qwen2VLForConditionalGeneration":
        layers = model.model.layers
    elif model.__class__.__name__ == "LlavaLlamaModel":
        layers = model.llm.model.layers
    elif isinstance(model, OPTForCausalLM):
        layers = model.model.decoder.layers
    elif isinstance(model, BloomForCausalLM):
        layers = model.transformer.h
    elif "mpt" in str(model.__class__).lower():
        layers = model.transformer.blocks
    elif "falcon" in str(model.__class__).lower():
        layers = model.transformer.h
    elif "bigcode" in str(model.__class__).lower():
        layers = model.transformer.h
    elif "neox" in str(model.__class__).lower():
        layers = model.gpt_neox.layers
    else:
        raise NotImplementedError(type(model))
    return layers


@torch.no_grad()
def pseudo_quantize_model_weight(
    model,
    w_bit,
    q_config,
):
    layers = get_blocks(model)
    for i in tqdm(range(len(layers)), desc="pseudo weight quantization..."):
        named_linears = get_named_linears(layers[i])
        for n, m in named_linears.items():
            # m.cuda()
            m.weight.data = pseudo_quantize_tensor(
                m.weight.data, n_bits=w_bit, **q_config
            )
            # m.cpu()


def get_module_by_name_suffix(model, module_name: str):
    for name, module in model.named_modules():
        if name.endswith(module_name):
            return module


@torch.no_grad()
def pseudo_quantize_model_weight_act(
    model,
    w_bit,
    a_bit,
):
    layers = get_blocks(model)
    for i in tqdm(range(len(layers)), desc="pseudo weight activation quantization..."):
        named_linears = get_named_linears(layers[i])
        for n, m in named_linears.items():
            new_linear = WALinear.from_float(m, weight_quant="per_channel", act_quant="per_token", w_bit=w_bit, a_bit=a_bit)
            father_module = get_module_by_name_suffix(layers[i], '.'.join(n.split(".")[:-1]))
            setattr(father_module, n.split('.')[-1], new_linear)
            del new_linear, m
            torch.cuda.empty_cache()
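Note that get_blocks dispatches only on the language-model trunk (e.g. model.language_model.model.layers for InternVLChatModel), so RTN here quantizes the LLM decoder layers and leaves the vision encoder untouched. For reference, a minimal sketch of what a grouped, zero-pointed round-to-nearest fake quantizer like pseudo_quantize_tensor typically computes; this follows common RTN conventions and is an illustration, not the actual implementation in qmllm/quantization/quant_funcs.py:

import torch

def rtn_fake_quant(w: torch.Tensor, n_bits: int = 4, q_group_size: int = 128, zero_point: bool = True) -> torch.Tensor:
    """Quantize w to n_bits per group of q_group_size values, then dequantize."""
    orig_shape = w.shape
    w = w.reshape(-1, q_group_size)  # assumes numel is divisible by the group size
    if zero_point:
        # asymmetric: map [min, max] onto the integer grid [0, 2^n - 1]
        w_max = w.amax(dim=1, keepdim=True)
        w_min = w.amin(dim=1, keepdim=True)
        scale = (w_max - w_min).clamp(min=1e-5) / (2 ** n_bits - 1)
        zeros = (-w_min / scale).round()
        q = ((w / scale).round() + zeros).clamp(0, 2 ** n_bits - 1)
        w = (q - zeros) * scale
    else:
        # symmetric: map [-|w|_max, |w|_max] onto [-(2^(n-1) - 1), 2^(n-1) - 1]
        scale = w.abs().amax(dim=1, keepdim=True).clamp(min=1e-5) / (2 ** (n_bits - 1) - 1)
        w = (w / scale).round().clamp(-(2 ** (n_bits - 1) - 1), 2 ** (n_bits - 1) - 1) * scale
    return w.reshape(orig_shape)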