| commitId (string, length 40) | datetime (string, length 30–31) | subject (string, length 37–266) | comment (string, length 109–15.2k) | diff (string, length 238–914k) | gitVersion (string, 9 classes) |
|---|---|---|---|---|---|
85c93d64db23b5cf495c52458d9b3120b96ee05
|
Fri, 12 Apr 2024 20:11:59 -0700
|
[PATCH 0134/1000] [inductor] Write generated files from parent process (#123409)
|
Before this PR, we would pass the generated source code over a pipe to the compile worker, and the compile worker would write out the file. Writing the files from the parent process instead is faster and results in smaller messages to the workers (and lets us skip creating the workers altogether in the warm-start case).

Pull Request resolved: https://github.com/pytorch/pytorch/pull/123409
Approved by: https://github.com/desertfire
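As a rough illustration of the pattern described above (a minimal sketch, not code from this PR: the pool setup, `g++` flags, and file names are hypothetical), the parent process writes the generated source to disk and the worker receives only file paths plus the compile command:

```python
import os
import shlex
import subprocess
import tempfile
from concurrent.futures import ProcessPoolExecutor


def _worker_compile(lock_path, input_path, output_path, cmd):
    # The worker receives only paths and a command string; the generated
    # source was already written to disk by the parent process.
    from filelock import FileLock  # same third-party lock used in the diff

    with FileLock(lock_path, timeout=600):
        if not os.path.exists(output_path):
            subprocess.check_call(shlex.split(cmd))


def compile_async(source_code: str, pool: ProcessPoolExecutor):
    # The parent writes the file, so the message pickled to the worker is
    # just three short strings instead of the whole source blob.
    workdir = tempfile.mkdtemp()
    input_path = os.path.join(workdir, "kernel.cpp")
    output_path = input_path[:-4] + ".so"
    lock_path = input_path + ".lock"
    with open(input_path, "w") as f:
        f.write(source_code)
    cmd = f"g++ -shared -fPIC -o {output_path} {input_path}"
    return pool.submit(_worker_compile, lock_path, input_path, output_path, cmd)
```

A caller would do something like `future = compile_async(src, pool)` and later `future.result()`, mirroring the lazy `load_async`/`LambdaFuture` flow in the diff below.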
|
diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py
index cf8f8f6e53..88d37823a7 100644
--- a/torch/_inductor/codecache.py
+++ b/torch/_inductor/codecache.py
@@ -59,7 +59,7 @@ from torch._dynamo.device_interface import (
from torch._dynamo.utils import counters, dynamo_timed
from torch._inductor import config, exc, metrics
from torch._inductor.codegen.cuda import cuda_env
-from torch._inductor.utils import cache_dir, developer_warning, is_linux
+from torch._inductor.utils import cache_dir, is_linux
from torch._subclasses.fake_tensor import (
extract_tensor_metadata,
FakeTensor,
@@ -2012,7 +2012,7 @@ def custom_op_wrapper(op: str, *args):
class CppCodeCache:
- cache: Dict[str, Union[CDLL, ModuleType]] = {}
+ cache: Dict[str, Callable[[], Union[CDLL, ModuleType]]] = {}
clear = staticmethod(cache.clear)
cpp_compile_command_flags: Dict[str, Any] = {}
@@ -2023,13 +2023,17 @@ class CppCodeCache:
@classmethod
def _load_library(cls, path: str, key: str) -> Union[CDLL, ModuleType]:
try:
- return cls._load_library_inner(path, key)
+ result = cls._load_library_inner(path, key)
+ result.key = key # type: ignore[union-attr]
+ return result
except (ImportError, OSError) as e:
if "gomp" in str(e) and os.path.exists("/usr/lib64/libgomp.so.1"):
# hacky workaround for fbcode/buck
global _libgomp
_libgomp = cdll.LoadLibrary("/usr/lib64/libgomp.so.1")
- return cls._load_library_inner(path, key)
+ result = cls._load_library_inner(path, key)
+ result.key = key # type: ignore[union-attr]
+ return result
if "failed to map segment from shared object" in str(e):
raise OSError(
f"{e}. The most common reason this may occur is if the {tempfile.gettempdir()} folder "
@@ -2040,41 +2044,67 @@ class CppCodeCache:
raise
@classmethod
- def load(cls, source_code: str, cuda: bool = False) -> Union[CDLL, ModuleType]:
- cls.cpp_compile_command_flags.update({"cuda": cuda})
- picked_vec_isa = pick_vec_isa()
- cpp_command = repr(
- cpp_compile_command(
- "i", "o", vec_isa=picked_vec_isa, **cls.cpp_compile_command_flags
- )
- )
+ def load_async(cls, source_code: str, cuda=False, submit_fn=None):
+ compile_command = {
+ **cls.cpp_compile_command_flags,
+ "cuda": cuda,
+ "vec_isa": pick_vec_isa(),
+ }
+ cpp_command = repr(cpp_compile_command("i", "o", **compile_command))
key, input_path = write(source_code, "cpp", extra=cpp_command)
+
if key not in cls.cache:
from filelock import FileLock
- lock_dir = get_lock_dir()
- lock = FileLock(os.path.join(lock_dir, key + ".lock"), timeout=LOCK_TIMEOUT)
- with lock:
- output_path = input_path[:-3] + "so"
- if not os.path.exists(output_path):
- cmd = shlex.split(
- cpp_compile_command(
- input=input_path,
- output=output_path,
- vec_isa=picked_vec_isa,
- **cls.cpp_compile_command_flags,
- )
- )
- compile_file(input_path, output_path, cmd)
- cls.cache[key] = cls._load_library(output_path, key)
- cls.cache[key].key = key # type: ignore[union-attr]
+ lock_path = os.path.join(get_lock_dir(), key + ".lock")
+ output_path = input_path[:-3] + "so"
+ future: Optional[Future[Any]] = None
+ lib = None
+ worker_fn = functools.partial(
+ _worker_compile_cpp,
+ lock_path,
+ input_path,
+ output_path,
+ cpp_compile_command(
+ input=input_path, output=output_path, **compile_command
+ ),
+ )
+
+ def load_fn():
+ nonlocal lib
+ if lib is None:
+ if future is not None:
+ future.result()
+ worker_fn()
+ lib = cls._load_library(output_path, key)
+ assert lib is not None
+ return lib
+
+ if submit_fn is not None:
+ with FileLock(lock_path, timeout=LOCK_TIMEOUT):
+ if not os.path.exists(output_path):
+ future = submit_fn(worker_fn)
+
+ cls.cache[key] = load_fn
return cls.cache[key]
+ @classmethod
+ def load(cls, source_code: str, cuda: bool = False):
+ return cls.load_async(source_code, cuda)()
+
+
+def _worker_compile_cpp(lock_path, input_path, output_path, cmd):
+ from filelock import FileLock
+
+ with FileLock(lock_path, timeout=LOCK_TIMEOUT):
+ if not os.path.exists(output_path):
+ compile_file(input_path, output_path, shlex.split(cmd))
+
# Customized Python binding for cpp kernels
class CppPythonBindingsCodeCache(CppCodeCache):
- cache: Dict[str, Union[CDLL, ModuleType]] = {}
+ cache: Dict[str, Callable[[], Union[CDLL, ModuleType]]] = {}
clear = staticmethod(cache.clear)
cpp_compile_command_flags = {
# kernels have no dependency on libtorch
@@ -2166,12 +2196,13 @@ class CppPythonBindingsCodeCache(CppCodeCache):
return module
@classmethod
- def load_pybinding(
+ def load_pybinding_async(
cls,
argtypes: List[str],
source_code: str,
cuda: bool = False,
num_outputs: int = -1,
+ submit_fn=None,
) -> Any:
"""
Wrap a C++ function in fast Python bindings.
@@ -2199,13 +2230,25 @@ class CppPythonBindingsCodeCache(CppCodeCache):
cls.entry_function,
cls.entry_function,
)
- result = cls.load(source_code + suffix, cuda)
- assert isinstance(result, ModuleType)
- return getattr(result, cls.entry_function)
+ get_result = cls.load_async(source_code + suffix, cuda, submit_fn=submit_fn)
+ result = None
+
+ def future():
+ nonlocal result
+ if result is None:
+ result = get_result()
+ assert isinstance(result, ModuleType)
+ return getattr(result, cls.entry_function)
+
+ return future
+
+ @classmethod
+ def load_pybinding(cls, *args, **kwargs) -> Any:
+ return cls.load_pybinding_async(*args, **kwargs)()
class CppWrapperCodeCache(CppPythonBindingsCodeCache):
- cache: Dict[str, Union[CDLL, ModuleType]] = {}
+ cache: Dict[str, Callable[[], Union[CDLL, ModuleType]]] = {}
clear = staticmethod(cache.clear)
cpp_compile_command_flags = {
"include_pytorch": not config.abi_compatible,
@@ -2266,6 +2309,10 @@ class CppWrapperCodeCache(CppPythonBindingsCodeCache):
)
+def _reload_python_module_in_subproc(key, path):
+ return PyCodeCache.load_by_key_path(key, path)
+
+
class PyCodeCache:
cache: Dict[str, ModuleType] = dict()
linemaps: Dict[str, List[Tuple[Any, ...]]] = dict()
@@ -2318,6 +2365,11 @@ class PyCodeCache:
for k, v in attrs.items():
setattr(mod, k, v)
+ if not (linemap or attrs):
+ mod._reload_in_subproc = functools.partial( # type: ignore[attr-defined]
+ _reload_python_module_in_subproc, key, path
+ )
+
return cls.cache[key]
@classmethod
@@ -2349,11 +2401,25 @@ class PyCodeCache:
return parse_stack_trace(entry)
+def _reload_triton_kernel_in_subproc(reload_module, kernel_name):
+ return TritonCodeCache._mod_to_kernel(reload_module(), kernel_name)
+
+
class TritonCodeCache:
@classmethod
def load(cls, kernel_name: str, source_code: str) -> ModuleType:
mod = PyCodeCache.load(source_code)
- return getattr(mod, kernel_name)
+ return cls._mod_to_kernel(mod, kernel_name)
+
+ @classmethod
+ def _mod_to_kernel(cls, mod, kernel_name):
+ kernel = getattr(mod, kernel_name)
+ kernel._reload_in_subproc = functools.partial(
+ _reload_triton_kernel_in_subproc,
+ mod._reload_in_subproc,
+ kernel_name,
+ )
+ return kernel
def _cuda_compiler() -> Optional[str]:
@@ -2639,6 +2705,7 @@ def caching_device_properties():
device_interface.Worker.get_device_properties()
+@functools.lru_cache(None)
def _set_triton_ptxas_path() -> None:
if os.environ.get("TRITON_PTXAS_PATH") is not None:
return
@@ -2653,54 +2720,50 @@ def _set_triton_ptxas_path() -> None:
warnings.warn(f"{ptxas_path} exists but is not an executable")
-def _worker_compile(
- kernel_name: str,
- source_code: str,
+def _worker_compile_triton(
+ load_kernel: Callable[[], Any],
cc: int,
device: torch.device,
device_interface: Type[DeviceInterface],
-) -> None:
+):
+ _set_triton_ptxas_path()
device_interface.Worker.set_device(device.index)
- kernel = TritonCodeCache.load(kernel_name, source_code)
+ kernel = load_kernel()
kernel.precompile(warm_cache_only_with_cc=cc)
-def _load_kernel(kernel_name: str, source_code: str) -> ModuleType:
- _set_triton_ptxas_path()
- kernel = TritonCodeCache.load(kernel_name, source_code)
- kernel.precompile()
- return kernel
+class CodeCacheFuture:
+ def result(self):
+ raise NotImplementedError()
-class TritonFuture:
+class TritonFuture(CodeCacheFuture):
kernel: ModuleType
def __init__(
self,
- kernel_name: str,
- source_code: str,
- future: Future[Any],
+ kernel: Any,
+ future: Optional[Future[Any]],
) -> None:
- self.kernel_name = kernel_name
- self.source_code = source_code
+ self.kernel = kernel
self.future = future
# @dynamo_utils.dynamo_timed
def result(self) -> ModuleType:
- t0 = time()
- if hasattr(self, "kernel"):
- return self.kernel
- # If the worker failed this will throw an exception.
- self.future.result()
- kernel = self.kernel = _load_kernel(self.kernel_name, self.source_code)
- latency = time() - t0
- if latency > 50:
- developer_warning(
- f"Detected long compilation time of {latency} seconds for kernel name {self.kernel_name}"
- )
- developer_warning(self.source_code)
- del self.kernel_name, self.source_code, self.future
- return kernel
+ if self.future is not None:
+ # If the worker failed this will throw an exception.
+ self.future.result()
+ self.future = None
+ self.kernel.precompile()
+ return self.kernel
+
+
+class LambdaFuture(CodeCacheFuture):
+ def __init__(self, result_fn):
+ self.result_fn = result_fn
+
+ def result(self):
+ return self.result_fn()
# If this process dies abnormally (e.g. segfault)
@@ -2734,10 +2797,21 @@ _pool_set: Set[ProcessPoolExecutor] = set()
def shutdown_compile_workers() -> None:
"""Shut down all outstanding compile-worker pools."""
- global _pool_set
for pool in _pool_set:
pool.shutdown()
+ after_fork()
+
+
+def after_fork():
+ """Reset pools to initial state without shutting them down"""
_pool_set.clear()
+ AsyncCompile.process_pool.cache_clear()
+
+
+try:
+ os.register_at_fork(after_in_child=after_fork)
+except AttributeError:
+ pass # register_at_fork does not exists on windows
class AsyncCompile:
@@ -2812,21 +2886,26 @@ class AsyncCompile:
return task()
return cls.pool().submit(task)
- def triton(
- self, kernel_name: str, source_code: str, device_str: str = "cuda"
- ) -> Union[TritonFuture, ModuleType]:
+ def triton(self, kernel_name: str, source_code: str, device_str: str = "cuda"):
_compile_start()
+ _set_triton_ptxas_path()
+ kernel = TritonCodeCache.load(kernel_name, source_code)
if config.compile_threads > 1:
device_interface = get_interface_for_device(device_str)
device = torch.device(device_str, device_interface.current_device())
cc = device_interface.get_compute_capability(device)
future = self.process_pool().submit(
- _worker_compile, kernel_name, source_code, cc, device, device_interface
+ _worker_compile_triton,
+ kernel._reload_in_subproc,
+ cc,
+ device,
+ device_interface,
)
- return TritonFuture(kernel_name, source_code, future)
+ return TritonFuture(kernel, future)
else:
- return _load_kernel(kernel_name, source_code)
+ kernel.precompile()
+ return kernel
def multi_kernel(self, *args, **kwargs) -> Any:
from torch._inductor.codegen.multi_kernel import MultiKernelCall
@@ -2834,18 +2913,21 @@ class AsyncCompile:
# no need to call this in parallel since the sub-kernels are already parallel tasks
return MultiKernelCall(*args, **kwargs)
- def cpp(self, source_code: str) -> ModuleType:
- def task():
+ def cpp(self, source_code: str):
+ if config.compile_threads <= 1:
return CppCodeCache.load(source_code).kernel
+ else:
+ get_result = CppCodeCache.load_async(source_code, submit_fn=self.submit)
+ return LambdaFuture(lambda: get_result().kernel)
- return self.submit(task)
-
- def cpp_pybinding(self, argtypes: List[str], source_code: str) -> ModuleType:
- return self.submit(
- functools.partial(
- CppPythonBindingsCodeCache.load_pybinding, argtypes, source_code
+ def cpp_pybinding(self, argtypes: List[str], source_code: str):
+ if config.compile_threads <= 1:
+ return CppPythonBindingsCodeCache.load_pybinding(argtypes, source_code)
+ else:
+ get_result = CppPythonBindingsCodeCache.load_pybinding_async(
+ argtypes, source_code, submit_fn=self.submit
)
- )
+ return LambdaFuture(get_result)
def cuda(self, source_code, dst_file_ext):
def task():
@@ -2858,7 +2940,7 @@ class AsyncCompile:
[
value
for key, value in scope.items()
- if isinstance(value, (Future, TritonFuture))
+ if isinstance(value, (Future, CodeCacheFuture))
]
)
pbar = tqdm(
@@ -2871,18 +2953,18 @@ class AsyncCompile:
for key, result in scope.items():
if config.verbose_progress and not isinstance(pbar, _Faketqdm):
pbar.set_postfix_str(key)
- if isinstance(result, (Future, TritonFuture)):
+ if isinstance(result, (Future, CodeCacheFuture)):
scope[key] = result.result()
pbar.update(1)
_compile_end()
-if os.environ.get("TORCH_TNT_IN_USE", "0") == "1":
- # When TorchTNT is used, calling warm_pool() here will cause the
- # compile workers created not being able to be shut down inside
- # shutdown_compile_workers(). This may cause significant QPS drop.
- log.info("Do not call AsyncCompile.warm_pool() because TorchTNT is in use.")
+if (
+ os.environ.get("TORCH_TNT_IN_USE", "0") == "1"
+ or os.environ.get("TORCH_WARM_POOL", "1") != "1"
+):
+ pass
elif sys.version_info >= (3, 12):
log.info("AsyncCompile.warm_pool() is broken on 3.12+.")
else:
|
2.41.0
|
ac8fe46dde91cd17f4a271751113deb5763de3f
|
Sat, 13 Apr 2024 06:36:07 +0000
|
[PATCH 0135/1000] Enable UFMT on all of test/quantization/ao_migration & bc (#123994)
|
Partially addresses #123062

Ran lintrunner on:
- test/quantization/ao_migration
- test/quantization/bc

Detail:
```
$ lintrunner -a --take UFMT --all-files
ok No lint issues.
Successfully applied all patches.
```

@ezyang

Pull Request resolved: https://github.com/pytorch/pytorch/pull/123994
Approved by: https://github.com/ezyang
|
diff --git a/.lintrunner.toml b/.lintrunner.toml
index 4073f1bbff..872b6452ab 100644
--- a/.lintrunner.toml
+++ b/.lintrunner.toml
@@ -1334,13 +1334,6 @@ exclude_patterns = [
'test/profiler/test_profiler.py',
'test/profiler/test_profiler_tree.py',
'test/quantization/__init__.py',
- 'test/quantization/ao_migration/__init__.py',
- 'test/quantization/ao_migration/common.py',
- 'test/quantization/ao_migration/test_ao_migration.py',
- 'test/quantization/ao_migration/test_quantization.py',
- 'test/quantization/ao_migration/test_quantization_fx.py',
- 'test/quantization/bc/__init__.py',
- 'test/quantization/bc/test_backward_compatibility.py',
'test/quantization/core/__init__.py',
'test/quantization/core/experimental/apot_fx_graph_mode_ptq.py',
'test/quantization/core/experimental/apot_fx_graph_mode_qat.py',
diff --git a/test/quantization/ao_migration/common.py b/test/quantization/ao_migration/common.py
index de6e67d35a..30a1034e9b 100644
--- a/test/quantization/ao_migration/common.py
+++ b/test/quantization/ao_migration/common.py
@@ -1,42 +1,52 @@
-from torch.testing._internal.common_utils import TestCase
-
import importlib
from typing import List, Optional
+from torch.testing._internal.common_utils import TestCase
+
+
class AOMigrationTestCase(TestCase):
- def _test_function_import(self, package_name: str, function_list: List[str],
- base: Optional[str] = None, new_package_name: Optional[str] = None):
+ def _test_function_import(
+ self,
+ package_name: str,
+ function_list: List[str],
+ base: Optional[str] = None,
+ new_package_name: Optional[str] = None,
+ ):
r"""Tests individual function list import by comparing the functions
and their hashes."""
if base is None:
- base = 'quantization'
- old_base = 'torch.' + base
- new_base = 'torch.ao.' + base
+ base = "quantization"
+ old_base = "torch." + base
+ new_base = "torch.ao." + base
if new_package_name is None:
new_package_name = package_name
- old_location = importlib.import_module(f'{old_base}.{package_name}')
- new_location = importlib.import_module(f'{new_base}.{new_package_name}')
+ old_location = importlib.import_module(f"{old_base}.{package_name}")
+ new_location = importlib.import_module(f"{new_base}.{new_package_name}")
for fn_name in function_list:
old_function = getattr(old_location, fn_name)
new_function = getattr(new_location, fn_name)
assert old_function == new_function, f"Functions don't match: {fn_name}"
- assert hash(old_function) == hash(new_function), \
- f"Hashes don't match: {old_function}({hash(old_function)}) vs. " \
+ assert hash(old_function) == hash(new_function), (
+ f"Hashes don't match: {old_function}({hash(old_function)}) vs. "
f"{new_function}({hash(new_function)})"
+ )
- def _test_dict_import(self, package_name: str, dict_list: List[str],
- base: Optional[str] = None):
+ def _test_dict_import(
+ self, package_name: str, dict_list: List[str], base: Optional[str] = None
+ ):
r"""Tests individual function list import by comparing the functions
and their hashes."""
if base is None:
- base = 'quantization'
- old_base = 'torch.' + base
- new_base = 'torch.ao.' + base
- old_location = importlib.import_module(f'{old_base}.{package_name}')
- new_location = importlib.import_module(f'{new_base}.{package_name}')
+ base = "quantization"
+ old_base = "torch." + base
+ new_base = "torch.ao." + base
+ old_location = importlib.import_module(f"{old_base}.{package_name}")
+ new_location = importlib.import_module(f"{new_base}.{package_name}")
for dict_name in dict_list:
old_dict = getattr(old_location, dict_name)
new_dict = getattr(new_location, dict_name)
assert old_dict == new_dict, f"Dicts don't match: {dict_name}"
for key in new_dict.keys():
- assert old_dict[key] == new_dict[key], f"Dicts don't match: {dict_name} for key {key}"
+ assert (
+ old_dict[key] == new_dict[key]
+ ), f"Dicts don't match: {dict_name} for key {key}"
diff --git a/test/quantization/ao_migration/test_ao_migration.py b/test/quantization/ao_migration/test_ao_migration.py
index 374fc205e3..020dc6d56d 100644
--- a/test/quantization/ao_migration/test_ao_migration.py
+++ b/test/quantization/ao_migration/test_ao_migration.py
@@ -7,257 +7,261 @@ class TestAOMigrationNNQuantized(AOMigrationTestCase):
def test_functional_import(self):
r"""Tests the migration of the torch.nn.quantized.functional"""
function_list = [
- 'avg_pool2d',
- 'avg_pool3d',
- 'adaptive_avg_pool2d',
- 'adaptive_avg_pool3d',
- 'conv1d',
- 'conv2d',
- 'conv3d',
- 'interpolate',
- 'linear',
- 'max_pool1d',
- 'max_pool2d',
- 'celu',
- 'leaky_relu',
- 'hardtanh',
- 'hardswish',
- 'threshold',
- 'elu',
- 'hardsigmoid',
- 'clamp',
- 'upsample',
- 'upsample_bilinear',
- 'upsample_nearest',
+ "avg_pool2d",
+ "avg_pool3d",
+ "adaptive_avg_pool2d",
+ "adaptive_avg_pool3d",
+ "conv1d",
+ "conv2d",
+ "conv3d",
+ "interpolate",
+ "linear",
+ "max_pool1d",
+ "max_pool2d",
+ "celu",
+ "leaky_relu",
+ "hardtanh",
+ "hardswish",
+ "threshold",
+ "elu",
+ "hardsigmoid",
+ "clamp",
+ "upsample",
+ "upsample_bilinear",
+ "upsample_nearest",
]
- self._test_function_import('functional', function_list, base='nn.quantized')
+ self._test_function_import("functional", function_list, base="nn.quantized")
def test_modules_import(self):
module_list = [
# Modules
- 'BatchNorm2d',
- 'BatchNorm3d',
- 'Conv1d',
- 'Conv2d',
- 'Conv3d',
- 'ConvTranspose1d',
- 'ConvTranspose2d',
- 'ConvTranspose3d',
- 'DeQuantize',
- 'ELU',
- 'Embedding',
- 'EmbeddingBag',
- 'GroupNorm',
- 'Hardswish',
- 'InstanceNorm1d',
- 'InstanceNorm2d',
- 'InstanceNorm3d',
- 'LayerNorm',
- 'LeakyReLU',
- 'Linear',
- 'MaxPool2d',
- 'Quantize',
- 'ReLU6',
- 'Sigmoid',
- 'Softmax',
- 'Dropout',
+ "BatchNorm2d",
+ "BatchNorm3d",
+ "Conv1d",
+ "Conv2d",
+ "Conv3d",
+ "ConvTranspose1d",
+ "ConvTranspose2d",
+ "ConvTranspose3d",
+ "DeQuantize",
+ "ELU",
+ "Embedding",
+ "EmbeddingBag",
+ "GroupNorm",
+ "Hardswish",
+ "InstanceNorm1d",
+ "InstanceNorm2d",
+ "InstanceNorm3d",
+ "LayerNorm",
+ "LeakyReLU",
+ "Linear",
+ "MaxPool2d",
+ "Quantize",
+ "ReLU6",
+ "Sigmoid",
+ "Softmax",
+ "Dropout",
# Wrapper modules
- 'FloatFunctional',
- 'FXFloatFunctional',
- 'QFunctional',
+ "FloatFunctional",
+ "FXFloatFunctional",
+ "QFunctional",
]
- self._test_function_import('modules', module_list, base='nn.quantized')
+ self._test_function_import("modules", module_list, base="nn.quantized")
def test_modules_activation(self):
function_list = [
- 'ReLU6',
- 'Hardswish',
- 'ELU',
- 'LeakyReLU',
- 'Sigmoid',
- 'Softmax',
+ "ReLU6",
+ "Hardswish",
+ "ELU",
+ "LeakyReLU",
+ "Sigmoid",
+ "Softmax",
]
- self._test_function_import('activation', function_list,
- base='nn.quantized.modules')
+ self._test_function_import(
+ "activation", function_list, base="nn.quantized.modules"
+ )
def test_modules_batchnorm(self):
function_list = [
- 'BatchNorm2d',
- 'BatchNorm3d',
+ "BatchNorm2d",
+ "BatchNorm3d",
]
- self._test_function_import('batchnorm', function_list,
- base='nn.quantized.modules')
+ self._test_function_import(
+ "batchnorm", function_list, base="nn.quantized.modules"
+ )
def test_modules_conv(self):
function_list = [
- '_reverse_repeat_padding',
- 'Conv1d',
- 'Conv2d',
- 'Conv3d',
- 'ConvTranspose1d',
- 'ConvTranspose2d',
- 'ConvTranspose3d',
+ "_reverse_repeat_padding",
+ "Conv1d",
+ "Conv2d",
+ "Conv3d",
+ "ConvTranspose1d",
+ "ConvTranspose2d",
+ "ConvTranspose3d",
]
- self._test_function_import('conv', function_list,
- base='nn.quantized.modules')
+ self._test_function_import("conv", function_list, base="nn.quantized.modules")
def test_modules_dropout(self):
function_list = [
- 'Dropout',
+ "Dropout",
]
- self._test_function_import('dropout', function_list,
- base='nn.quantized.modules')
+ self._test_function_import(
+ "dropout", function_list, base="nn.quantized.modules"
+ )
def test_modules_embedding_ops(self):
function_list = [
- 'EmbeddingPackedParams',
- 'Embedding',
- 'EmbeddingBag',
+ "EmbeddingPackedParams",
+ "Embedding",
+ "EmbeddingBag",
]
- self._test_function_import('embedding_ops', function_list,
- base='nn.quantized.modules')
+ self._test_function_import(
+ "embedding_ops", function_list, base="nn.quantized.modules"
+ )
def test_modules_functional_modules(self):
function_list = [
- 'FloatFunctional',
- 'FXFloatFunctional',
- 'QFunctional',
+ "FloatFunctional",
+ "FXFloatFunctional",
+ "QFunctional",
]
- self._test_function_import('functional_modules', function_list,
- base='nn.quantized.modules')
+ self._test_function_import(
+ "functional_modules", function_list, base="nn.quantized.modules"
+ )
def test_modules_linear(self):
function_list = [
- 'Linear',
- 'LinearPackedParams',
+ "Linear",
+ "LinearPackedParams",
]
- self._test_function_import('linear', function_list,
- base='nn.quantized.modules')
+ self._test_function_import("linear", function_list, base="nn.quantized.modules")
def test_modules_normalization(self):
function_list = [
- 'LayerNorm',
- 'GroupNorm',
- 'InstanceNorm1d',
- 'InstanceNorm2d',
- 'InstanceNorm3d',
+ "LayerNorm",
+ "GroupNorm",
+ "InstanceNorm1d",
+ "InstanceNorm2d",
+ "InstanceNorm3d",
]
- self._test_function_import('normalization', function_list,
- base='nn.quantized.modules')
+ self._test_function_import(
+ "normalization", function_list, base="nn.quantized.modules"
+ )
def test_modules_utils(self):
function_list = [
- '_ntuple_from_first',
- '_pair_from_first',
- '_quantize_weight',
- '_hide_packed_params_repr',
- 'WeightedQuantizedModule',
+ "_ntuple_from_first",
+ "_pair_from_first",
+ "_quantize_weight",
+ "_hide_packed_params_repr",
+ "WeightedQuantizedModule",
]
- self._test_function_import('utils', function_list,
- base='nn.quantized.modules')
+ self._test_function_import("utils", function_list, base="nn.quantized.modules")
def test_import_nn_quantized_dynamic_import(self):
module_list = [
# Modules
- 'Linear',
- 'LSTM',
- 'GRU',
- 'LSTMCell',
- 'RNNCell',
- 'GRUCell',
- 'Conv1d',
- 'Conv2d',
- 'Conv3d',
- 'ConvTranspose1d',
- 'ConvTranspose2d',
- 'ConvTranspose3d',
+ "Linear",
+ "LSTM",
+ "GRU",
+ "LSTMCell",
+ "RNNCell",
+ "GRUCell",
+ "Conv1d",
+ "Conv2d",
+ "Conv3d",
+ "ConvTranspose1d",
+ "ConvTranspose2d",
+ "ConvTranspose3d",
]
- self._test_function_import('dynamic', module_list, base='nn.quantized')
+ self._test_function_import("dynamic", module_list, base="nn.quantized")
def test_import_nn_quantizable_activation(self):
module_list = [
# Modules
- 'MultiheadAttention',
+ "MultiheadAttention",
]
- self._test_function_import('activation', module_list, base='nn.quantizable.modules')
+ self._test_function_import(
+ "activation", module_list, base="nn.quantizable.modules"
+ )
def test_import_nn_quantizable_rnn(self):
module_list = [
# Modules
- 'LSTM',
- 'LSTMCell',
+ "LSTM",
+ "LSTMCell",
]
- self._test_function_import('rnn', module_list, base='nn.quantizable.modules')
+ self._test_function_import("rnn", module_list, base="nn.quantizable.modules")
def test_import_nn_qat_conv(self):
module_list = [
- 'Conv1d',
- 'Conv2d',
- 'Conv3d',
+ "Conv1d",
+ "Conv2d",
+ "Conv3d",
]
- self._test_function_import('conv', module_list, base='nn.qat.modules')
+ self._test_function_import("conv", module_list, base="nn.qat.modules")
def test_import_nn_qat_embedding_ops(self):
module_list = [
- 'Embedding',
- 'EmbeddingBag',
+ "Embedding",
+ "EmbeddingBag",
]
- self._test_function_import('embedding_ops', module_list, base='nn.qat.modules')
+ self._test_function_import("embedding_ops", module_list, base="nn.qat.modules")
def test_import_nn_qat_linear(self):
module_list = [
- 'Linear',
+ "Linear",
]
- self._test_function_import('linear', module_list, base='nn.qat.modules')
+ self._test_function_import("linear", module_list, base="nn.qat.modules")
def test_import_nn_qat_dynamic_linear(self):
module_list = [
- 'Linear',
+ "Linear",
]
- self._test_function_import('linear', module_list, base='nn.qat.dynamic.modules')
+ self._test_function_import("linear", module_list, base="nn.qat.dynamic.modules")
class TestAOMigrationNNIntrinsic(AOMigrationTestCase):
def test_modules_import_nn_intrinsic(self):
module_list = [
# Modules
- '_FusedModule',
- 'ConvBn1d',
- 'ConvBn2d',
- 'ConvBn3d',
- 'ConvBnReLU1d',
- 'ConvBnReLU2d',
- 'ConvBnReLU3d',
- 'ConvReLU1d',
- 'ConvReLU2d',
- 'ConvReLU3d',
- 'LinearReLU',
- 'BNReLU2d',
- 'BNReLU3d',
- 'LinearBn1d',
+ "_FusedModule",
+ "ConvBn1d",
+ "ConvBn2d",
+ "ConvBn3d",
+ "ConvBnReLU1d",
+ "ConvBnReLU2d",
+ "ConvBnReLU3d",
+ "ConvReLU1d",
+ "ConvReLU2d",
+ "ConvReLU3d",
+ "LinearReLU",
+ "BNReLU2d",
+ "BNReLU3d",
+ "LinearBn1d",
]
- self._test_function_import('intrinsic', module_list, base='nn')
+ self._test_function_import("intrinsic", module_list, base="nn")
def test_modules_nn_intrinsic_fused(self):
function_list = [
- '_FusedModule',
- 'ConvBn1d',
- 'ConvBn2d',
- 'ConvBn3d',
- 'ConvBnReLU1d',
- 'ConvBnReLU2d',
- 'ConvBnReLU3d',
- 'ConvReLU1d',
- 'ConvReLU2d',
- 'ConvReLU3d',
- 'LinearReLU',
- 'BNReLU2d',
- 'BNReLU3d',
- 'LinearBn1d',
+ "_FusedModule",
+ "ConvBn1d",
+ "ConvBn2d",
+ "ConvBn3d",
+ "ConvBnReLU1d",
+ "ConvBnReLU2d",
+ "ConvBnReLU3d",
+ "ConvReLU1d",
+ "ConvReLU2d",
+ "ConvReLU3d",
+ "LinearReLU",
+ "BNReLU2d",
+ "BNReLU3d",
+ "LinearBn1d",
]
- self._test_function_import('fused', function_list,
- base='nn.intrinsic.modules')
+ self._test_function_import("fused", function_list, base="nn.intrinsic.modules")
def test_modules_import_nn_intrinsic_qat(self):
module_list = [
@@ -275,76 +279,83 @@ class TestAOMigrationNNIntrinsic(AOMigrationTestCase):
"update_bn_stats",
"freeze_bn_stats",
]
- self._test_function_import('qat', module_list, base='nn.intrinsic')
+ self._test_function_import("qat", module_list, base="nn.intrinsic")
def test_modules_intrinsic_qat_conv_fused(self):
function_list = [
- 'ConvBn1d',
- 'ConvBnReLU1d',
- 'ConvReLU1d',
- 'ConvBn2d',
- 'ConvBnReLU2d',
- 'ConvReLU2d',
- 'ConvBn3d',
- 'ConvBnReLU3d',
- 'ConvReLU3d',
- 'update_bn_stats',
- 'freeze_bn_stats'
+ "ConvBn1d",
+ "ConvBnReLU1d",
+ "ConvReLU1d",
+ "ConvBn2d",
+ "ConvBnReLU2d",
+ "ConvReLU2d",
+ "ConvBn3d",
+ "ConvBnReLU3d",
+ "ConvReLU3d",
+ "update_bn_stats",
+ "freeze_bn_stats",
]
- self._test_function_import('conv_fused', function_list,
- base='nn.intrinsic.qat.modules')
+ self._test_function_import(
+ "conv_fused", function_list, base="nn.intrinsic.qat.modules"
+ )
def test_modules_intrinsic_qat_linear_fused(self):
function_list = [
- 'LinearBn1d',
+ "LinearBn1d",
]
- self._test_function_import('linear_fused', function_list,
- base='nn.intrinsic.qat.modules')
+ self._test_function_import(
+ "linear_fused", function_list, base="nn.intrinsic.qat.modules"
+ )
def test_modules_intrinsic_qat_linear_relu(self):
function_list = [
- 'LinearReLU',
+ "LinearReLU",
]
- self._test_function_import('linear_relu', function_list,
- base='nn.intrinsic.qat.modules')
+ self._test_function_import(
+ "linear_relu", function_list, base="nn.intrinsic.qat.modules"
+ )
def test_modules_import_nn_intrinsic_quantized(self):
module_list = [
- 'BNReLU2d',
- 'BNReLU3d',
- 'ConvReLU1d',
- 'ConvReLU2d',
- 'ConvReLU3d',
- 'LinearReLU',
+ "BNReLU2d",
+ "BNReLU3d",
+ "ConvReLU1d",
+ "ConvReLU2d",
+ "ConvReLU3d",
+ "LinearReLU",
]
- self._test_function_import('quantized', module_list, base='nn.intrinsic')
+ self._test_function_import("quantized", module_list, base="nn.intrinsic")
def test_modules_intrinsic_quantized_bn_relu(self):
function_list = [
- 'BNReLU2d',
- 'BNReLU3d',
+ "BNReLU2d",
+ "BNReLU3d",
]
- self._test_function_import('bn_relu', function_list,
- base='nn.intrinsic.quantized.modules')
+ self._test_function_import(
+ "bn_relu", function_list, base="nn.intrinsic.quantized.modules"
+ )
def test_modules_intrinsic_quantized_conv_relu(self):
function_list = [
- 'ConvReLU1d',
- 'ConvReLU2d',
- 'ConvReLU3d',
+ "ConvReLU1d",
+ "ConvReLU2d",
+ "ConvReLU3d",
]
- self._test_function_import('conv_relu', function_list,
- base='nn.intrinsic.quantized.modules')
+ self._test_function_import(
+ "conv_relu", function_list, base="nn.intrinsic.quantized.modules"
+ )
def test_modules_intrinsic_quantized_linear_relu(self):
function_list = [
- 'LinearReLU',
+ "LinearReLU",
]
- self._test_function_import('linear_relu', function_list,
- base='nn.intrinsic.quantized.modules')
+ self._test_function_import(
+ "linear_relu", function_list, base="nn.intrinsic.quantized.modules"
+ )
def test_modules_no_import_nn_intrinsic_quantized_dynamic(self):
# TODO(future PR): generalize this
import torch
+
_ = torch.ao.nn.intrinsic.quantized.dynamic
_ = torch.nn.intrinsic.quantized.dynamic
diff --git a/test/quantization/ao_migration/test_quantization.py b/test/quantization/ao_migration/test_quantization.py
index 356ab4da0e..3d416f3b67 100644
--- a/test/quantization/ao_migration/test_quantization.py
+++ b/test/quantization/ao_migration/test_quantization.py
@@ -7,102 +7,103 @@ class TestAOMigrationQuantization(AOMigrationTestCase):
r"""Modules and functions related to the
`torch/quantization` migration to `torch/ao/quantization`.
"""
+
def test_function_import_quantize(self):
function_list = [
- '_convert',
- '_observer_forward_hook',
- '_propagate_qconfig_helper',
- '_remove_activation_post_process',
- '_remove_qconfig',
- '_add_observer_',
- 'add_quant_dequant',
- 'convert',
- '_get_observer_dict',
- '_get_unique_devices_',
- '_is_activation_post_process',
- 'prepare',
- 'prepare_qat',
- 'propagate_qconfig_',
- 'quantize',
- 'quantize_dynamic',
- 'quantize_qat',
- '_register_activation_post_process_hook',
- 'swap_module',
+ "_convert",
+ "_observer_forward_hook",
+ "_propagate_qconfig_helper",
+ "_remove_activation_post_process",
+ "_remove_qconfig",
+ "_add_observer_",
+ "add_quant_dequant",
+ "convert",
+ "_get_observer_dict",
+ "_get_unique_devices_",
+ "_is_activation_post_process",
+ "prepare",
+ "prepare_qat",
+ "propagate_qconfig_",
+ "quantize",
+ "quantize_dynamic",
+ "quantize_qat",
+ "_register_activation_post_process_hook",
+ "swap_module",
]
- self._test_function_import('quantize', function_list)
+ self._test_function_import("quantize", function_list)
def test_function_import_stubs(self):
function_list = [
- 'QuantStub',
- 'DeQuantStub',
- 'QuantWrapper',
+ "QuantStub",
+ "DeQuantStub",
+ "QuantWrapper",
]
- self._test_function_import('stubs', function_list)
+ self._test_function_import("stubs", function_list)
def test_function_import_quantize_jit(self):
function_list = [
- '_check_is_script_module',
- '_check_forward_method',
- 'script_qconfig',
- 'script_qconfig_dict',
- 'fuse_conv_bn_jit',
- '_prepare_jit',
- 'prepare_jit',
- 'prepare_dynamic_jit',
- '_convert_jit',
- 'convert_jit',
- 'convert_dynamic_jit',
- '_quantize_jit',
- 'quantize_jit',
- 'quantize_dynamic_jit',
+ "_check_is_script_module",
+ "_check_forward_method",
+ "script_qconfig",
+ "script_qconfig_dict",
+ "fuse_conv_bn_jit",
+ "_prepare_jit",
+ "prepare_jit",
+ "prepare_dynamic_jit",
+ "_convert_jit",
+ "convert_jit",
+ "convert_dynamic_jit",
+ "_quantize_jit",
+ "quantize_jit",
+ "quantize_dynamic_jit",
]
- self._test_function_import('quantize_jit', function_list)
+ self._test_function_import("quantize_jit", function_list)
def test_function_import_fake_quantize(self):
function_list = [
- '_is_per_channel',
- '_is_per_tensor',
- '_is_symmetric_quant',
- 'FakeQuantizeBase',
- 'FakeQuantize',
- 'FixedQParamsFakeQuantize',
- 'FusedMovingAvgObsFakeQuantize',
- 'default_fake_quant',
- 'default_weight_fake_quant',
- 'default_fixed_qparams_range_neg1to1_fake_quant',
- 'default_fixed_qparams_range_0to1_fake_quant',
- 'default_per_channel_weight_fake_quant',
- 'default_histogram_fake_quant',
- 'default_fused_act_fake_quant',
- 'default_fused_wt_fake_quant',
- 'default_fused_per_channel_wt_fake_quant',
- '_is_fake_quant_script_module',
- 'disable_fake_quant',
- 'enable_fake_quant',
- 'disable_observer',
- 'enable_observer',
+ "_is_per_channel",
+ "_is_per_tensor",
+ "_is_symmetric_quant",
+ "FakeQuantizeBase",
+ "FakeQuantize",
+ "FixedQParamsFakeQuantize",
+ "FusedMovingAvgObsFakeQuantize",
+ "default_fake_quant",
+ "default_weight_fake_quant",
+ "default_fixed_qparams_range_neg1to1_fake_quant",
+ "default_fixed_qparams_range_0to1_fake_quant",
+ "default_per_channel_weight_fake_quant",
+ "default_histogram_fake_quant",
+ "default_fused_act_fake_quant",
+ "default_fused_wt_fake_quant",
+ "default_fused_per_channel_wt_fake_quant",
+ "_is_fake_quant_script_module",
+ "disable_fake_quant",
+ "enable_fake_quant",
+ "disable_observer",
+ "enable_observer",
]
- self._test_function_import('fake_quantize', function_list)
+ self._test_function_import("fake_quantize", function_list)
def test_function_import_fuse_modules(self):
function_list = [
- '_fuse_modules',
- '_get_module',
- '_set_module',
- 'fuse_conv_bn',
- 'fuse_conv_bn_relu',
- 'fuse_known_modules',
- 'fuse_modules',
- 'get_fuser_method',
+ "_fuse_modules",
+ "_get_module",
+ "_set_module",
+ "fuse_conv_bn",
+ "fuse_conv_bn_relu",
+ "fuse_known_modules",
+ "fuse_modules",
+ "get_fuser_method",
]
- self._test_function_import('fuse_modules', function_list)
+ self._test_function_import("fuse_modules", function_list)
def test_function_import_quant_type(self):
function_list = [
- 'QuantType',
- '_get_quant_type_to_str',
+ "QuantType",
+ "_get_quant_type_to_str",
]
- self._test_function_import('quant_type', function_list)
+ self._test_function_import("quant_type", function_list)
def test_function_import_observer(self):
function_list = [
@@ -133,7 +134,7 @@ class TestAOMigrationQuantization(AOMigrationTestCase):
"default_dynamic_quant_observer",
"default_float_qparams_observer",
]
- self._test_function_import('observer', function_list)
+ self._test_function_import("observer", function_list)
def test_function_import_qconfig(self):
function_list = [
@@ -156,9 +157,9 @@ class TestAOMigrationQuantization(AOMigrationTestCase):
"_assert_valid_qconfig",
"QConfigAny",
"_add_module_to_qconfig_obs_ctr",
- "qconfig_equals"
+ "qconfig_equals",
]
- self._test_function_import('qconfig', function_list)
+ self._test_function_import("qconfig", function_list)
def test_function_import_quantization_mappings(self):
function_list = [
@@ -184,8 +185,8 @@ class TestAOMigrationQuantization(AOMigrationTestCase):
"DEFAULT_FLOAT_TO_QUANTIZED_OPERATOR_MAPPINGS",
"DEFAULT_MODULE_TO_ACT_POST_PROCESS",
]
- self._test_function_import('quantization_mappings', function_list)
- self._test_dict_import('quantization_mappings', dict_list)
+ self._test_function_import("quantization_mappings", function_list)
+ self._test_dict_import("quantization_mappings", dict_list)
def test_function_import_fuser_method_mappings(self):
function_list = [
@@ -194,29 +195,27 @@ class TestAOMigrationQuantization(AOMigrationTestCase):
"fuse_linear_bn",
"get_fuser_method",
]
- dict_list = [
- "_DEFAULT_OP_LIST_TO_FUSER_METHOD"
- ]
- self._test_function_import('fuser_method_mappings', function_list)
- self._test_dict_import('fuser_method_mappings', dict_list)
+ dict_list = ["_DEFAULT_OP_LIST_TO_FUSER_METHOD"]
+ self._test_function_import("fuser_method_mappings", function_list)
+ self._test_dict_import("fuser_method_mappings", dict_list)
def test_function_import_utils(self):
function_list = [
- 'activation_dtype',
- 'activation_is_int8_quantized',
- 'activation_is_statically_quantized',
- 'calculate_qmin_qmax',
- 'check_min_max_valid',
- 'get_combined_dict',
- 'get_qconfig_dtypes',
- 'get_qparam_dict',
- 'get_quant_type',
- 'get_swapped_custom_module_class',
- 'getattr_from_fqn',
- 'is_per_channel',
- 'is_per_tensor',
- 'weight_dtype',
- 'weight_is_quantized',
- 'weight_is_statically_quantized',
+ "activation_dtype",
+ "activation_is_int8_quantized",
+ "activation_is_statically_quantized",
+ "calculate_qmin_qmax",
+ "check_min_max_valid",
+ "get_combined_dict",
+ "get_qconfig_dtypes",
+ "get_qparam_dict",
+ "get_quant_type",
+ "get_swapped_custom_module_class",
+ "getattr_from_fqn",
+ "is_per_channel",
+ "is_per_tensor",
+ "weight_dtype",
+ "weight_is_quantized",
+ "weight_is_statically_quantized",
]
- self._test_function_import('utils', function_list)
+ self._test_function_import("utils", function_list)
diff --git a/test/quantization/ao_migration/test_quantization_fx.py b/test/quantization/ao_migration/test_quantization_fx.py
index 1c4d30a391..25b3328c8f 100644
--- a/test/quantization/ao_migration/test_quantization_fx.py
+++ b/test/quantization/ao_migration/test_quantization_fx.py
@@ -2,144 +2,133 @@
from .common import AOMigrationTestCase
+
class TestAOMigrationQuantizationFx(AOMigrationTestCase):
def test_function_import_quantize_fx(self):
function_list = [
- '_check_is_graph_module',
- '_swap_ff_with_fxff',
- '_fuse_fx',
- 'QuantizationTracer',
- '_prepare_fx',
- '_prepare_standalone_module_fx',
- 'fuse_fx',
- 'Scope',
- 'ScopeContextManager',
- 'prepare_fx',
- 'prepare_qat_fx',
- '_convert_fx',
- 'convert_fx',
- '_convert_standalone_module_fx',
+ "_check_is_graph_module",
+ "_swap_ff_with_fxff",
+ "_fuse_fx",
+ "QuantizationTracer",
+ "_prepare_fx",
+ "_prepare_standalone_module_fx",
+ "fuse_fx",
+ "Scope",
+ "ScopeContextManager",
+ "prepare_fx",
+ "prepare_qat_fx",
+ "_convert_fx",
+ "convert_fx",
+ "_convert_standalone_module_fx",
]
- self._test_function_import('quantize_fx', function_list)
+ self._test_function_import("quantize_fx", function_list)
def test_function_import_fx(self):
function_list = [
- 'prepare',
- 'convert',
- 'fuse',
+ "prepare",
+ "convert",
+ "fuse",
]
- self._test_function_import('fx', function_list)
+ self._test_function_import("fx", function_list)
def test_function_import_fx_graph_module(self):
function_list = [
- 'FusedGraphModule',
- 'ObservedGraphModule',
- '_is_observed_module',
- 'ObservedStandaloneGraphModule',
- '_is_observed_standalone_module',
- 'QuantizedGraphModule'
+ "FusedGraphModule",
+ "ObservedGraphModule",
+ "_is_observed_module",
+ "ObservedStandaloneGraphModule",
+ "_is_observed_standalone_module",
+ "QuantizedGraphModule",
]
- self._test_function_import('fx.graph_module', function_list)
+ self._test_function_import("fx.graph_module", function_list)
def test_function_import_fx_pattern_utils(self):
function_list = [
- 'QuantizeHandler',
- '_register_fusion_pattern',
- 'get_default_fusion_patterns',
- '_register_quant_pattern',
- 'get_default_quant_patterns',
- 'get_default_output_activation_post_process_map'
+ "QuantizeHandler",
+ "_register_fusion_pattern",
+ "get_default_fusion_patterns",
+ "_register_quant_pattern",
+ "get_default_quant_patterns",
+ "get_default_output_activation_post_process_map",
]
- self._test_function_import('fx.pattern_utils', function_list)
+ self._test_function_import("fx.pattern_utils", function_list)
def test_function_import_fx_equalize(self):
function_list = [
- 'reshape_scale',
- '_InputEqualizationObserver',
- '_WeightEqualizationObserver',
- 'calculate_equalization_scale',
- 'EqualizationQConfig',
- 'input_equalization_observer',
- 'weight_equalization_observer',
- 'default_equalization_qconfig',
- 'fused_module_supports_equalization',
- 'nn_module_supports_equalization',
- 'node_supports_equalization',
- 'is_equalization_observer',
- 'get_op_node_and_weight_eq_obs',
- 'maybe_get_weight_eq_obs_node',
- 'maybe_get_next_input_eq_obs',
- 'maybe_get_next_equalization_scale',
- 'scale_input_observer',
- 'scale_weight_node',
- 'scale_weight_functional',
- 'clear_weight_quant_obs_node',
- 'remove_node',
- 'update_obs_for_equalization',
- 'convert_eq_obs',
- '_convert_equalization_ref',
- 'get_layer_sqnr_dict',
- 'get_equalization_qconfig_dict'
+ "reshape_scale",
+ "_InputEqualizationObserver",
+ "_WeightEqualizationObserver",
+ "calculate_equalization_scale",
+ "EqualizationQConfig",
+ "input_equalization_observer",
+ "weight_equalization_observer",
+ "default_equalization_qconfig",
+ "fused_module_supports_equalization",
+ "nn_module_supports_equalization",
+ "node_supports_equalization",
+ "is_equalization_observer",
+ "get_op_node_and_weight_eq_obs",
+ "maybe_get_weight_eq_obs_node",
+ "maybe_get_next_input_eq_obs",
+ "maybe_get_next_equalization_scale",
+ "scale_input_observer",
+ "scale_weight_node",
+ "scale_weight_functional",
+ "clear_weight_quant_obs_node",
+ "remove_node",
+ "update_obs_for_equalization",
+ "convert_eq_obs",
+ "_convert_equalization_ref",
+ "get_layer_sqnr_dict",
+ "get_equalization_qconfig_dict",
]
- self._test_function_import('fx._equalize', function_list)
+ self._test_function_import("fx._equalize", function_list)
def test_function_import_fx_quantization_patterns(self):
function_list = [
- 'QuantizeHandler',
- 'BinaryOpQuantizeHandler',
- 'CatQuantizeHandler',
- 'ConvReluQuantizeHandler',
- 'LinearReLUQuantizeHandler',
- 'BatchNormQuantizeHandler',
- 'EmbeddingQuantizeHandler',
- 'RNNDynamicQuantizeHandler',
- 'DefaultNodeQuantizeHandler',
- 'FixedQParamsOpQuantizeHandler',
- 'CopyNodeQuantizeHandler',
- 'CustomModuleQuantizeHandler',
- 'GeneralTensorShapeOpQuantizeHandler',
- 'StandaloneModuleQuantizeHandler'
+ "QuantizeHandler",
+ "BinaryOpQuantizeHandler",
+ "CatQuantizeHandler",
+ "ConvReluQuantizeHandler",
+ "LinearReLUQuantizeHandler",
+ "BatchNormQuantizeHandler",
+ "EmbeddingQuantizeHandler",
+ "RNNDynamicQuantizeHandler",
+ "DefaultNodeQuantizeHandler",
+ "FixedQParamsOpQuantizeHandler",
+ "CopyNodeQuantizeHandler",
+ "CustomModuleQuantizeHandler",
+ "GeneralTensorShapeOpQuantizeHandler",
+ "StandaloneModuleQuantizeHandler",
]
self._test_function_import(
- 'fx.quantization_patterns',
+ "fx.quantization_patterns",
function_list,
- new_package_name='fx.quantize_handler',
+ new_package_name="fx.quantize_handler",
)
def test_function_import_fx_match_utils(self):
- function_list = [
- '_MatchResult',
- 'MatchAllNode',
- '_is_match',
- '_find_matches'
- ]
- self._test_function_import('fx.match_utils', function_list)
+ function_list = ["_MatchResult", "MatchAllNode", "_is_match", "_find_matches"]
+ self._test_function_import("fx.match_utils", function_list)
def test_function_import_fx_prepare(self):
- function_list = [
- 'prepare'
- ]
- self._test_function_import('fx.prepare', function_list)
+ function_list = ["prepare"]
+ self._test_function_import("fx.prepare", function_list)
def test_function_import_fx_convert(self):
- function_list = [
- 'convert'
- ]
- self._test_function_import('fx.convert', function_list)
+ function_list = ["convert"]
+ self._test_function_import("fx.convert", function_list)
def test_function_import_fx_fuse(self):
- function_list = ['fuse']
- self._test_function_import('fx.fuse', function_list)
+ function_list = ["fuse"]
+ self._test_function_import("fx.fuse", function_list)
def test_function_import_fx_fusion_patterns(self):
- function_list = [
- 'FuseHandler',
- 'DefaultFuseHandler'
- ]
+ function_list = ["FuseHandler", "DefaultFuseHandler"]
self._test_function_import(
- 'fx.fusion_patterns',
+ "fx.fusion_patterns",
function_list,
- new_package_name='fx.fuse_handler',
+ new_package_name="fx.fuse_handler",
)
# we removed matching test for torch.quantization.fx.quantization_types
@@ -149,15 +138,15 @@ class TestAOMigrationQuantizationFx(AOMigrationTestCase):
def test_function_import_fx_utils(self):
function_list = [
- 'get_custom_module_class_keys',
- 'get_linear_prepack_op_for_dtype',
- 'get_qconv_prepack_op',
- 'get_new_attr_name_with_prefix',
- 'graph_module_from_producer_nodes',
- 'assert_and_get_unique_device',
- 'create_getattr_from_value',
- 'all_node_args_have_no_tensors',
- 'get_non_observable_arg_indexes_and_types',
- 'maybe_get_next_module'
+ "get_custom_module_class_keys",
+ "get_linear_prepack_op_for_dtype",
+ "get_qconv_prepack_op",
+ "get_new_attr_name_with_prefix",
+ "graph_module_from_producer_nodes",
+ "assert_and_get_unique_device",
+ "create_getattr_from_value",
+ "all_node_args_have_no_tensors",
+ "get_non_observable_arg_indexes_and_types",
+ "maybe_get_next_module",
]
- self._test_function_import('fx.utils', function_list)
+ self._test_function_import("fx.utils", function_list)
diff --git a/test/quantization/bc/test_backward_compatibility.py b/test/quantization/bc/test_backward_compatibility.py
index 8387a5a40f..028709231c 100644
--- a/test/quantization/bc/test_backward_compatibility.py
+++ b/test/quantization/bc/test_backward_compatibility.py
@@ -1,32 +1,39 @@
# Owner(s): ["oncall: quantization"]
-import sys
import os
+import sys
import unittest
from typing import Set
# torch
import torch
-import torch.nn as nn
+import torch.ao.nn.intrinsic.quantized as nniq
import torch.ao.nn.quantized as nnq
import torch.ao.nn.quantized.dynamic as nnqd
-import torch.ao.nn.intrinsic.quantized as nniq
+import torch.ao.quantization.quantize_fx as quantize_fx
+import torch.nn as nn
+
+from torch.ao.quantization import MinMaxObserver, PerChannelMinMaxObserver
from torch.fx import GraphModule
+from torch.testing._internal.common_quantization import skipIfNoFBGEMM
+from torch.testing._internal.common_quantized import (
+ override_qengines,
+ qengine_is_fbgemm,
+)
# Testing utils
-from torch.testing._internal.common_utils import TestCase, IS_AVX512_VNNI_SUPPORTED
-from torch.testing._internal.common_quantized import override_qengines, qengine_is_fbgemm
-from torch.testing._internal.common_quantization import skipIfNoFBGEMM
-from torch.testing._internal.quantization_torch_package_models import LinearReluFunctional
+from torch.testing._internal.common_utils import IS_AVX512_VNNI_SUPPORTED, TestCase
+from torch.testing._internal.quantization_torch_package_models import (
+ LinearReluFunctional,
+)
-from torch.ao.quantization import MinMaxObserver, PerChannelMinMaxObserver
-import torch.ao.quantization.quantize_fx as quantize_fx
def remove_prefix(text, prefix):
if text.startswith(prefix):
- return text[len(prefix):]
+ return text[len(prefix) :]
return text
+
def get_filenames(self, subname):
# NB: we take __file__ from the module that defined the test
# class, so we place the expect directory where the test script
@@ -34,9 +41,7 @@ def get_filenames(self, subname):
module_id = self.__class__.__module__
munged_id = remove_prefix(self.id(), module_id + ".")
test_file = os.path.realpath(sys.modules[module_id].__file__)
- base_name = os.path.join(os.path.dirname(test_file),
- "../serialized",
- munged_id)
+ base_name = os.path.join(os.path.dirname(test_file), "../serialized", munged_id)
subname_output = ""
if subname:
@@ -51,32 +56,59 @@ def get_filenames(self, subname):
package_file = base_name + ".package.pt"
get_attr_targets_file = base_name + ".get_attr_targets.pt"
- return input_file, state_dict_file, scripted_module_file, \
- traced_module_file, expected_file, package_file, get_attr_targets_file
+ return (
+ input_file,
+ state_dict_file,
+ scripted_module_file,
+ traced_module_file,
+ expected_file,
+ package_file,
+ get_attr_targets_file,
+ )
class TestSerialization(TestCase):
- """ Test backward compatiblity for serialization and numerics
- """
+ """Test backward compatiblity for serialization and numerics"""
+
# Copy and modified from TestCase.assertExpected
- def _test_op(self, qmodule, subname=None, input_size=None, input_quantized=True,
- generate=False, prec=None, new_zipfile_serialization=False):
- r""" Test quantized modules serialized previously can be loaded
+ def _test_op(
+ self,
+ qmodule,
+ subname=None,
+ input_size=None,
+ input_quantized=True,
+ generate=False,
+ prec=None,
+ new_zipfile_serialization=False,
+ ):
+ r"""Test quantized modules serialized previously can be loaded
with current code, make sure we don't break backward compatibility for the
serialization of quantized modules
"""
- input_file, state_dict_file, scripted_module_file, traced_module_file, \
- expected_file, _package_file, _get_attr_targets_file = \
- get_filenames(self, subname)
+ (
+ input_file,
+ state_dict_file,
+ scripted_module_file,
+ traced_module_file,
+ expected_file,
+ _package_file,
+ _get_attr_targets_file,
+ ) = get_filenames(self, subname)
# only generate once.
if generate and qengine_is_fbgemm():
input_tensor = torch.rand(*input_size).float()
if input_quantized:
- input_tensor = torch.quantize_per_tensor(input_tensor, 0.5, 2, torch.quint8)
+ input_tensor = torch.quantize_per_tensor(
+ input_tensor, 0.5, 2, torch.quint8
+ )
torch.save(input_tensor, input_file)
# Temporary fix to use _use_new_zipfile_serialization until #38379 lands.
- torch.save(qmodule.state_dict(), state_dict_file, _use_new_zipfile_serialization=new_zipfile_serialization)
+ torch.save(
+ qmodule.state_dict(),
+ state_dict_file,
+ _use_new_zipfile_serialization=new_zipfile_serialization,
+ )
torch.jit.save(torch.jit.script(qmodule), scripted_module_file)
torch.jit.save(torch.jit.trace(qmodule, input_tensor), traced_module_file)
torch.save(qmodule(input_tensor), expected_file)
@@ -90,8 +122,16 @@ class TestSerialization(TestCase):
self.assertEqual(qmodule_scripted(input_tensor), expected, atol=prec)
self.assertEqual(qmodule_traced(input_tensor), expected, atol=prec)
- def _test_op_graph(self, qmodule, subname=None, input_size=None, input_quantized=True,
- generate=False, prec=None, new_zipfile_serialization=False):
+ def _test_op_graph(
+ self,
+ qmodule,
+ subname=None,
+ input_size=None,
+ input_quantized=True,
+ generate=False,
+ prec=None,
+ new_zipfile_serialization=False,
+ ):
r"""
Input: a floating point module
@@ -101,9 +141,15 @@ class TestSerialization(TestCase):
If generate == False, traces and scripts the module and quantizes the results with
PTQ, and compares to saved results.
"""
- input_file, state_dict_file, scripted_module_file, traced_module_file, \
- expected_file, _package_file, _get_attr_targets_file = \
- get_filenames(self, subname)
+ (
+ input_file,
+ state_dict_file,
+ scripted_module_file,
+ traced_module_file,
+ expected_file,
+ _package_file,
+ _get_attr_targets_file,
+ ) = get_filenames(self, subname)
# only generate once.
if generate and qengine_is_fbgemm():
@@ -119,11 +165,13 @@ class TestSerialization(TestCase):
def _eval_fn(model, data):
model(data)
- qconfig_dict = {'': torch.ao.quantization.default_qconfig}
+ qconfig_dict = {"": torch.ao.quantization.default_qconfig}
scripted_q = torch.ao.quantization.quantize_jit(
- scripted, qconfig_dict, _eval_fn, [input_tensor])
+ scripted, qconfig_dict, _eval_fn, [input_tensor]
+ )
traced_q = torch.ao.quantization.quantize_jit(
- traced, qconfig_dict, _eval_fn, [input_tensor])
+ traced, qconfig_dict, _eval_fn, [input_tensor]
+ )
torch.jit.save(scripted_q, scripted_module_file)
torch.jit.save(traced_q, traced_module_file)
@@ -136,12 +184,21 @@ class TestSerialization(TestCase):
self.assertEqual(qmodule_scripted(input_tensor), expected, atol=prec)
self.assertEqual(qmodule_traced(input_tensor), expected, atol=prec)
- def _test_obs(self, obs, input_size, subname=None, generate=False, check_numerics=True):
+ def _test_obs(
+ self, obs, input_size, subname=None, generate=False, check_numerics=True
+ ):
"""
Test observer code can be loaded from state_dict.
"""
- input_file, state_dict_file, _, traced_module_file, expected_file, \
- _package_file, _get_attr_targets_file = get_filenames(self, None)
+ (
+ input_file,
+ state_dict_file,
+ _,
+ traced_module_file,
+ expected_file,
+ _package_file,
+ _get_attr_targets_file,
+ ) = get_filenames(self, None)
if generate:
input_tensor = torch.rand(*input_size).float()
torch.save(input_tensor, input_file)
@@ -159,12 +216,18 @@ class TestSerialization(TestCase):
Verifies that files created in the past with torch.package
work on today's FX graph mode quantization transforms.
"""
- input_file, state_dict_file, _scripted_module_file, _traced_module_file, \
- expected_file, package_file, get_attr_targets_file = \
- get_filenames(self, None)
-
- package_name = 'test'
- resource_name_model = 'test.pkl'
+ (
+ input_file,
+ state_dict_file,
+ _scripted_module_file,
+ _traced_module_file,
+ expected_file,
+ package_file,
+ get_attr_targets_file,
+ ) = get_filenames(self, None)
+
+ package_name = "test"
+ resource_name_model = "test.pkl"
def _do_quant_transforms(
m: torch.nn.Module,
@@ -172,8 +235,8 @@ class TestSerialization(TestCase):
) -> torch.nn.Module:
example_inputs = (input_tensor,)
# do the quantizaton transforms and save result
- qconfig = torch.ao.quantization.get_default_qconfig('fbgemm')
- mp = quantize_fx.prepare_fx(m, {'': qconfig}, example_inputs=example_inputs)
+ qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
+ mp = quantize_fx.prepare_fx(m, {"": qconfig}, example_inputs=example_inputs)
mp(input_tensor)
mq = quantize_fx.convert_fx(mp)
return mq
@@ -181,7 +244,7 @@ class TestSerialization(TestCase):
def _get_get_attr_target_strings(m: GraphModule) -> Set[str]:
results = set()
for node in m.graph.nodes:
- if node.op == 'get_attr':
+ if node.op == "get_attr":
results.add(node.target)
return results
@@ -191,7 +254,7 @@ class TestSerialization(TestCase):
# save the model with torch.package
with torch.package.PackageExporter(package_file) as exp:
- exp.intern('torch.testing._internal.quantization_torch_package_models')
+ exp.intern("torch.testing._internal.quantization_torch_package_models")
exp.save_pickle(package_name, resource_name_model, fp32_module)
# do the quantization transforms and save the result
@@ -214,7 +277,8 @@ class TestSerialization(TestCase):
get_attrs = _get_get_attr_target_strings(mq)
self.assertTrue(
get_attrs == expected_get_attrs,
- f'get_attrs: expected {expected_get_attrs}, got {get_attrs}')
+ f"get_attrs: expected {expected_get_attrs}, got {get_attrs}",
+ )
output_tensor = mq(input_tensor)
self.assertTrue(torch.allclose(output_tensor, expected_output_tensor))
@@ -231,29 +295,68 @@ class TestSerialization(TestCase):
@override_qengines
def test_linear_dynamic(self):
module_qint8 = nnqd.Linear(3, 1, bias_=True, dtype=torch.qint8)
- self._test_op(module_qint8, "qint8", input_size=[1, 3], input_quantized=False, generate=False)
+ self._test_op(
+ module_qint8,
+ "qint8",
+ input_size=[1, 3],
+ input_quantized=False,
+ generate=False,
+ )
if qengine_is_fbgemm():
module_float16 = nnqd.Linear(3, 1, bias_=True, dtype=torch.float16)
- self._test_op(module_float16, "float16", input_size=[1, 3], input_quantized=False, generate=False)
+ self._test_op(
+ module_float16,
+ "float16",
+ input_size=[1, 3],
+ input_quantized=False,
+ generate=False,
+ )
@override_qengines
def test_conv2d(self):
- module = nnq.Conv2d(3, 3, kernel_size=3, stride=1, padding=0, dilation=1,
- groups=1, bias=True, padding_mode="zeros")
+ module = nnq.Conv2d(
+ 3,
+ 3,
+ kernel_size=3,
+ stride=1,
+ padding=0,
+ dilation=1,
+ groups=1,
+ bias=True,
+ padding_mode="zeros",
+ )
self._test_op(module, input_size=[1, 3, 6, 6], generate=False)
@override_qengines
def test_conv2d_nobias(self):
- module = nnq.Conv2d(3, 3, kernel_size=3, stride=1, padding=0, dilation=1,
- groups=1, bias=False, padding_mode="zeros")
+ module = nnq.Conv2d(
+ 3,
+ 3,
+ kernel_size=3,
+ stride=1,
+ padding=0,
+ dilation=1,
+ groups=1,
+ bias=False,
+ padding_mode="zeros",
+ )
self._test_op(module, input_size=[1, 3, 6, 6], generate=False)
@override_qengines
def test_conv2d_graph(self):
module = nn.Sequential(
torch.ao.quantization.QuantStub(),
- nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=0, dilation=1,
- groups=1, bias=True, padding_mode="zeros"),
+ nn.Conv2d(
+ 3,
+ 3,
+ kernel_size=3,
+ stride=1,
+ padding=0,
+ dilation=1,
+ groups=1,
+ bias=True,
+ padding_mode="zeros",
+ ),
)
self._test_op_graph(module, input_size=[1, 3, 6, 6], generate=False)
@@ -261,8 +364,17 @@ class TestSerialization(TestCase):
def test_conv2d_nobias_graph(self):
module = nn.Sequential(
torch.ao.quantization.QuantStub(),
- nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=0, dilation=1,
- groups=1, bias=False, padding_mode="zeros"),
+ nn.Conv2d(
+ 3,
+ 3,
+ kernel_size=3,
+ stride=1,
+ padding=0,
+ dilation=1,
+ groups=1,
+ bias=False,
+ padding_mode="zeros",
+ ),
)
self._test_op_graph(module, input_size=[1, 3, 6, 6], generate=False)
@@ -272,8 +384,17 @@ class TestSerialization(TestCase):
# ConvPackedParams{n}d
module = nn.Sequential(
torch.ao.quantization.QuantStub(),
- nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=0, dilation=1,
- groups=1, bias=True, padding_mode="zeros"),
+ nn.Conv2d(
+ 3,
+ 3,
+ kernel_size=3,
+ stride=1,
+ padding=0,
+ dilation=1,
+ groups=1,
+ bias=True,
+ padding_mode="zeros",
+ ),
)
self._test_op_graph(module, input_size=[1, 3, 6, 6], generate=False)
@@ -283,8 +404,17 @@ class TestSerialization(TestCase):
# ConvPackedParams{n}d
module = nn.Sequential(
torch.ao.quantization.QuantStub(),
- nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=0, dilation=1,
- groups=1, bias=False, padding_mode="zeros"),
+ nn.Conv2d(
+ 3,
+ 3,
+ kernel_size=3,
+ stride=1,
+ padding=0,
+ dilation=1,
+ groups=1,
+ bias=False,
+ padding_mode="zeros",
+ ),
)
self._test_op_graph(module, input_size=[1, 3, 6, 6], generate=False)
@@ -294,8 +424,17 @@ class TestSerialization(TestCase):
# ConvPackedParams{n}d
module = nn.Sequential(
torch.ao.quantization.QuantStub(),
- nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=0, dilation=1,
- groups=1, bias=True, padding_mode="zeros"),
+ nn.Conv2d(
+ 3,
+ 3,
+ kernel_size=3,
+ stride=1,
+ padding=0,
+ dilation=1,
+ groups=1,
+ bias=True,
+ padding_mode="zeros",
+ ),
)
self._test_op_graph(module, input_size=[1, 3, 6, 6], generate=False)
@@ -305,48 +444,96 @@ class TestSerialization(TestCase):
# ConvPackedParams{n}d
module = nn.Sequential(
torch.ao.quantization.QuantStub(),
- nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=0, dilation=1,
- groups=1, bias=False, padding_mode="zeros"),
+ nn.Conv2d(
+ 3,
+ 3,
+ kernel_size=3,
+ stride=1,
+ padding=0,
+ dilation=1,
+ groups=1,
+ bias=False,
+ padding_mode="zeros",
+ ),
)
self._test_op_graph(module, input_size=[1, 3, 6, 6], generate=False)
@override_qengines
def test_conv2d_relu(self):
- module = nniq.ConvReLU2d(3, 3, kernel_size=3, stride=1, padding=0, dilation=1,
- groups=1, bias=True, padding_mode="zeros")
+ module = nniq.ConvReLU2d(
+ 3,
+ 3,
+ kernel_size=3,
+ stride=1,
+ padding=0,
+ dilation=1,
+ groups=1,
+ bias=True,
+ padding_mode="zeros",
+ )
self._test_op(module, input_size=[1, 3, 6, 6], generate=False)
# TODO: graph mode quantized conv2d module
@override_qengines
def test_conv3d(self):
if qengine_is_fbgemm():
- module = nnq.Conv3d(3, 3, kernel_size=3, stride=1, padding=0, dilation=1,
- groups=1, bias=True, padding_mode="zeros")
+ module = nnq.Conv3d(
+ 3,
+ 3,
+ kernel_size=3,
+ stride=1,
+ padding=0,
+ dilation=1,
+ groups=1,
+ bias=True,
+ padding_mode="zeros",
+ )
self._test_op(module, input_size=[1, 3, 6, 6, 6], generate=False)
# TODO: graph mode quantized conv3d module
@override_qengines
def test_conv3d_relu(self):
if qengine_is_fbgemm():
- module = nniq.ConvReLU3d(3, 3, kernel_size=3, stride=1, padding=0, dilation=1,
- groups=1, bias=True, padding_mode="zeros")
+ module = nniq.ConvReLU3d(
+ 3,
+ 3,
+ kernel_size=3,
+ stride=1,
+ padding=0,
+ dilation=1,
+ groups=1,
+ bias=True,
+ padding_mode="zeros",
+ )
self._test_op(module, input_size=[1, 3, 6, 6, 6], generate=False)
# TODO: graph mode quantized conv3d module
@override_qengines
- @unittest.skipIf(IS_AVX512_VNNI_SUPPORTED, "This test fails on machines with AVX512_VNNI support. Ref: GH Issue 59098")
+ @unittest.skipIf(
+ IS_AVX512_VNNI_SUPPORTED,
+ "This test fails on machines with AVX512_VNNI support. Ref: GH Issue 59098",
+ )
def test_lstm(self):
class LSTMModule(torch.nn.Module):
def __init__(self):
super().__init__()
- self.lstm = nnqd.LSTM(input_size=3, hidden_size=7, num_layers=1).to(dtype=torch.float)
+ self.lstm = nnqd.LSTM(input_size=3, hidden_size=7, num_layers=1).to(
+ dtype=torch.float
+ )
def forward(self, x):
x = self.lstm(x)
return x
+
if qengine_is_fbgemm():
mod = LSTMModule()
- self._test_op(mod, input_size=[4, 4, 3], input_quantized=False, generate=False, new_zipfile_serialization=True)
+ self._test_op(
+ mod,
+ input_size=[4, 4, 3],
+ input_quantized=False,
+ generate=False,
+ new_zipfile_serialization=True,
+ )
def test_per_channel_observer(self):
obs = PerChannelMinMaxObserver()
@@ -373,7 +560,9 @@ class TestSerialization(TestCase):
model.qconfig = torch.ao.quantization.get_default_qat_qconfig("fbgemm")
ref_model = torch.ao.quantization.QuantWrapper(model)
ref_model = torch.ao.quantization.prepare_qat(ref_model)
- self._test_obs(ref_model, input_size=[5, 5], generate=False, check_numerics=False)
+ self._test_obs(
+ ref_model, input_size=[5, 5], generate=False, check_numerics=False
+ )
@skipIfNoFBGEMM
def test_linear_relu_package_quantization_transforms(self):
|
2.41.0
|
7261be0a8f09bed9ab95d0cee82e75eebd249c3
|
Sat, 13 Apr 2024 07:47:32 +0000
|
[PATCH 0136/1000] Revert "Simplify ATen sparse semi-structured operators based on CUTLASS (#123473)"
|
This reverts commit b2a0b8c446234f0b35a66aff87501c4596ea5d51. Reverted https://github.com/pytorch/pytorch/pull/123473 on behalf of https://github.com/DanilBaibak because it broke an internal build ([comment](https://github.com/pytorch/pytorch/pull/123473#issuecomment-2053561077))
|
diff --git a/aten/src/ATen/native/native_functions.yaml b/aten/src/ATen/native/native_functions.yaml
index 6e96a8a6aa..02e7c5caa2 100644
--- a/aten/src/ATen/native/native_functions.yaml
+++ b/aten/src/ATen/native/native_functions.yaml
@@ -3342,19 +3342,10 @@
dispatch:
CUDA: _cslt_sparse_mm_search
-# DEPRECATED: Use torch.__sparse_semi_structured_mm/torch._sparse_semi_structured_addmm instead
- func: _sparse_semi_structured_linear(Tensor input, Tensor weight, Tensor meta, *, Tensor? bias=None, str? activation=None, ScalarType? out_dtype=None) -> Tensor
dispatch:
CUDA: _sparse_semi_structured_linear
|
2.41.0
|
60af92c175799c11afce4fb948748f399955e3a
|
Sat, 13 Apr 2024 11:45:00 +0000
|
[PATCH 0137/1000] [Distributed] [3/N] Fix clang-tidy warnings in torch/csrc/distributed/c10d (#123312)
|
This PR continues to fix some clang-tidy warnings in distributed code, following https://github.com/pytorch/pytorch/pull/122892. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123312 Approved by: https://github.com/Skylion007
|
diff --git a/torch/csrc/distributed/c10d/HashStore.cpp b/torch/csrc/distributed/c10d/HashStore.cpp
index cde8585ca5..50ed5ca5eb 100644
--- a/torch/csrc/distributed/c10d/HashStore.cpp
+++ b/torch/csrc/distributed/c10d/HashStore.cpp
@@ -1,12 +1,9 @@
#include <torch/csrc/distributed/c10d/HashStore.hpp>
#include <unistd.h>
-#include <cerrno>
#include <cstdint>
#include <chrono>
-#include <cstdio>
-#include <system_error>
#include <c10/util/Exception.h>
@@ -100,7 +97,7 @@ int64_t HashStore::add(const std::string& key, int64_t i) {
int64_t HashStore::getNumKeys() {
std::unique_lock<std::mutex> lock(m_);
- return map_.size();
+ return static_cast<int64_t>(map_.size());
}
bool HashStore::deleteKey(const std::string& key) {
diff --git a/torch/csrc/distributed/c10d/TCPStore.cpp b/torch/csrc/distributed/c10d/TCPStore.cpp
index 7de20bbcce..fe18dd5355 100644
--- a/torch/csrc/distributed/c10d/TCPStore.cpp
+++ b/torch/csrc/distributed/c10d/TCPStore.cpp
@@ -257,7 +257,7 @@ class SendBuffer {
}
void flush() {
- if (buffer.size() > 0) {
+ if (!buffer.empty()) {
client.sendRaw(buffer.data(), buffer.size());
buffer.clear();
}
@@ -390,7 +390,7 @@ void TCPStore::waitForWorkers() {
}
}
-void TCPStore::validate(void) {
+void TCPStore::validate() {
const std::lock_guard<std::mutex> lock(activeOpLock_);
detail::SendBuffer buffer(*client_, detail::QueryType::VALIDATE);
buffer.appendValue<std::uint32_t>(c10d::detail::validationMagicNumber);
@@ -625,7 +625,7 @@ bool TCPStore::hasExtendedApi() const {
std::unordered_map<std::string, std::unordered_map<std::string, double>>
TCPStore::collectClientCounters() const noexcept {
std::unordered_map<std::string, std::unordered_map<std::string, double>> res;
- for (auto kv : clientCounters_) {
+ for (const auto& kv : clientCounters_) {
res[kv.first] = kv.second.observe();
}
return res;
diff --git a/torch/csrc/distributed/c10d/TCPStore.hpp b/torch/csrc/distributed/c10d/TCPStore.hpp
index 3919b49437..91ed895266 100644
--- a/torch/csrc/distributed/c10d/TCPStore.hpp
+++ b/torch/csrc/distributed/c10d/TCPStore.hpp
@@ -141,7 +141,7 @@ class TORCH_API TCPStore : public Store {
private:
int64_t incrementValueBy(const std::string& key, int64_t delta);
- void validate(void);
+ void validate();
std::vector<uint8_t> doGet(const std::string& key);
diff --git a/torch/csrc/distributed/c10d/TCPStoreBackend.cpp b/torch/csrc/distributed/c10d/TCPStoreBackend.cpp
index 6702595b29..f949926cbd 100644
--- a/torch/csrc/distributed/c10d/TCPStoreBackend.cpp
+++ b/torch/csrc/distributed/c10d/TCPStoreBackend.cpp
@@ -25,11 +25,10 @@
#include <torch/csrc/distributed/c10d/socket.h>
-namespace c10d {
-namespace detail {
+namespace c10d::detail {
// Background thread parent class methods
-BackgroundThread::BackgroundThread() {}
+BackgroundThread::BackgroundThread() = default;
BackgroundThread::~BackgroundThread() = default;
@@ -324,7 +323,7 @@ void TCPStoreMasterDaemon::doSet(
}
void TCPStoreMasterDaemon::validateHandler(int socket) {
- uint32_t validateNumber;
+ uint32_t validateNumber = 0;
tcputil::recvBytes<uint32_t>(socket, &validateNumber, 1);
if (validateNumber != detail::validationMagicNumber) {
TORCH_CHECK(
@@ -612,5 +611,4 @@ std::unique_ptr<BackgroundThread> create_tcpstore_backend(
return std::make_unique<TCPStoreMasterDaemon>(std::move(socket));
}
-} // namespace detail
-} // namespace c10d
+} // namespace c10d::detail
diff --git a/torch/csrc/distributed/c10d/TCPStoreBackend.hpp b/torch/csrc/distributed/c10d/TCPStoreBackend.hpp
index 572340d142..84aac61d02 100644
--- a/torch/csrc/distributed/c10d/TCPStoreBackend.hpp
+++ b/torch/csrc/distributed/c10d/TCPStoreBackend.hpp
@@ -15,8 +15,7 @@
#include <unistd.h>
#endif
-namespace c10d {
-namespace detail {
+namespace c10d::detail {
// Magic number for client validation.
static const uint32_t validationMagicNumber = 0x3C85F7CE;
@@ -63,7 +62,7 @@ class BackgroundThread {
}
private:
- std::atomic<bool> is_running_;
+ std::atomic<bool> is_running_{false};
std::thread daemonThread_{};
};
@@ -73,5 +72,4 @@ std::unique_ptr<BackgroundThread> create_libuv_tcpstore_backend(
const TCPStoreOptions& opts);
bool is_libuv_tcpstore_backend_available();
-} // namespace detail
-} // namespace c10d
+} // namespace c10d::detail
diff --git a/torch/csrc/distributed/c10d/TCPStoreLibUvBackend.cpp b/torch/csrc/distributed/c10d/TCPStoreLibUvBackend.cpp
index 2899411ae6..32c069a98f 100644
--- a/torch/csrc/distributed/c10d/TCPStoreLibUvBackend.cpp
+++ b/torch/csrc/distributed/c10d/TCPStoreLibUvBackend.cpp
@@ -4,6 +4,7 @@
#include <memory>
#include <unordered_map>
#include <unordered_set>
+#include <utility>
#include <vector>
#include <fmt/format.h>
@@ -15,8 +16,7 @@
#include <uv.h>
#endif
-namespace c10d {
-namespace detail {
+namespace c10d::detail {
#ifdef TORCH_USE_LIBUV
@@ -40,7 +40,7 @@ Other callbacks don't provide exception safety so avoid there.
#define ALLOC_BUFFER_SIZE ((size_t)4000)
class UvHandle : public c10::intrusive_ptr_target {
public:
- virtual ~UvHandle() {}
+ ~UvHandle() override = default;
c10::intrusive_ptr<UvHandle> iptr() {
return c10::intrusive_ptr<UvHandle>::reclaim_copy(this);
@@ -83,7 +83,7 @@ class UvHandle : public c10::intrusive_ptr_target {
};
class UvTcpSocket : public UvHandle {
- uv_tcp_t client;
+ uv_tcp_t client{};
c10::intrusive_ptr<UvTcpSocket> iptr() {
return c10::intrusive_ptr<UvTcpSocket>::reclaim_copy(this);
@@ -212,8 +212,8 @@ class UvTcpServer : public UvTcpSocket {
auto res = c10::make_intrusive<UvTcpServer>(loop);
res->handleReady();
try {
- struct sockaddr_storage addr;
- int uv_res;
+ struct sockaddr_storage addr {};
+ int uv_res = 0;
if (useIpv6) {
uv_res = uv_ip6_addr("::", port, (struct sockaddr_in6*)&addr);
} else {
@@ -260,7 +260,7 @@ class UvTcpServer : public UvTcpSocket {
return portNum;
}
- void accept(c10::intrusive_ptr<UvTcpSocket> socket) {
+ void accept(const c10::intrusive_ptr<UvTcpSocket>& socket) {
int res =
uv_accept(unsafeGetStream(), (uv_stream_t*)socket->unsafeGetHandle());
TORCH_CHECK(
@@ -272,7 +272,7 @@ class UvTcpServer : public UvTcpSocket {
private:
OnConnectCallback onConnectCb;
- uint16_t portNum;
+ uint16_t portNum{};
c10::intrusive_ptr<UvTcpServer> iptr() {
return c10::intrusive_ptr<UvTcpServer>::reclaim_copy(this);
@@ -353,11 +353,11 @@ class WriterPayload : public c10::intrusive_ptr_target {
WriterPayload(
std::vector<uint8_t>&& in_data,
c10::intrusive_ptr<UvHandle> handle)
- : data(std::move(in_data)), handle(handle) {
+ : data(std::move(in_data)), handle(std::move(handle)) {
uv_req_set_data((uv_req_t*)&req, this);
}
- ~WriterPayload() {}
+ ~WriterPayload() override = default;
void send() {
buf = uv_buf_init((char*)data.data(), data.size());
@@ -387,7 +387,8 @@ class StreamWriter {
void* operator new(size_t);
public:
- StreamWriter(c10::intrusive_ptr<UvHandle> handle) : handle(handle) {}
+ StreamWriter(c10::intrusive_ptr<UvHandle> handle)
+ : handle(std::move(handle)) {}
void write1(uint8_t val) {
data.push_back(val);
@@ -416,19 +417,14 @@ class StreamWriter {
class ChunkedStream {
std::deque<uv_buf_t> buffers;
- size_t buff_idx;
- size_t buff_offset;
- size_t capacity;
- size_t buff_offset_commit;
- size_t read_offset;
+ size_t buff_idx{0};
+ size_t buff_offset{0};
+ size_t capacity{0};
+ size_t buff_offset_commit{0};
+ size_t read_offset{0};
public:
- ChunkedStream()
- : buff_idx(0),
- buff_offset(0),
- capacity(0),
- buff_offset_commit(0),
- read_offset(0) {}
+ ChunkedStream() = default;
size_t buf_count() {
return buffers.size();
@@ -571,15 +567,15 @@ class LibUVStoreDaemon : public BackgroundThread {
bool checkKeys(const std::vector<std::string>& keys);
bool waitKeys(
const std::vector<std::string>& keys,
- c10::intrusive_ptr<UvHandle> client);
+ const c10::intrusive_ptr<UvHandle>& client);
int64_t size();
int64_t deleteKey(const std::string& key);
void append(const std::string& key, const std::vector<uint8_t>& value);
- void registerClient(c10::intrusive_ptr<UvHandle> client);
- void unregisterClient(c10::intrusive_ptr<UvHandle> client);
- void clearClientWaitState(c10::intrusive_ptr<UvHandle> client);
- bool isMiscellaneousClient(c10::intrusive_ptr<UvHandle> client);
+ void registerClient(const c10::intrusive_ptr<UvHandle>& client);
+ void unregisterClient(const c10::intrusive_ptr<UvHandle>& client);
+ void clearClientWaitState(const c10::intrusive_ptr<UvHandle>& client);
+ bool isMiscellaneousClient(const c10::intrusive_ptr<UvHandle>& client);
uint16_t get_socket_port(uv_tcp_t* handle);
void init(const TCPStoreOptions& opts);
@@ -589,10 +585,10 @@ class LibUVStoreDaemon : public BackgroundThread {
void stop() override;
private:
- uv_loop_t loop;
+ uv_loop_t loop{};
c10::intrusive_ptr<UvTcpServer> tcpServer;
- uv_async_t exit_handle;
+ uv_async_t exit_handle{};
std::unordered_map<std::string, std::vector<uint8_t>> tcpStore_;
// From key -> the list of UvClient waiting on the key
std::unordered_map<std::string, std::vector<c10::intrusive_ptr<UvHandle>>>
@@ -707,7 +703,7 @@ class UvClient : public UvTcpSocket {
}
bool parse_validate_command() {
- uint32_t validateNumber;
+ uint32_t validateNumber = 0;
if (!stream.read_value(validateNumber))
return false;
@@ -1044,8 +1040,8 @@ void LibUVStoreDaemon::run() {
uv_walk(&loop, LibUVStoreDaemon::print_active_handles, nullptr);
}
- for (auto it = clients_.begin(); it != clients_.end(); ++it) {
- (*it)->close();
+ for (const auto& client : clients_) {
+ client->close();
}
tcpServer->close();
@@ -1054,7 +1050,7 @@ void LibUVStoreDaemon::run() {
uv_walk(&loop, LibUVStoreDaemon::print_active_handles, nullptr);
}
- while (1) {
+ while (true) {
res = uv_loop_close(&loop);
if (res == 0) {
break;
@@ -1082,7 +1078,7 @@ void LibUVStoreDaemon::stop() {
}
bool LibUVStoreDaemon::isMiscellaneousClient(
- c10::intrusive_ptr<UvHandle> client) {
+ const c10::intrusive_ptr<UvHandle>& client) {
if (miscellaneousClients_.find(client) != miscellaneousClients_.end()) {
miscellaneousClients_.erase(client);
return true;
@@ -1090,12 +1086,14 @@ bool LibUVStoreDaemon::isMiscellaneousClient(
return false;
}
-void LibUVStoreDaemon::registerClient(c10::intrusive_ptr<UvHandle> client) {
+void LibUVStoreDaemon::registerClient(
+ const c10::intrusive_ptr<UvHandle>& client) {
clients_.insert(client);
miscellaneousClients_.insert(client);
}
-void LibUVStoreDaemon::unregisterClient(c10::intrusive_ptr<UvHandle> client) {
+void LibUVStoreDaemon::unregisterClient(
+ const c10::intrusive_ptr<UvHandle>& client) {
clients_.erase(client);
if (miscellaneousClients_.find(client) != miscellaneousClients_.end()) {
miscellaneousClients_.erase(client);
@@ -1104,7 +1102,7 @@ void LibUVStoreDaemon::unregisterClient(c10::intrusive_ptr<UvHandle> client) {
}
void LibUVStoreDaemon::clearClientWaitState(
- c10::intrusive_ptr<UvHandle> client) {
+ const c10::intrusive_ptr<UvHandle>& client) {
if (keysAwaited_.find(client) == keysAwaited_.end()) {
return;
}
@@ -1152,7 +1150,7 @@ const std::vector<uint8_t>& LibUVStoreDaemon::compareAndSet(
}
} else {
if (pos->second == expectedValue) {
- pos->second = std::move(newValue);
+ pos->second = newValue;
}
wakeupWaitingClients(key);
return pos->second;
@@ -1192,7 +1190,7 @@ bool LibUVStoreDaemon::checkKeys(const std::vector<std::string>& keys) {
bool LibUVStoreDaemon::waitKeys(
const std::vector<std::string>& keys,
- c10::intrusive_ptr<UvHandle> client) {
+ const c10::intrusive_ptr<UvHandle>& client) {
if (checkKeys(keys)) {
return true;
}
@@ -1234,7 +1232,7 @@ void LibUVStoreDaemon::append(
void LibUVStoreDaemon::wakeupWaitingClients(const std::string& key) {
auto socketsToWait = waitingSockets_.find(key);
if (socketsToWait != waitingSockets_.end()) {
- for (auto client : socketsToWait->second) {
+ for (const auto& client : socketsToWait->second) {
if (--keysAwaited_[client] == 0) {
StreamWriter sw(client->iptr());
sw.write1((uint8_t)WaitResponseType::STOP_WAITING);
@@ -1266,5 +1264,4 @@ bool is_libuv_tcpstore_backend_available() {
#endif
}
-} // namespace detail
-} // namespace c10d
+} // namespace c10d::detail
diff --git a/torch/csrc/distributed/c10d/Types.hpp b/torch/csrc/distributed/c10d/Types.hpp
index dc9a985696..423b959803 100644
--- a/torch/csrc/distributed/c10d/Types.hpp
+++ b/torch/csrc/distributed/c10d/Types.hpp
@@ -56,8 +56,8 @@ struct TORCH_API ReduceOp : torch::CustomClassHolder {
ReduceOp(
RedOpType op,
- c10::intrusive_ptr<_SupplementBase> optional_supplement) {
- if (optional_supplement.get()) {
+ const c10::intrusive_ptr<_SupplementBase>& optional_supplement) {
+ if (optional_supplement) {
op_ = op;
} else {
supplement_ = optional_supplement;
@@ -66,14 +66,9 @@ struct TORCH_API ReduceOp : torch::CustomClassHolder {
// The heap resource supplement_, if it exists, is managed by a
// c10::intrusive_ptr, so constructors and operator= can be simple
- ReduceOp(const ReduceOp& other)
- : op_(other.op_), supplement_(other.supplement_) {}
+ ReduceOp(const ReduceOp& other) = default;
- const ReduceOp& operator=(const ReduceOp& other) {
- op_ = other.op_;
- supplement_ = other.supplement_;
- return *this;
- }
+ ReduceOp& operator=(const ReduceOp& other) = default;
operator RedOpType() const {
return op_;
|
2.41.0
|
a7db5d345a10ffb5092b26c5159f56faec1d0ea
|
Sat, 13 Apr 2024 12:54:14 +0000
|
[PATCH 0138/1000] [BE] migrate import sorter configurations to `pyproject.toml` (#123846)
|
Migrate import sorter configurations to `pyproject.toml` and delete `.isort.cfg`. Also, set the line length to 88 (which is the default of `black`). Pull Request resolved: https://github.com/pytorch/pytorch/pull/123846 Approved by: https://github.com/Skylion007
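As a hedged illustration (not part of the PR), the new isort settings (`profile = "black"`, `multi_line_output = 3`, `include_trailing_comma = true`, `line_length = 88`) wrap an over-long import into the same parenthesized, trailing-comma form that `black` itself produces, so the two formatters agree on the 88-column limit:
```python
# Hypothetical before/after under the isort settings described above.
# Before (a single line longer than 88 columns):
#   from torch.ao.quantization import default_dynamic_qconfig, per_channel_dynamic_qconfig, quantize_dynamic

# After isort with profile="black" (vertical hanging indent, trailing comma):
from torch.ao.quantization import (
    default_dynamic_qconfig,
    per_channel_dynamic_qconfig,
    quantize_dynamic,
)
```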
|
diff --git a/.isort.cfg b/.isort.cfg
deleted file mode 100644
index d14d9bf207..0000000000
--- a/.isort.cfg
+++ /dev/null
@@ -1,6 +0,0 @@
-[settings]
-include_trailing_comma=True
-multi_line_output=3
-skip=third_party
-skip_gitignore=True
-use_parentheses=True
diff --git a/pyproject.toml b/pyproject.toml
index 089864d2fc..24bede639f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -15,11 +15,29 @@ build-backend = "setuptools.build_meta:__legacy__"
[tool.black]
-# Uncomment if pyproject.toml worked fine to ensure consistency with flake8
-# line-length = 120
+line-length = 88
target-version = ["py38", "py39", "py310", "py311"]
+[tool.isort]
+src_paths = ["caffe2", "torch", "torchgen", "functorch", "tests"]
+extra_standard_library = ["typing_extensions"]
+skip_gitignore = true
+skip_glob = ["third_party/*"]
+atomic = true
+profile = "black"
+indent = 4
+line_length = 88
+lines_after_imports = 2
+multi_line_output = 3
+include_trailing_comma = true
+
+
+[tool.usort.kown]
+first_party = ["caffe2", "torch", "torchgen", "functorch", "tests"]
+standard_library = ["typing_extensions"]
+
+
[tool.ruff]
target-version = "py38"
line-length = 120
|
2.41.0
|
440d1baa6cf7b32b47f14ffd9349eea9c39f3c4
|
Sat, 13 Apr 2024 07:42:48 +0000
|
[PATCH 0139/1000] [dynamo, 3.12] fix the block stack... again (#123978)
|
Some changes to how we handle blocks in 3.11+:
- We only keep track of with blocks that are not enclosed in a try block
- We do not compile partial graphs if we are in a block that is not in a tracked with block, i.e. any block enclosed in some non-with try/except/etc. block

Pull Request resolved: https://github.com/pytorch/pytorch/pull/123978 Approved by: https://github.com/jansel
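A minimal sketch of the behavior, adapted from the tests added in this PR (the exact fallback and recompile behavior depends on the Python and dynamo versions, so treat this as illustrative rather than definitive):
```python
import torch

def fn(x):
    with torch.set_grad_enabled(True):
        try:
            with torch.set_grad_enabled(False):
                x = x + 1
            # Per this PR's description, a graph break inside a try block does
            # not get a continuation function; dynamo falls back for this
            # region instead of compiling a partial graph.
            torch._dynamo.graph_break()
        except Exception:
            pass
        x = x + 1
    return x

opt_fn = torch._dynamo.optimize("eager")(fn)
print(opt_fn(torch.ones(1)))  # tensor([3.])
```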
|
diff --git a/test/dynamo/test_misc.py b/test/dynamo/test_misc.py
index 19fc38c2f9..e633e88e60 100644
--- a/test/dynamo/test_misc.py
+++ b/test/dynamo/test_misc.py
@@ -7025,7 +7025,7 @@ def fn():
def test_variable_access_in_exception(self):
def fn():
- x = torch.ones(3, 3)
+ x = torch.ones(1)
try:
raise RuntimeError("bad")
except RuntimeError:
@@ -7033,7 +7033,87 @@ def fn():
return x
opt_fn = torch._dynamo.optimize("eager")(fn)
- torch.allclose(opt_fn(), torch.tensor([3.0]))
+ self.assertEqual(opt_fn(), torch.tensor([2.0]))
+
+ def test_nested_sequential_with(self):
+ def fn(x):
+ with torch.set_grad_enabled(True):
+ with torch.set_grad_enabled(False):
+ x = x + 1
+ with torch.set_grad_enabled(True):
+ x = x + 1
+ return x
+
+ opt_fn = torch._dynamo.optimize("eager")(fn)
+ self.assertEqual(opt_fn(torch.ones(1)), torch.tensor([3.0]))
+
+ def test_nested_sequential_try(self):
+ def fn(x):
+ try:
+ try:
+ x = x + 1
+ except:
+ pass
+ try:
+ try:
+ x = x + 1
+ except:
+ pass
+ except:
+ pass
+ except:
+ pass
+ return x
+
+ opt_fn = torch._dynamo.optimize("eager")(fn)
+ self.assertEqual(opt_fn(torch.ones(1)), torch.tensor([3.0]))
+
+ def test_nested_sequential_try_with(self):
+ def fn(x):
+ with torch.set_grad_enabled(True):
+ try:
+ x = x + 1
+ except:
+ pass
+ try:
+ with torch.set_grad_enabled(False):
+ x = x + 1
+ except:
+ pass
+ return x
+
+ opt_fn = torch._dynamo.optimize("eager")(fn)
+ self.assertEqual(opt_fn(torch.ones(1)), torch.tensor([3.0]))
+
+ def test_nested_sequential_try_with_graph_break(self):
+ def fn(x, n):
+ with torch.set_grad_enabled(True):
+ with torch.set_grad_enabled(False):
+ x = x + 1
+ torch._dynamo.graph_break()
+ try:
+ with torch.set_grad_enabled(False):
+ x = x + 1
+ if n == 0:
+ torch._dynamo.graph_break()
+ except:
+ pass
+ with torch.set_grad_enabled(False):
+ x = x + 1
+ torch._dynamo.graph_break()
+ x = x + 1
+ return x
+
+ counter = CompileCounter()
+ opt_fn = torch._dynamo.optimize(counter)(fn)
+ self.assertEqual(opt_fn(torch.ones(1), 0), torch.tensor([5.0]))
+ self.assertEqual(counter.frame_count, 1)
+
+ torch._dynamo.reset()
+ counter = CompileCounter()
+ opt_fn = torch._dynamo.optimize(counter)(fn)
+ self.assertEqual(opt_fn(torch.ones(1), 1), torch.tensor([5.0]))
+ self.assertEqual(counter.frame_count, 3)
def test_ordered_dict_alias_reconstruct(self):
od = collections.OrderedDict
diff --git a/torch/_dynamo/symbolic_convert.py b/torch/_dynamo/symbolic_convert.py
index b7520c87b3..1a6fc558ea 100644
--- a/torch/_dynamo/symbolic_convert.py
+++ b/torch/_dynamo/symbolic_convert.py
@@ -801,48 +801,43 @@ class InstructionTranslatorBase(
if sys.version_info >= (3, 11):
def update_block_stack(self, inst):
- # 3.11 no longer uses a block stack, but we still keep track of one
+ # 3.11+ no longer uses a block stack, but we still keep track of one
# so that we know which contexts are currently active.
# For our purposes, all exception table entries with the same target
# are considered to be part of the same "block".
+ # NOTE: we only keep track of with blocks that are not contained in try blocks.
+ # This is because we will not create continuation functions on graph breaks in try blocks,
+ # but we may for with blocks. We do not push blocks here since
+ # with blocks are pushed when handling BEFORE_WITH.
entry = inst.exn_tab_entry
- if not (
- # still in the same block
- entry
- and self.block_stack
- and self.block_stack[-1].target is entry.target
- ):
- if not entry:
- # no longer in any block
- # It is possible for NOPs to be between two instructions
- # in the same block, but the NOPs are not covered by an
- # exception table entry. In this case, assume that we
- # are still in the same block.
- # In 3.12+, JUMP_BACKWARD might also not be covered by
- # an exception table entry, so we also assume that we
- # are still in the same block. It is probably safe to do
- # this in 3.11, even though we haven't encountered this case before.
- if self.block_stack and inst.opname not in ("NOP", "JUMP_BACKWARD"):
- # If we really escape from a block and the current
- # instruction is not in another block, then there
- # should be no other nested blocks that we are in.
- assert len(self.block_stack) == 1
- self.block_stack.pop()
- elif (
- # current instruction is in the previous block
- len(self.block_stack) > 1
- and self.block_stack[-2].target is entry.target
+ if entry:
+ # Detect when we have exited the top with block.
+ # The with blocks on the block stack are not enclosed in try
+ # blocks, so a with block's cleanup code should be in the
+ # previous with block (if any).
+ if (
+ len(self.block_stack) >= 2
+ and entry.target is not self.block_stack[-1].target
+ and entry.target is self.block_stack[-2].target
):
# exit the current block
self.block_stack.pop()
- else:
- # current instruction is in a new block
- # push block to stack - note, BEFORE_WITH blocks won't
- # be pushed here since BEFORE_WITH pushes the block, and
- # the current instruction would be counted as being in that block.
- self.block_stack.append(
- BlockStackEntry(entry.target, len(self.stack))
- )
+ else:
+ # no longer in any block
+ # It is possible for NOPs to be between two instructions
+ # in the same block, but the NOPs are not covered by an
+ # exception table entry. In this case, assume that we
+ # are still in the same block.
+ # In 3.12+, JUMP_BACKWARD might also not be covered by
+ # an exception table entry, so we also assume that we
+ # are still in the same block. It is probably safe to do
+ # this in 3.11, even though we haven't encountered this case before.
+ if self.block_stack and inst.opname not in ("NOP", "JUMP_BACKWARD"):
+ # If we really escape from a block and the current
+ # instruction is not in another block, then there
+ # should be no other nested blocks that we are in.
+ assert len(self.block_stack) == 1
+ self.block_stack.pop()
else:
@@ -1392,6 +1387,8 @@ class InstructionTranslatorBase(
speculation.fail_and_restart_analysis()
def store_attr_graph_break(self, inst):
+ if not self.should_compile_partial_graph():
+ unimplemented("should_compile_partial_graph=False")
self.output.compile_subgraph(
self, reason=GraphCompileReason("store_attr", [self.frame_summary()])
)
@@ -1882,15 +1879,27 @@ class InstructionTranslatorBase(
ctx,
inst.target,
)
+
if sys.version_info >= (3, 11):
- # see create_call_resume_at for block stack details
- target = self.next_instruction.exn_tab_entry.target
+ # See create_call_resume_at for block stack details.
+ # Only push a block if the current instruction's block is a
+ # with block that is not nested in a try block - that is, the current
+ # instruction's block target is the same as the top block's target.
+ if inst.exn_tab_entry and (
+ not self.block_stack
+ or inst.exn_tab_entry.target is not self.block_stack[-1].target
+ ):
+ target = None
+ else:
+ target = self.next_instruction.exn_tab_entry.target
else:
target = inst.target
- if isinstance(self, InstructionTranslator):
- self.block_stack.append(BlockStackEntry(target, len(self.stack), ctx))
- else:
- self.block_stack.append(BlockStackEntry(target))
+
+ if target:
+ if isinstance(self, InstructionTranslator):
+ self.block_stack.append(BlockStackEntry(target, len(self.stack), ctx))
+ else:
+ self.block_stack.append(BlockStackEntry(target))
self.push(exit)
self.push(ctx.enter(self))
@@ -2234,6 +2243,13 @@ class InstructionTranslator(InstructionTranslatorBase):
return self.symbolic_locals[name]
def should_compile_partial_graph(self):
+ if sys.version_info >= (3, 11):
+ # Do not compile if current instruction's block is not the top with block
+ entry = self.current_instruction.exn_tab_entry
+ if entry and (
+ not self.block_stack or entry.target is not self.block_stack[-1].target
+ ):
+ return False
return (
all(b.can_restore() for b in self.block_stack)
and not self.one_graph
|
2.41.0
|
8a71594933b2464d9d8b6b3533c5a945a4ac2ff
|
Sat, 13 Apr 2024 19:55:57 +0000
|
[PATCH 0141/1000] [NT] Fix typo in declared strides variable (#123856)
|
Summary: The declaration is missing an "s" (`_stride` vs. `_strides`), so pyre throws an error.
Test Plan: expect no pyre errors.
Differential Revision: D56023743
Pull Request resolved: https://github.com/pytorch/pytorch/pull/123856 Approved by: https://github.com/cpuhrsch, https://github.com/soulitzer
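For context, a small illustrative-only sketch (hypothetical class, not PyTorch code) of the mismatch the one-character fix removes: the class annotates `_stride` while the implementation assigns and reads `_strides`, which is roughly the inconsistency pyre complained about:
```python
from typing import Tuple

class JaggedLike:  # hypothetical stand-in for the real class
    _size: Tuple[int, ...]
    _stride: Tuple[int, ...]  # declared name before the fix (missing an "s")

    def __init__(self) -> None:
        self._size = (2, 3)
        # The implementation uses `_strides`, so the declaration above never
        # matches the attribute that is actually set and read.
        self._strides = (3, 1)
```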
|
diff --git a/torch/nested/_internal/nested_tensor.py b/torch/nested/_internal/nested_tensor.py
index b5c9354ad2..5494633b09 100644
--- a/torch/nested/_internal/nested_tensor.py
+++ b/torch/nested/_internal/nested_tensor.py
@@ -45,7 +45,7 @@ class NestedTensor(torch.Tensor):
# For example, a jagged tensor with shape [B, x, D] can be strided in two
# ways: [xD, D, 1] and [x, 1, sum(x)], where xD represents x multiplied by D
_size: Tuple[int, ...]
- _stride: Tuple[int, ...]
+ _strides: Tuple[int, ...]
# Indicates that the nth dimension is ragged
_ragged_idx: int
_metadata_cache: Dict[str, Any]
|
2.41.0
|
35c238bad6b1629b54f863b75b541e24ff57452
|
Sun, 14 Apr 2024 06:07:21 +0000
|
[PATCH 0143/1000] Enable UFMT on all of test/quantization/jit &pt2e (#124010)
|
Partially addresses #123062. Ran lintrunner on:
- test/quantization/jit
- test/quantization/pt2e

Detail:
```
$ lintrunner -a --take UFMT --all-files
ok No lint issues. Successfully applied all patches.
```

cc @ezyang. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124010 Approved by: https://github.com/ezyang
|
diff --git a/.lintrunner.toml b/.lintrunner.toml
index 872b6452ab..65a4c936e2 100644
--- a/.lintrunner.toml
+++ b/.lintrunner.toml
@@ -1368,14 +1368,6 @@ exclude_patterns = [
'test/quantization/fx/test_numeric_suite_fx.py',
'test/quantization/fx/test_quantize_fx.py',
'test/quantization/fx/test_subgraph_rewriter.py',
- 'test/quantization/jit/__init__.py',
- 'test/quantization/jit/test_deprecated_jit_quant.py',
- 'test/quantization/jit/test_fusion_passes.py',
- 'test/quantization/jit/test_ondevice_quantization.py',
- 'test/quantization/jit/test_quantize_jit.py',
- 'test/quantization/pt2e/test_graph_utils.py',
- 'test/quantization/pt2e/test_quantize_pt2e.py',
- 'test/quantization/pt2e/test_x86inductor_quantizer.py',
'test/scripts/cuda_memcheck_common.py',
'test/scripts/run_cuda_memcheck.py',
'test/simulate_nccl_errors.py',
diff --git a/test/quantization/jit/test_deprecated_jit_quant.py b/test/quantization/jit/test_deprecated_jit_quant.py
index ec9d54fe7c..2e5be93647 100644
--- a/test/quantization/jit/test_deprecated_jit_quant.py
+++ b/test/quantization/jit/test_deprecated_jit_quant.py
@@ -1,9 +1,7 @@
# Owner(s): ["oncall: quantization"]
import torch
-from torch.testing._internal.common_quantization import (
- skipIfNoFBGEMM
-)
+from torch.testing._internal.common_quantization import skipIfNoFBGEMM
from torch.testing._internal.jit_utils import JitTestCase
@@ -35,23 +33,28 @@ class TestDeprecatedJitQuantized(JitTestCase):
# product that overflows the int16 range, e.g.
# (255*127+255*127) = 64770. So, we hardcode the test values
# here and ensure a mix of signedness.
- vals = [[100, -155],
- [100, -155],
- [-155, 100],
- [-155, 100],
- [100, -155],
- [-155, 100],
- [-155, 100],
- [100, -155]]
- vals = vals[:d_hid * num_chunks]
+ vals = [
+ [100, -155],
+ [100, -155],
+ [-155, 100],
+ [-155, 100],
+ [100, -155],
+ [-155, 100],
+ [-155, 100],
+ [100, -155],
+ ]
+ vals = vals[: d_hid * num_chunks]
cell.weight_ih = torch.nn.Parameter(
- torch.tensor(vals, dtype=torch.float),
- requires_grad=False)
+ torch.tensor(vals, dtype=torch.float), requires_grad=False
+ )
cell.weight_hh = torch.nn.Parameter(
- torch.tensor(vals, dtype=torch.float),
- requires_grad=False)
+ torch.tensor(vals, dtype=torch.float), requires_grad=False
+ )
- with self.assertRaisesRegex(RuntimeError, "quantize_rnn_cell_modules function is no longer supported"):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "quantize_rnn_cell_modules function is no longer supported",
+ ):
cell = torch.jit.quantized.quantize_rnn_cell_modules(cell)
@skipIfNoFBGEMM
@@ -62,7 +65,6 @@ class TestDeprecatedJitQuantized(JitTestCase):
torch.nn.LSTM(d_in, d_hid).float(),
torch.nn.GRU(d_in, d_hid).float(),
]:
-
# Replace parameter values s.t. the range of values is exactly
# 255, thus we will have 0 quantization error in the quantized
# GEMM call. This i s for testing purposes.
@@ -74,34 +76,44 @@ class TestDeprecatedJitQuantized(JitTestCase):
# product that overflows the int16 range, e.g.
# (255*127+255*127) = 64770. So, we hardcode the test values
# here and ensure a mix of signedness.
- vals = [[100, -155],
- [100, -155],
- [-155, 100],
- [-155, 100],
- [100, -155],
- [-155, 100],
- [-155, 100],
- [100, -155]]
+ vals = [
+ [100, -155],
+ [100, -155],
+ [-155, 100],
+ [-155, 100],
+ [100, -155],
+ [-155, 100],
+ [-155, 100],
+ [100, -155],
+ ]
if isinstance(cell, torch.nn.LSTM):
num_chunks = 4
elif isinstance(cell, torch.nn.GRU):
num_chunks = 3
- vals = vals[:d_hid * num_chunks]
+ vals = vals[: d_hid * num_chunks]
cell.weight_ih_l0 = torch.nn.Parameter(
- torch.tensor(vals, dtype=torch.float),
- requires_grad=False)
+ torch.tensor(vals, dtype=torch.float), requires_grad=False
+ )
cell.weight_hh_l0 = torch.nn.Parameter(
- torch.tensor(vals, dtype=torch.float),
- requires_grad=False)
+ torch.tensor(vals, dtype=torch.float), requires_grad=False
+ )
- with self.assertRaisesRegex(RuntimeError, "quantize_rnn_modules function is no longer supported"):
- cell_int8 = torch.jit.quantized.quantize_rnn_modules(cell, dtype=torch.int8)
+ with self.assertRaisesRegex(
+ RuntimeError, "quantize_rnn_modules function is no longer supported"
+ ):
+ cell_int8 = torch.jit.quantized.quantize_rnn_modules(
+ cell, dtype=torch.int8
+ )
- with self.assertRaisesRegex(RuntimeError, "quantize_rnn_modules function is no longer supported"):
- cell_fp16 = torch.jit.quantized.quantize_rnn_modules(cell, dtype=torch.float16)
+ with self.assertRaisesRegex(
+ RuntimeError, "quantize_rnn_modules function is no longer supported"
+ ):
+ cell_fp16 = torch.jit.quantized.quantize_rnn_modules(
+ cell, dtype=torch.float16
+ )
+ if "fbgemm" in torch.backends.quantized.supported_engines:
- if 'fbgemm' in torch.backends.quantized.supported_engines:
def test_quantization_modules(self):
K1, N1 = 2, 2
@@ -116,18 +128,26 @@ class TestDeprecatedJitQuantized(JitTestCase):
fb = FooBar()
fb.linear1.weight = torch.nn.Parameter(
- torch.tensor([[-150, 100], [100, -150]], dtype=torch.float), requires_grad=False)
- fb.linear1.bias = torch.nn.Parameter(torch.zeros_like(fb.linear1.bias), requires_grad=False)
+ torch.tensor([[-150, 100], [100, -150]], dtype=torch.float),
+ requires_grad=False,
+ )
+ fb.linear1.bias = torch.nn.Parameter(
+ torch.zeros_like(fb.linear1.bias), requires_grad=False
+ )
x = (torch.rand(1, K1).float() - 0.5) / 10.0
value = torch.tensor([[100, -150]], dtype=torch.float)
y_ref = fb(value)
- with self.assertRaisesRegex(RuntimeError, "quantize_linear_modules function is no longer supported"):
+ with self.assertRaisesRegex(
+ RuntimeError, "quantize_linear_modules function is no longer supported"
+ ):
fb_int8 = torch.jit.quantized.quantize_linear_modules(fb)
- with self.assertRaisesRegex(RuntimeError, "quantize_linear_modules function is no longer supported"):
+ with self.assertRaisesRegex(
+ RuntimeError, "quantize_linear_modules function is no longer supported"
+ ):
fb_fp16 = torch.jit.quantized.quantize_linear_modules(fb, torch.float16)
@skipIfNoFBGEMM
@@ -136,13 +156,19 @@ class TestDeprecatedJitQuantized(JitTestCase):
def __init__(self, in_features, out_features):
super().__init__()
qweight = torch._empty_affine_quantized(
- [out_features, in_features], scale=1, zero_point=0,
- dtype=torch.qint8)
+ [out_features, in_features],
+ scale=1,
+ zero_point=0,
+ dtype=torch.qint8,
+ )
self._packed_weight = torch.ops.quantized.linear_prepack(qweight)
@torch.jit.export
def __getstate__(self):
- return (torch.ops.quantized.linear_unpack(self._packed_weight)[0], self.training)
+ return (
+ torch.ops.quantized.linear_unpack(self._packed_weight)[0],
+ self.training,
+ )
def forward(self):
return self._packed_weight
@@ -165,7 +191,9 @@ class TestDeprecatedJitQuantized(JitTestCase):
torch._C._jit_pass_erase_shape_information(x.graph)
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_quantization.py TESTNAME\n\n"
- "instead.")
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_quantization.py TESTNAME\n\n"
+ "instead."
+ )
diff --git a/test/quantization/jit/test_fusion_passes.py b/test/quantization/jit/test_fusion_passes.py
index 1bb93d9051..a1a9eceadb 100644
--- a/test/quantization/jit/test_fusion_passes.py
+++ b/test/quantization/jit/test_fusion_passes.py
@@ -5,11 +5,12 @@ import torch
from torch.testing import FileCheck
from torch.testing._internal.common_quantization import QuantizationTestCase
+
class TestFusionPasses(QuantizationTestCase):
def test_quantized_add_relu_fusion(self):
class MAdd(torch.nn.Module):
def forward(self, x, y):
- a = torch.ops.quantized.add(x, y, 1., 0)
+ a = torch.ops.quantized.add(x, y, 1.0, 0)
relu_out = torch.relu(a)
return relu_out
@@ -17,10 +18,12 @@ class TestFusionPasses(QuantizationTestCase):
B = torch.arange(-128, 130, dtype=torch.float)
scale = 2.0
zero_point = 127
- qA = torch.quantize_per_tensor(A, scale=scale, zero_point=zero_point,
- dtype=torch.quint8)
- qB = torch.quantize_per_tensor(B, scale=scale, zero_point=zero_point,
- dtype=torch.quint8)
+ qA = torch.quantize_per_tensor(
+ A, scale=scale, zero_point=zero_point, dtype=torch.quint8
+ )
+ qB = torch.quantize_per_tensor(
+ B, scale=scale, zero_point=zero_point, dtype=torch.quint8
+ )
# Check quantized add + relu fusion
m = MAdd()
@@ -33,9 +36,9 @@ class TestFusionPasses(QuantizationTestCase):
# modules we have to inline graph.
torch._C._jit_pass_inline(scripted_m.graph)
torch._C._jit_pass_fuse_quantized_add_relu(scripted_m.graph)
- FileCheck().check_not("aten::relu") \
- .check("quantized::add_relu") \
- .run(scripted_m.graph)
+ FileCheck().check_not("aten::relu").check("quantized::add_relu").run(
+ scripted_m.graph
+ )
output = scripted_m(qA, qB)
self.assertEqual(ref_output, output)
@@ -45,10 +48,9 @@ class TestFusionPasses(QuantizationTestCase):
relu_out = torch.relu(a)
return relu_out
- qC = torch._empty_affine_quantized(qA.shape,
- scale=scale,
- zero_point=zero_point,
- dtype=torch.quint8)
+ qC = torch._empty_affine_quantized(
+ qA.shape, scale=scale, zero_point=zero_point, dtype=torch.quint8
+ )
# Check quantized add + relu fusion
m = MAddOut()
scripted_m = torch.jit.script(m)
@@ -59,15 +61,14 @@ class TestFusionPasses(QuantizationTestCase):
# modules we have to inline graph.
torch._C._jit_pass_inline(scripted_m.graph)
torch._C._jit_pass_fuse_quantized_add_relu(scripted_m.graph)
- FileCheck().check_not("aten::relu") \
- .check_not("quantized::add_out") \
- .check("quantized::add_relu_out") \
- .run(scripted_m.graph)
+ FileCheck().check_not("aten::relu").check_not("quantized::add_out").check(
+ "quantized::add_relu_out"
+ ).run(scripted_m.graph)
output = scripted_m(qA, qB, qC)
self.assertEqual(ref_output, output)
class MAddScalar(torch.nn.Module):
- def forward(self, x, y : float):
+ def forward(self, x, y: float):
a = torch.ops.quantized.add_scalar(x, y)
relu_out = torch.relu(a)
return relu_out
@@ -75,34 +76,31 @@ class TestFusionPasses(QuantizationTestCase):
# Check quantized add + relu fusion
m = MAddScalar()
scripted_m = torch.jit.script(m)
- ref_output = scripted_m(qA, 3.)
+ ref_output = scripted_m(qA, 3.0)
torch._C._jit_pass_inline(scripted_m.graph)
torch._C._jit_pass_fuse_quantized_add_relu(scripted_m.graph)
- FileCheck().check_not("aten::relu") \
- .check_not("quantized::add_scalar(") \
- .check("quantized::add_scalar_relu") \
- .run(scripted_m.graph)
- output = scripted_m(qA, 3.)
+ FileCheck().check_not("aten::relu").check_not("quantized::add_scalar(").check(
+ "quantized::add_scalar_relu"
+ ).run(scripted_m.graph)
+ output = scripted_m(qA, 3.0)
self.assertEqual(ref_output, output)
class MAddScalarOut(torch.nn.Module):
- def forward(self, x, y : float, z):
+ def forward(self, x, y: float, z):
a = torch.ops.quantized.add_scalar_out(x, y, z)
relu_out = torch.relu(a)
return relu_out
- qC = torch._empty_affine_quantized(qA.shape,
- scale=scale,
- zero_point=zero_point,
- dtype=torch.quint8)
+ qC = torch._empty_affine_quantized(
+ qA.shape, scale=scale, zero_point=zero_point, dtype=torch.quint8
+ )
m = MAddScalarOut()
scripted_m = torch.jit.script(m)
- ref_output = scripted_m(qA, 3., qC)
+ ref_output = scripted_m(qA, 3.0, qC)
torch._C._jit_pass_inline(scripted_m.graph)
torch._C._jit_pass_fuse_quantized_add_relu(scripted_m.graph)
- FileCheck().check_not("aten::relu") \
- .check_not("quantized::add_scalar_out") \
- .check("quantized::add_scalar_relu_out") \
- .run(scripted_m.graph)
- output = scripted_m(qA, 3., qC)
+ FileCheck().check_not("aten::relu").check_not(
+ "quantized::add_scalar_out"
+ ).check("quantized::add_scalar_relu_out").run(scripted_m.graph)
+ output = scripted_m(qA, 3.0, qC)
self.assertEqual(ref_output, output)
diff --git a/test/quantization/jit/test_ondevice_quantization.py b/test/quantization/jit/test_ondevice_quantization.py
index 76dce10cf2..ea2725f642 100644
--- a/test/quantization/jit/test_ondevice_quantization.py
+++ b/test/quantization/jit/test_ondevice_quantization.py
@@ -1,34 +1,32 @@
# Owner(s): ["oncall: quantization"]
+import io
+from typing import Dict
+
import torch
import torch._C
-from torch.ao.quantization import (
- default_dynamic_qconfig,
- per_channel_dynamic_qconfig,
-)
+from torch.ao.quantization import default_dynamic_qconfig, per_channel_dynamic_qconfig
from torch.ao.quantization.quantize_jit import (
- prepare_dynamic_jit,
- convert_dynamic_jit,
_prepare_ondevice_dynamic_jit,
_quantize_ondevice_dynamic_jit,
+ convert_dynamic_jit,
+ prepare_dynamic_jit,
)
-from torch.testing._internal.common_utils import TestCase
+from torch.jit.mobile import _load_for_lite_interpreter, LiteScriptModule
+
+from torch.testing import FileCheck
from torch.testing._internal.common_quantization import (
get_script_module,
LinearAddModel,
)
-from torch.jit.mobile import _load_for_lite_interpreter, LiteScriptModule
-
-from torch.testing import FileCheck
+from torch.testing._internal.common_utils import TestCase
from torch.utils import bundled_inputs as bundled_inputs
-import io
-from typing import Dict
class myMod(torch.nn.Module):
def __init__(self, weight):
@@ -60,7 +58,7 @@ class MyConvLinearModule(torch.nn.Module):
class OnDevicePTQUtils:
- observer_module_name = ['MinMaxObserver', 'PerChannelMinMaxObserver']
+ observer_module_name = ["MinMaxObserver", "PerChannelMinMaxObserver"]
@staticmethod
def insert_observers(model, qconfig_dict):
@@ -73,7 +71,7 @@ class OnDevicePTQUtils:
def ptq_dynamic_quantize(model, qconfig_dict):
inputs = model.get_example_inputs()
m = get_script_module(model, False, inputs)
- m = _quantize_ondevice_dynamic_jit(m, qconfig_dict, 'forward', True)
+ m = _quantize_ondevice_dynamic_jit(m, qconfig_dict, "forward", True)
return m
@staticmethod
@@ -95,25 +93,35 @@ class OnDevicePTQUtils:
@staticmethod
def is_calculate_qparam(node):
if node.kind() == "prim::CallMethod":
- if node.s('name') == "calculate_qparams":
+ if node.s("name") == "calculate_qparams":
return True
return False
@staticmethod
def get_linear_packed_param_fp_weight(node):
weight = node.inputsAt(0).node()
- if weight.kind() != "aten::quantize_per_tensor" and weight.kind() != "aten::quantize_per_channel":
+ if (
+ weight.kind() != "aten::quantize_per_tensor"
+ and weight.kind() != "aten::quantize_per_channel"
+ ):
raise ValueError("Quantized weight must be produced.")
fp_weight = weight.inputsAt(0).node()
- assert fp_weight.kind() == "prim::GetAttr", "Weight must be an attribute of the module."
- fp_weight_name = fp_weight.s('name')
+ assert (
+ fp_weight.kind() == "prim::GetAttr"
+ ), "Weight must be an attribute of the module."
+ fp_weight_name = fp_weight.s("name")
return fp_weight_name
@staticmethod
def is_per_channel_quantized_packed_param(node):
- assert node.kind() == 'quantized::linear_prepack', "Node must corresponds to linear_prepack."
+ assert (
+ node.kind() == "quantized::linear_prepack"
+ ), "Node must corresponds to linear_prepack."
weight = node.inputsAt(0).node()
- assert weight.kind() != "aten::quantize_per_tensor" or weight.kind() != "aten::quantize_per_channel"
+ assert (
+ weight.kind() != "aten::quantize_per_tensor"
+ or weight.kind() != "aten::quantize_per_channel"
+ )
return weight.kind() != "aten::quantize_per_tensor"
@@ -124,14 +132,14 @@ class TestOnDeviceDynamicPTQInsertObservers(TestCase):
observer_modules = OnDevicePTQUtils.find_observer_modules(scripted_model)
self.assertTrue(len(observer_modules) == num_observers)
for observer in observer_modules:
- self.assertTrue(observer.original_name == 'MinMaxObserver')
+ self.assertTrue(observer.original_name == "MinMaxObserver")
qconfig_dict = {"": per_channel_dynamic_qconfig}
scripted_model = OnDevicePTQUtils.insert_observers(model, qconfig_dict)
observer_modules = OnDevicePTQUtils.find_observer_modules(scripted_model)
self.assertTrue(len(observer_modules) == num_observers)
for observer in observer_modules:
- self.assertTrue(observer.original_name == 'PerChannelMinMaxObserver')
+ self.assertTrue(observer.original_name == "PerChannelMinMaxObserver")
def _check_observer_method(self, model, num_observers):
qconfig_dict = {"": default_dynamic_qconfig}
@@ -143,18 +151,24 @@ class TestOnDeviceDynamicPTQInsertObservers(TestCase):
quant_forward_graph = scripted_model.graph.str()
# exact graph matching is difficult so just resorting to # of lines
# instead of implementing graph matching
- self.assertEqual(len(orig_forward_graph.splitlines()), len(quant_forward_graph.splitlines()))
+ self.assertEqual(
+ len(orig_forward_graph.splitlines()), len(quant_forward_graph.splitlines())
+ )
observe_method = scripted_model.observe_forward.graph
- FileCheck().check_count("prim::CallMethod[name=\"forward\"](%_observer",
- num_observers, exactly=True).run(observe_method)
+ FileCheck().check_count(
+ 'prim::CallMethod[name="forward"](%_observer', num_observers, exactly=True
+ ).run(observe_method)
reset_observers_method = scripted_model.reset_observers_forward.graph
FileCheck().check_count(
- "prim::CallMethod[name=\"reset_min_max_vals\"](%_observer", num_observers, exactly=True).run(reset_observers_method)
+ 'prim::CallMethod[name="reset_min_max_vals"](%_observer',
+ num_observers,
+ exactly=True,
+ ).run(reset_observers_method)
def _observer_is_weight_only(self, node):
if (node.kind() == "prim::CallMethod") and node.s("name") == "forward":
- if (OnDevicePTQUtils.is_value_type_observer(node.inputsAt(0))):
- return (node.inputsAt(1).node().kind() == "prim::GetAttr")
+ if OnDevicePTQUtils.is_value_type_observer(node.inputsAt(0)):
+ return node.inputsAt(1).node().kind() == "prim::GetAttr"
return False
def test_num_observers(self):
@@ -175,7 +189,7 @@ class TestOnDeviceDynamicPTQInsertObservers(TestCase):
observe_forward_graph = scripted_model.observe_forward.graph
num_weight_only_observers = 0
for node in observe_forward_graph.nodes():
- if (self._observer_is_weight_only(node)):
+ if self._observer_is_weight_only(node):
num_weight_only_observers += 1
self.assertEqual(num_weight_only_observers, 3)
@@ -203,18 +217,18 @@ class TestOnDeviceDynamicPTQInsertQuantDequant(TestCase):
quantize_forward_graph = model.quantize_forward.graph
for n in quantize_forward_graph.nodes():
if (n.kind() == "prim::CallMethod") and n.s("name") == "forward":
- if (OnDevicePTQUtils.is_value_type_observer(n.inputsAt(0))):
+ if OnDevicePTQUtils.is_value_type_observer(n.inputsAt(0)):
return False
return True
def _check_quant_dequant_and_calc_qparams(self, model, num_nodes):
- qconfig_dict = {"" : default_dynamic_qconfig}
+ qconfig_dict = {"": default_dynamic_qconfig}
m = OnDevicePTQUtils.ptq_dynamic_quantize(model, qconfig_dict)
self._validate_quant_dequant_nodes(m, num_nodes)
self._validate_calculate_qparams(m, num_nodes)
self._validate_no_observer_forward(m)
- qconfig_dict = {"" : per_channel_dynamic_qconfig}
+ qconfig_dict = {"": per_channel_dynamic_qconfig}
m = OnDevicePTQUtils.ptq_dynamic_quantize(model, qconfig_dict)
self._validate_quant_dequant_nodes(m, num_nodes, num_nodes)
self._validate_calculate_qparams(m, num_nodes)
@@ -222,12 +236,12 @@ class TestOnDeviceDynamicPTQInsertQuantDequant(TestCase):
def _check_quantize_forward_runs(self, model):
inputs = model.get_example_inputs()
- qconfig_dict = {"" : default_dynamic_qconfig}
+ qconfig_dict = {"": default_dynamic_qconfig}
m = OnDevicePTQUtils.ptq_dynamic_quantize(model, qconfig_dict)
m.observe_forward(*inputs)
m.quantize_forward(*inputs)
- qconfig_dict = {"" : per_channel_dynamic_qconfig}
+ qconfig_dict = {"": per_channel_dynamic_qconfig}
m = OnDevicePTQUtils.ptq_dynamic_quantize(model, qconfig_dict)
# First must run observe forward to record the stats to produce
# correct scales and zero points
@@ -254,13 +268,15 @@ class TestOnDeviceDynamicPTQFinalize(TestCase):
linear_prepack = 0
linear_prepack_uses = 0
for n in quantize_forward_graph.nodes():
- if n.kind() == 'prim::SetAttr':
+ if n.kind() == "prim::SetAttr":
maybe_packed_param_value = n.inputsAt(1)
maybe_packed_param = maybe_packed_param_value.node()
- if maybe_packed_param.kind() == 'quantized::linear_prepack':
+ if maybe_packed_param.kind() == "quantized::linear_prepack":
linear_prepack += 1
linear_prepack_uses += len(maybe_packed_param_value.uses())
- if OnDevicePTQUtils.is_per_channel_quantized_packed_param(maybe_packed_param):
+ if OnDevicePTQUtils.is_per_channel_quantized_packed_param(
+ maybe_packed_param
+ ):
quantize_per_channel += 1
else:
quantize_per_tensor += 1
@@ -269,24 +285,24 @@ class TestOnDeviceDynamicPTQFinalize(TestCase):
self.assertEqual(linear_prepack, num_nodes)
self.assertEqual(linear_prepack_uses, num_nodes)
-
def _validate_no_linear_unpack(self, model):
quantize_forward_graph = model.quantize_forward.graph
for n in quantize_forward_graph.nodes():
- if n.kind() == 'quantized::linear_unpack':
+ if n.kind() == "quantized::linear_unpack":
return False
return True
-
def _validate_setattr_fp_weights(self, model, num_nodes):
quantize_forward_graph = model.quantize_forward.graph
fp_weights_setattr = 0
fp_weight_names = []
for n in quantize_forward_graph.nodes():
- if n.kind() == 'prim::SetAttr':
+ if n.kind() == "prim::SetAttr":
maybe_packed_param = n.inputsAt(1).node()
- if maybe_packed_param.kind() == 'quantized::linear_prepack':
- weight_name = OnDevicePTQUtils.get_linear_packed_param_fp_weight(maybe_packed_param)
+ if maybe_packed_param.kind() == "quantized::linear_prepack":
+ weight_name = OnDevicePTQUtils.get_linear_packed_param_fp_weight(
+ maybe_packed_param
+ )
fp_weight_names.append(weight_name)
for n in quantize_forward_graph.nodes():
@@ -295,15 +311,14 @@ class TestOnDeviceDynamicPTQFinalize(TestCase):
# = prim::SetAttr(<weight_name>)(module_value, x)
# Thus making sure that the original fp weights are
# reset
- if n.kind() == 'prim::SetAttr':
- weight_name = n.s('name')
+ if n.kind() == "prim::SetAttr":
+ weight_name = n.s("name")
if weight_name in fp_weight_names:
maybe_constant = n.inputsAt(1).node()
- if maybe_constant.kind() == 'prim::Constant':
+ if maybe_constant.kind() == "prim::Constant":
fp_weights_setattr += 1
self.assertEqual(fp_weights_setattr, num_nodes)
-
def _validate_quantized_forward(self, model, num_nodes):
quantized_forward_graph = model.quantized_forward.graph
quantize_per_tensor = quantize_per_channel = 0
@@ -317,12 +332,12 @@ class TestOnDeviceDynamicPTQFinalize(TestCase):
quantize_per_channel += 1
if "quantized::linear_dynamic" in n.kind():
quantized_linear_dynamic += 1
- if n.kind() == 'prim::GetAttr':
+ if n.kind() == "prim::GetAttr":
output = n.outputsAt(0)
output_type = output.type()
if "LinearPackedParamsBase" in output_type.str():
linear_packed_params += 1
- if n.kind() == 'prim::SetAttr':
+ if n.kind() == "prim::SetAttr":
num_setattr += 1
self.assertEqual(quantize_per_tensor, 0)
self.assertEqual(quantize_per_channel, 0)
@@ -330,37 +345,34 @@ class TestOnDeviceDynamicPTQFinalize(TestCase):
self.assertEqual(linear_packed_params, num_nodes)
# self.assertEqual(num_setattr, 0)
-
def _check_quantize_forward(self, model, num_nodes):
- qconfig_dict = {"" : default_dynamic_qconfig}
+ qconfig_dict = {"": default_dynamic_qconfig}
m = OnDevicePTQUtils.ptq_dynamic_quantize(model, qconfig_dict)
self._validate_packed_params(m, num_nodes)
self._validate_no_linear_unpack(m)
self._validate_setattr_fp_weights(m, num_nodes)
- qconfig_dict = {"" : per_channel_dynamic_qconfig}
+ qconfig_dict = {"": per_channel_dynamic_qconfig}
m = OnDevicePTQUtils.ptq_dynamic_quantize(model, qconfig_dict)
self._validate_packed_params(m, num_nodes, num_nodes)
self._validate_no_linear_unpack(m)
self._validate_setattr_fp_weights(m, num_nodes)
-
def _check_quantized_forward(self, model, num_nodes):
- qconfig_dict = {"" : default_dynamic_qconfig}
+ qconfig_dict = {"": default_dynamic_qconfig}
m = OnDevicePTQUtils.ptq_dynamic_quantize(model, qconfig_dict)
self._validate_quantized_forward(m, num_nodes)
- qconfig_dict = {"" : per_channel_dynamic_qconfig}
+ qconfig_dict = {"": per_channel_dynamic_qconfig}
m = OnDevicePTQUtils.ptq_dynamic_quantize(model, qconfig_dict)
self._validate_quantized_forward(m, num_nodes)
-
def _check_against_ref_dynamic_ptq(self, model):
model.eval()
inputs = model.get_example_inputs()
ref_m = torch.jit.script(model)
torch._C._jit_pass_inline(ref_m.graph)
- qconfig_dict = {"" : default_dynamic_qconfig}
+ qconfig_dict = {"": default_dynamic_qconfig}
ref_m = prepare_dynamic_jit(ref_m, qconfig_dict)
ref_m = convert_dynamic_jit(ref_m)
ref_output = ref_m(*inputs)
@@ -380,7 +392,7 @@ class TestOnDeviceDynamicPTQFinalize(TestCase):
# test with per channel quant
ref_m = torch.jit.script(model)
torch._C._jit_pass_inline(ref_m.graph)
- qconfig_dict = {"" : per_channel_dynamic_qconfig}
+ qconfig_dict = {"": per_channel_dynamic_qconfig}
ref_m = prepare_dynamic_jit(ref_m, qconfig_dict)
ref_m = convert_dynamic_jit(ref_m)
ref_output = ref_m(*inputs)
@@ -397,13 +409,14 @@ class TestOnDeviceDynamicPTQFinalize(TestCase):
thrown = True
self.assertTrue(thrown)
-
- def _check_serdes_and_device_side_api_helper(self, model, check_device_side_api=False):
+ def _check_serdes_and_device_side_api_helper(
+ self, model, check_device_side_api=False
+ ):
model.eval()
inputs = model.get_example_inputs()
ref_m = torch.jit.script(model)
torch._C._jit_pass_inline(ref_m.graph)
- qconfig_dict = {"" : default_dynamic_qconfig}
+ qconfig_dict = {"": default_dynamic_qconfig}
ref_m = prepare_dynamic_jit(ref_m, qconfig_dict)
ref_m = convert_dynamic_jit(ref_m)
buffer = io.BytesIO()
@@ -426,9 +439,11 @@ class TestOnDeviceDynamicPTQFinalize(TestCase):
else:
# check for lite interpreter
m = OnDevicePTQUtils.ptq_dynamic_quantize(model, qconfig_dict)
- first_input, = inputs
- rand_input = bundled_inputs.bundle_randn(first_input.size(), dtype=first_input.dtype)
- m = bundled_inputs.bundle_inputs(m, inputs=[(rand_input, )])
+ (first_input,) = inputs
+ rand_input = bundled_inputs.bundle_randn(
+ first_input.size(), dtype=first_input.dtype
+ )
+ m = bundled_inputs.bundle_inputs(m, inputs=[(rand_input,)])
buffer = io.BytesIO(m._save_to_buffer_for_lite_interpreter())
buffer.seek(0)
m = _load_for_lite_interpreter(buffer) # Error here
@@ -451,7 +466,7 @@ class TestOnDeviceDynamicPTQFinalize(TestCase):
inputs = model.get_example_inputs()
ref_m = torch.jit.script(model)
torch._C._jit_pass_inline(ref_m.graph)
- qconfig_dict = {"" : per_channel_dynamic_qconfig}
+ qconfig_dict = {"": per_channel_dynamic_qconfig}
ref_m = prepare_dynamic_jit(ref_m, qconfig_dict)
ref_m = convert_dynamic_jit(ref_m)
buffer = io.BytesIO()
@@ -474,9 +489,11 @@ class TestOnDeviceDynamicPTQFinalize(TestCase):
else:
# check for lite interpreter
m = OnDevicePTQUtils.ptq_dynamic_quantize(model, qconfig_dict)
- first_input, = inputs
- rand_input = bundled_inputs.bundle_randn(first_input.size(), dtype=first_input.dtype)
- m = bundled_inputs.bundle_inputs(m, inputs=[(rand_input, )])
+ (first_input,) = inputs
+ rand_input = bundled_inputs.bundle_randn(
+ first_input.size(), dtype=first_input.dtype
+ )
+ m = bundled_inputs.bundle_inputs(m, inputs=[(rand_input,)])
buffer = io.BytesIO(m._save_to_buffer_for_lite_interpreter())
buffer.seek(0)
m = _load_for_lite_interpreter(buffer) # Error here
@@ -488,41 +505,34 @@ class TestOnDeviceDynamicPTQFinalize(TestCase):
output = m(*inputs)
self.assertTrue(torch.allclose(ref_output, output))
-
def _check_serialization_deserialization(self, model):
self._check_serdes_and_device_side_api_helper(model, False)
-
def _check_device_side_api(self, model):
self._check_serdes_and_device_side_api_helper(model, True)
-
def test_quantize_forward(self):
model = LinearAddModel()
self._check_quantize_forward(model, 2)
model = MyConvLinearModule()
self._check_quantize_forward(model, 3)
-
def test_quantized_forward(self):
model = LinearAddModel()
self._check_quantized_forward(model, 2)
model = MyConvLinearModule()
self._check_quantized_forward(model, 3)
-
def test_against_offdevice_dynamic_ptq(self):
model = LinearAddModel()
self._check_against_ref_dynamic_ptq(model)
model = MyConvLinearModule()
self._check_against_ref_dynamic_ptq(model)
-
def test_serialization_deserialization(self):
model = MyConvLinearModule()
self._check_serialization_deserialization(model)
-
def test_device_side_api(self):
model = MyConvLinearModule()
self._check_device_side_api(model)
diff --git a/test/quantization/jit/test_quantize_jit.py b/test/quantization/jit/test_quantize_jit.py
index c5c16853e3..2ce4aa7348 100644
--- a/test/quantization/jit/test_quantize_jit.py
+++ b/test/quantization/jit/test_quantize_jit.py
@@ -1,87 +1,88 @@
# Owner(s): ["oncall: quantization"]
# torch
+import io
+import itertools
+import unittest
+
+# Standard library
+from typing import List, Tuple
+
import torch
-import torch.nn as nn
-import torch.nn.functional as F
import torch.jit
import torch.jit.quantized
+import torch.nn as nn
+import torch.nn.functional as F
# torch.ao.quantization
from torch.ao.quantization import (
- QConfig,
default_dynamic_qconfig,
- float16_dynamic_qconfig,
+ default_histogram_observer,
default_observer,
- per_channel_dynamic_qconfig,
default_per_channel_weight_observer,
default_qconfig,
+ default_weight_observer,
+ float16_dynamic_qconfig,
+ fuse_modules,
get_default_qconfig,
+ per_channel_dynamic_qconfig,
+ PlaceholderObserver,
+ QConfig,
quantize,
quantize_dynamic,
- default_weight_observer,
- default_histogram_observer,
- fuse_modules,
- quantize_jit,
quantize_dynamic_jit,
- PlaceholderObserver,
+ quantize_jit,
)
# torch.ao.quantization.quantize_jit
from torch.ao.quantization.quantize_jit import (
- convert_jit,
convert_dynamic_jit,
+ convert_jit,
fuse_conv_bn_jit,
- prepare_jit,
prepare_dynamic_jit,
+ prepare_jit,
script_qconfig,
)
-# Testing utils
-from torch.testing._internal.common_quantized import (
- override_qengines,
- qengine_is_fbgemm,
- qengine_is_qnnpack,
-)
+from torch.jit._recursive import wrap_cpp_module
+
+from torch.testing import FileCheck
+
+# Annotated models
from torch.testing._internal.common_quantization import (
- QuantizationTestCase,
- skipIfNoFBGEMM,
- get_script_module,
- SingleLayerLinearModel,
- SkipQuantModel,
- NestedModel,
+ AnnotatedConvBnModel,
+ AnnotatedConvModel,
+ AnnotatedConvTransposeModel,
+ AnnotatedNestedModel,
+ AnnotatedSingleLayerLinearModel,
+ AnnotatedSkipQuantModel,
+ ConvBnModel,
ConvModel,
ConvTransposeModel,
default_per_channel_qconfig,
+ get_script_module,
+ NestedModel,
+ QuantizationTestCase,
+ SingleLayerLinearModel,
+ skipIfNoFBGEMM,
+ SkipQuantModel,
test_only_eval_fn,
- ConvBnModel,
)
-# Annotated models
-from torch.testing._internal.common_quantization import (
- AnnotatedSingleLayerLinearModel,
- AnnotatedSkipQuantModel,
- AnnotatedNestedModel,
- AnnotatedConvModel,
- AnnotatedConvTransposeModel,
- AnnotatedConvBnModel,
+# Testing utils
+from torch.testing._internal.common_quantized import (
+ override_qengines,
+ qengine_is_fbgemm,
+ qengine_is_qnnpack,
)
-from torch.testing import FileCheck
-from torch.testing._internal.jit_utils import attrs_with_prefix
-from torch.testing._internal.jit_utils import get_forward
-from torch.testing._internal.jit_utils import get_forward_graph
-
from torch.testing._internal.common_utils import set_default_dtype
-
-from torch.jit._recursive import wrap_cpp_module
-
-# Standard library
-from typing import List, Tuple
-import io
-import itertools
-import unittest
+from torch.testing._internal.jit_utils import (
+ attrs_with_prefix,
+ get_forward,
+ get_forward_graph,
+)
class TestQuantizeJitPasses(QuantizationTestCase):
@@ -97,9 +98,7 @@ class TestQuantizeJitPasses(QuantizationTestCase):
return self.conv(x)
m = torch.jit.script(M())
- observer = (
- default_per_channel_weight_observer.with_args(ch_axis=1)
- )
+ observer = default_per_channel_weight_observer.with_args(ch_axis=1)
qconfig_dict = {"": QConfig(activation=default_observer, weight=observer)}
m = prepare_jit(m, qconfig_dict)
data = torch.randn(1, 3, 10, 10, dtype=torch.float)
@@ -114,12 +113,18 @@ class TestQuantizeJitPasses(QuantizationTestCase):
# We have this pattern in the original graph: Constant f32_weight -> quant -> dequant
# After skipping dequant during Constant Propagation, the resulting graph will be:
# Constant int8_weight -> dequant
- FileCheck().check_count("aten::quantize_per_tensor", 2, exactly=True).run(freezed.graph)
- FileCheck().check_count("aten::quantize_per_channel", 0, exactly=True).run(freezed.graph)
+ FileCheck().check_count("aten::quantize_per_tensor", 2, exactly=True).run(
+ freezed.graph
+ )
+ FileCheck().check_count("aten::quantize_per_channel", 0, exactly=True).run(
+ freezed.graph
+ )
FileCheck().check_count("aten::dequantize", 3, exactly=True).run(freezed.graph)
- FileCheck().check("aten::quantize_per_tensor").check_next("aten::dequantize").check_not(
- "aten::quantize_per_channel"
- ).check("aten::dequantize").check_next("aten::conv2d").check_next(
+ FileCheck().check("aten::quantize_per_tensor").check_next(
+ "aten::dequantize"
+ ).check_not("aten::quantize_per_channel").check("aten::dequantize").check_next(
+ "aten::conv2d"
+ ).check_next(
"aten::quantize_per_tensor"
).check_next(
"aten::dequantize"
@@ -665,8 +670,8 @@ class TestQuantizeJitPasses(QuantizationTestCase):
}
assert len(activation_dtypes) == 1, "Expected to have 1 activation dtype"
assert len(weight_dtypes) == 1, "Expected to have 1 weight dtype"
- assert (
- next(iter(activation_dtypes)) != next(iter(weight_dtypes))
+ assert next(iter(activation_dtypes)) != next(
+ iter(weight_dtypes)
), "Expected activation dtype to "
" be different from wegiht dtype"
@@ -1700,9 +1705,7 @@ class TestQuantizeJitOps(QuantizationTestCase):
model.graph
)
- FileCheck().check_not(f"quantized::conv{dim}d_prepack").run(
- model.graph
- )
+ FileCheck().check_not(f"quantized::conv{dim}d_prepack").run(model.graph)
@skipIfNoFBGEMM
def test_quantized_conv_relu(self):
@@ -2282,16 +2285,19 @@ class TestQuantizeJitOps(QuantizationTestCase):
options = itertools.product([True, False], [2, 3])
for tracing, dim in options:
for instance in [BNRelu(dim, True), BNRelu(dim, False)]:
- model = self.checkGraphModeOp(instance, self.img_data_dict[dim],
- "quantized::batch_norm_relu", tracing)
- FileCheck().check_not("aten::batch_norm") \
- .check_not("aten::relu") \
- .check_not("aten::relu_") \
- .run(model.graph)
+ model = self.checkGraphModeOp(
+ instance,
+ self.img_data_dict[dim],
+ "quantized::batch_norm_relu",
+ tracing,
+ )
+ FileCheck().check_not("aten::batch_norm").check_not(
+ "aten::relu"
+ ).check_not("aten::relu_").run(model.graph)
@skipIfNoFBGEMM
def test_qbatch_norm_relu_BNFuncRelu(self):
- bn_module = {2 : torch.nn.BatchNorm2d, 3 : torch.nn.BatchNorm3d}
+ bn_module = {2: torch.nn.BatchNorm2d, 3: torch.nn.BatchNorm3d}
class BNFuncRelu(torch.nn.Module):
def __init__(self, dim):
@@ -2304,16 +2310,16 @@ class TestQuantizeJitOps(QuantizationTestCase):
options = itertools.product([True, False], [2, 3])
for tracing, dim in options:
instance = BNFuncRelu(dim)
- model = self.checkGraphModeOp(instance, self.img_data_dict[dim],
- "quantized::batch_norm_relu", tracing)
- FileCheck().check_not("aten::batch_norm") \
- .check_not("aten::relu") \
- .check_not("aten::relu_") \
- .run(model.graph)
+ model = self.checkGraphModeOp(
+ instance, self.img_data_dict[dim], "quantized::batch_norm_relu", tracing
+ )
+ FileCheck().check_not("aten::batch_norm").check_not("aten::relu").check_not(
+ "aten::relu_"
+ ).run(model.graph)
@skipIfNoFBGEMM
def test_qbatch_norm_relu_BNFuncInplaceRelu(self):
- bn_module = {2 : torch.nn.BatchNorm2d, 3 : torch.nn.BatchNorm3d}
+ bn_module = {2: torch.nn.BatchNorm2d, 3: torch.nn.BatchNorm3d}
class BNFuncInplaceRelu(torch.nn.Module):
def __init__(self, dim):
@@ -2326,12 +2332,12 @@ class TestQuantizeJitOps(QuantizationTestCase):
options = itertools.product([True, False], [2, 3])
for tracing, dim in options:
instance = BNFuncInplaceRelu(dim)
- model = self.checkGraphModeOp(instance, self.img_data_dict[dim],
- "quantized::batch_norm_relu", tracing)
- FileCheck().check_not("aten::batch_norm") \
- .check_not("aten::relu") \
- .check_not("aten::relu_") \
- .run(model.graph)
+ model = self.checkGraphModeOp(
+ instance, self.img_data_dict[dim], "quantized::batch_norm_relu", tracing
+ )
+ FileCheck().check_not("aten::batch_norm").check_not("aten::relu").check_not(
+ "aten::relu_"
+ ).run(model.graph)
@skipIfNoFBGEMM
def test_quantized_mul(self):
@@ -3280,11 +3286,17 @@ class TestQuantizeDynamicJitPasses(QuantizationTestCase):
for x, obs in m2._modules._c.items():
if x == "res1":
graph_params.append(
- (obs.getattr("weight.2_scale_0"), obs.getattr("weight.2_zero_point_0"))
+ (
+ obs.getattr("weight.2_scale_0"),
+ obs.getattr("weight.2_zero_point_0"),
+ )
)
elif x == "res2":
graph_params.append(
- (obs.getattr("weight.4_scale_0"), obs.getattr("weight.4_zero_point_0"))
+ (
+ obs.getattr("weight.4_scale_0"),
+ obs.getattr("weight.4_zero_point_0"),
+ )
)
self.assertEqual(ref_qparams, graph_params)
@@ -3313,10 +3325,12 @@ class TestQuantizeDynamicJitPasses(QuantizationTestCase):
model = quantize_dynamic_jit(model, qconfig_dict, debug=True)
graph_qparams = []
for x, obs in model._modules._c.items():
- n = 2 if x == 'fc' and tracing else 1
+ n = 2 if x == "fc" and tracing else 1
graph_qparams.append(
- (obs.getattr(f"weight.{n}_scale_0"),
- obs.getattr(f"weight.{n}_zero_point_0"))
+ (
+ obs.getattr(f"weight.{n}_scale_0"),
+ obs.getattr(f"weight.{n}_zero_point_0"),
+ )
)
self.assertEqual(ref_qparams, graph_qparams)
@@ -3519,21 +3533,19 @@ class TestQuantizeDynamicJitOps(QuantizationTestCase):
activation=PlaceholderObserver.with_args(
dtype=torch.float, custom_op_name="embedding_bag_4bit"
),
- weight=PlaceholderObserver.with_args(
- custom_op_name="embedding_bag_4bit"
- ),
+ weight=PlaceholderObserver.with_args(custom_op_name="embedding_bag_4bit"),
)
int8_qconfig = QConfig(
activation=PlaceholderObserver.with_args(
dtype=torch.float, custom_op_name="embedding_bag_byte"
),
- weight=PlaceholderObserver.with_args(
- custom_op_name="embedding_bag_byte"
- ),
+ weight=PlaceholderObserver.with_args(custom_op_name="embedding_bag_byte"),
)
- error_msg = r'Expected aten::embedding_bag padding_idx input to be None'
- for trace, qconfig in itertools.product([True, False], [int4_qconfig, int8_qconfig]):
+ error_msg = r"Expected aten::embedding_bag padding_idx input to be None"
+ for trace, qconfig in itertools.product(
+ [True, False], [int4_qconfig, int8_qconfig]
+ ):
if trace:
m = torch.jit.trace(module, dummy_inputs)
else:
diff --git a/test/quantization/pt2e/test_graph_utils.py b/test/quantization/pt2e/test_graph_utils.py
index a20338a97e..2cb49ae634 100644
--- a/test/quantization/pt2e/test_graph_utils.py
+++ b/test/quantization/pt2e/test_graph_utils.py
@@ -10,14 +10,10 @@ from torch.ao.quantization.pt2e.graph_utils import (
get_equivalent_types,
update_equivalent_types_dict,
)
-from torch.testing._internal.common_utils import (
- IS_WINDOWS,
- TestCase,
-)
+from torch.testing._internal.common_utils import IS_WINDOWS, TestCase
class TestGraphUtils(TestCase):
-
@unittest.skipIf(IS_WINDOWS, "torch.compile is not supported on Windows")
def test_conv_bn_conv_relu(self):
class M(torch.nn.Module):
diff --git a/test/quantization/pt2e/test_quantize_pt2e.py b/test/quantization/pt2e/test_quantize_pt2e.py
index 30ceb80c39..e3b7eadf48 100644
--- a/test/quantization/pt2e/test_quantize_pt2e.py
+++ b/test/quantization/pt2e/test_quantize_pt2e.py
@@ -2,14 +2,22 @@
from typing import List, Tuple
import torch
-from torch._export import (
- capture_pre_autograd_graph,
-)
from torch import Tensor
-from torch.ao.quantization import (
- observer,
- ObserverOrFakeQuantize,
- QConfigMapping,
+from torch._export import capture_pre_autograd_graph
+from torch.ao.quantization import observer, ObserverOrFakeQuantize, QConfigMapping
+
+from torch.ao.quantization.qconfig import (
+ default_per_channel_symmetric_qnnpack_qconfig,
+ float_qparams_weight_only_qconfig,
+ per_channel_weight_observer_range_neg_127_to_127,
+ QConfig,
+ weight_observer_range_neg_127_to_127,
+)
+
+from torch.ao.quantization.quantize_pt2e import (
+ convert_pt2e,
+ prepare_pt2e,
+ prepare_qat_pt2e,
)
from torch.ao.quantization.quantizer import (
DerivedQuantizationSpec,
@@ -19,33 +27,19 @@ from torch.ao.quantization.quantizer import (
Quantizer,
SharedQuantizationSpec,
)
-from torch.ao.quantization.quantizer.xnnpack_quantizer import (
- XNNPACKQuantizer,
- get_symmetric_quantization_config,
-)
-from torch.ao.quantization.quantizer.xnnpack_quantizer_utils import (
- OP_TO_ANNOTATOR,
- QuantizationConfig,
-)
from torch.ao.quantization.quantizer.composable_quantizer import ( # noqa: F811
ComposableQuantizer,
)
from torch.ao.quantization.quantizer.embedding_quantizer import ( # noqa: F811
EmbeddingQuantizer,
)
-
-from torch.ao.quantization.quantize_pt2e import (
- convert_pt2e,
- prepare_pt2e,
- prepare_qat_pt2e,
+from torch.ao.quantization.quantizer.xnnpack_quantizer import (
+ get_symmetric_quantization_config,
+ XNNPACKQuantizer,
)
-
-from torch.ao.quantization.qconfig import (
- default_per_channel_symmetric_qnnpack_qconfig,
- float_qparams_weight_only_qconfig,
- per_channel_weight_observer_range_neg_127_to_127,
- QConfig,
- weight_observer_range_neg_127_to_127,
+from torch.ao.quantization.quantizer.xnnpack_quantizer_utils import (
+ OP_TO_ANNOTATOR,
+ QuantizationConfig,
)
from torch.fx import Node
@@ -55,9 +49,7 @@ from torch.testing._internal.common_quantization import (
skipIfNoQNNPACK,
TestHelperModules,
)
-from torch.testing._internal.common_utils import (
- TemporaryFileName,
-)
+from torch.testing._internal.common_utils import TemporaryFileName
@skipIfNoQNNPACK
@@ -186,12 +178,20 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
# Ensure the conv has no observer inserted at output
node_occurrence = {
# two for input of conv
- ns.call_function(torch.ops.quantized_decomposed.quantize_per_tensor.default): 1,
- ns.call_function(torch.ops.quantized_decomposed.dequantize_per_tensor.default): 2,
+ ns.call_function(
+ torch.ops.quantized_decomposed.quantize_per_tensor.default
+ ): 1,
+ ns.call_function(
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default
+ ): 2,
}
node_list = [
- ns.call_function(torch.ops.quantized_decomposed.dequantize_per_tensor.default),
- ns.call_function(torch.ops.quantized_decomposed.dequantize_per_tensor.default),
+ ns.call_function(
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default
+ ),
+ ns.call_function(
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default
+ ),
ns.call_function(torch.ops.aten.conv2d.default),
]
self.checkGraphModuleNodes(
@@ -272,15 +272,27 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
# two for input of conv
# one for input of maxpool
# one for output of maxpool
- ns.call_function(torch.ops.quantized_decomposed.quantize_per_tensor.default): 3,
- ns.call_function(torch.ops.quantized_decomposed.dequantize_per_tensor.default): 4,
+ ns.call_function(
+ torch.ops.quantized_decomposed.quantize_per_tensor.default
+ ): 3,
+ ns.call_function(
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default
+ ): 4,
}
node_list = [
- ns.call_function(torch.ops.quantized_decomposed.dequantize_per_tensor.default),
- ns.call_function(torch.ops.quantized_decomposed.dequantize_per_tensor.default),
+ ns.call_function(
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default
+ ),
+ ns.call_function(
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default
+ ),
ns.call_function(torch.ops.aten.conv2d.default),
- ns.call_function(torch.ops.quantized_decomposed.quantize_per_tensor.default),
- ns.call_function(torch.ops.quantized_decomposed.dequantize_per_tensor.default),
+ ns.call_function(
+ torch.ops.quantized_decomposed.quantize_per_tensor.default
+ ),
+ ns.call_function(
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default
+ ),
ns.call_function(torch.ops.aten.max_pool2d.default),
]
self.checkGraphModuleNodes(
@@ -607,8 +619,8 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
_annotated=True,
)
elif (
- node.op == "call_function"
- and node.target == torch.ops.aten.add.Tensor
+ node.op == "call_function"
+ and node.target == torch.ops.aten.add.Tensor
):
input_act0 = node.args[0]
assert isinstance(input_act, Node)
@@ -638,7 +650,10 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
return torch.sigmoid(x) + y
def example_inputs(self):
- return (torch.randn(1, 3, 5, 5), torch.randn(1, 3, 5, 5),)
+ return (
+ torch.randn(1, 3, 5, 5),
+ torch.randn(1, 3, 5, 5),
+ )
m = M().eval()
example_inputs = m.example_inputs()
@@ -731,9 +746,13 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
observer_or_fake_quant_ctr=observer.default_observer,
)
input_qspec_map[first_input_node] = act_qspec
- share_qparams_with_input_act0_qspec = SharedQuantizationSpec((first_input_node, cat_node))
+ share_qparams_with_input_act0_qspec = SharedQuantizationSpec(
+ (first_input_node, cat_node)
+ )
for input_node in input_nodes[1:]:
- input_qspec_map[input_node] = share_qparams_with_input_act0_qspec
+ input_qspec_map[
+ input_node
+ ] = share_qparams_with_input_act0_qspec
cat_node.meta[
"quantization_annotation"
@@ -746,7 +765,6 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
def validate(self, model: torch.fx.GraphModule) -> None:
pass
-
m = TestHelperModules.Conv2dWithCat().eval()
example_inputs = (torch.randn(1, 3, 5, 5), torch.randn(1, 3, 5, 5))
@@ -770,7 +788,9 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
obs_ins0 = getattr(m, input0.target)
obs_ins1 = getattr(m, input1.target)
assert obs_ins0 == obs_ins1
- assert len(conv_output_obs) == 2, "expecting two observer that follows conv2d ops"
+ assert (
+ len(conv_output_obs) == 2
+ ), "expecting two observer that follows conv2d ops"
# checking that the output observers for the two convs are shared as well
assert conv_output_obs[0] == conv_output_obs[1]
@@ -804,7 +824,12 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
def _test_transitive_sharing_with_cat_helper(self, quantizer):
m = TestHelperModules.Conv2dWithTwoCat().eval()
- example_inputs = (torch.randn(1, 3, 5, 5), torch.randn(1, 3, 5, 5), torch.randn(1, 6, 3, 3), torch.randn(1, 6, 3, 3))
+ example_inputs = (
+ torch.randn(1, 3, 5, 5),
+ torch.randn(1, 3, 5, 5),
+ torch.randn(1, 6, 3, 3),
+ torch.randn(1, 6, 3, 3),
+ )
# program capture
m = capture_pre_autograd_graph(
@@ -833,7 +858,9 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
obs_ins2 = getattr(m, output_obs.target)
assert obs_ins0 == obs_ins2, "input observer does not match output"
- assert len(conv_output_obs) == 2, "expecting two observer that follows conv2d ops"
+ assert (
+ len(conv_output_obs) == 2
+ ), "expecting two observer that follows conv2d ops"
# checking that the output observers for the two convs are shared as well
assert conv_output_obs[0] == conv_output_obs[1]
@@ -885,6 +912,7 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
so there is an implicit sharing here, all tensors connect to cat1 and cat2 are in the same
sharing group after transitive sharing
"""
+
# TODO: refactor this to a common util
class BackendAQuantizer(Quantizer):
def annotate(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule:
@@ -943,9 +971,13 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
observer_or_fake_quant_ctr=observer.default_observer,
)
input_qspec_map[first_input_node] = act_qspec
- share_qparams_with_input_act0_qspec = SharedQuantizationSpec((first_input_node, cat_node))
+ share_qparams_with_input_act0_qspec = SharedQuantizationSpec(
+ (first_input_node, cat_node)
+ )
for input_node in input_nodes[1:]:
- input_qspec_map[input_node] = share_qparams_with_input_act0_qspec
+ input_qspec_map[
+ input_node
+ ] = share_qparams_with_input_act0_qspec
cat_node.meta[
"quantization_annotation"
@@ -976,6 +1008,7 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
the difference is that for this one, all edges and nodes are shared with the second input edge of cat
instead of the first input edge of cat as in previous example
"""
+
# TODO: refactor this to a common util
class BackendAQuantizer(Quantizer):
def annotate(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule:
@@ -1035,8 +1068,12 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
observer_or_fake_quant_ctr=observer.default_observer,
)
input_qspec_map[second_input_node] = act_qspec
- share_qparams_with_input_act1_qspec = SharedQuantizationSpec((second_input_node, cat_node))
- input_qspec_map[first_input_node] = share_qparams_with_input_act1_qspec
+ share_qparams_with_input_act1_qspec = SharedQuantizationSpec(
+ (second_input_node, cat_node)
+ )
+ input_qspec_map[
+ first_input_node
+ ] = share_qparams_with_input_act1_qspec
cat_node.meta[
"quantization_annotation"
@@ -1070,6 +1107,7 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
x3 -> obs2 -> add2 -> obs2 -> obs3
x4 -> obs2 -/
"""
+
# TODO: refactor this to a common util
class BackendAQuantizer(Quantizer):
def annotate(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule:
@@ -1088,8 +1126,12 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
observer_or_fake_quant_ctr=observer.default_observer,
)
input_qspec_map[second_input_node] = act_qspec
- share_qparams_with_input_act1_qspec = SharedQuantizationSpec((second_input_node, add_node))
- input_qspec_map[first_input_node] = share_qparams_with_input_act1_qspec
+ share_qparams_with_input_act1_qspec = SharedQuantizationSpec(
+ (second_input_node, add_node)
+ )
+ input_qspec_map[
+ first_input_node
+ ] = share_qparams_with_input_act1_qspec
add_node.meta[
"quantization_annotation"
@@ -1104,7 +1146,12 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
pass
m = TestHelperModules.ThreeAdd().eval()
- example_inputs = (torch.randn(1, 3, 5, 5), torch.randn(1, 3, 5, 5), torch.randn(1, 3, 5, 5), torch.randn(1, 3, 5, 5))
+ example_inputs = (
+ torch.randn(1, 3, 5, 5),
+ torch.randn(1, 3, 5, 5),
+ torch.randn(1, 3, 5, 5),
+ torch.randn(1, 3, 5, 5),
+ )
# program capture
m = capture_pre_autograd_graph(
@@ -1134,7 +1181,7 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
# using int32 to simulate int16
int16_qspec = QuantizationSpec(
dtype=torch.int16,
- quant_min=-2**15,
+ quant_min=-(2**15),
quant_max=2**15 - 1,
qscheme=torch.per_tensor_affine,
is_dynamic=False,
@@ -1231,25 +1278,33 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
m = prepare_pt2e(m, BackendAQuantizer())
def test_fold_quantize(self):
- """Test to make sure the quantized model gets quantized weight (quantize_per_tensor op is folded)
- """
+ """Test to make sure the quantized model gets quantized weight (quantize_per_tensor op is folded)"""
m = self._get_pt2e_quantized_linear()
node_occurrence = {
# quantize op for weight node is folded
- ns.call_function(torch.ops.quantized_decomposed.quantize_per_tensor.default): 2,
- ns.call_function(torch.ops.quantized_decomposed.dequantize_per_tensor.default): 3,
+ ns.call_function(
+ torch.ops.quantized_decomposed.quantize_per_tensor.default
+ ): 2,
+ ns.call_function(
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default
+ ): 3,
}
self.checkGraphModuleNodes(m, expected_node_occurrence=node_occurrence)
def test_fold_quantize_per_channel(self):
- """Test to make sure the quantized model gets quantized weight (quantize_per_channel op is folded)
- """
+ """Test to make sure the quantized model gets quantized weight (quantize_per_channel op is folded)"""
m = self._get_pt2e_quantized_linear(is_per_channel=True)
node_occurrence = {
# quantize op for weight node is folded
- ns.call_function(torch.ops.quantized_decomposed.quantize_per_tensor.default): 2,
- ns.call_function(torch.ops.quantized_decomposed.dequantize_per_channel.default): 1,
- ns.call_function(torch.ops.quantized_decomposed.dequantize_per_tensor.default): 2,
+ ns.call_function(
+ torch.ops.quantized_decomposed.quantize_per_tensor.default
+ ): 2,
+ ns.call_function(
+ torch.ops.quantized_decomposed.dequantize_per_channel.default
+ ): 1,
+ ns.call_function(
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default
+ ): 2,
}
self.checkGraphModuleNodes(m, expected_node_occurrence=node_occurrence)
@@ -1257,6 +1312,7 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
"""Make sure the constant propagation does not apply to things unrelated to
quantization
"""
+
class M(torch.nn.Module):
def __init__(self):
super().__init__()
@@ -1277,8 +1333,12 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
m = self._quantize(m, quantizer, example_inputs)
node_occurrence = {
# quantize op for weight node is folded
- ns.call_function(torch.ops.quantized_decomposed.quantize_per_tensor.default): 2,
- ns.call_function(torch.ops.quantized_decomposed.dequantize_per_tensor.default): 3,
+ ns.call_function(
+ torch.ops.quantized_decomposed.quantize_per_tensor.default
+ ): 2,
+ ns.call_function(
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default
+ ): 3,
# transpose op not folded
ns.call_function(torch.ops.aten.t.default): 1,
}
@@ -1291,6 +1351,7 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
After:
get_attr(folded_weight) -> dequantize
"""
+
class M(torch.nn.Module):
def __init__(self):
super().__init__()
@@ -1308,8 +1369,12 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
m = self._quantize(m, quantizer, example_inputs)
node_occurrence = {
# quantize op for weight node is folded
- ns.call_function(torch.ops.quantized_decomposed.quantize_per_tensor.default): 2,
- ns.call_function(torch.ops.quantized_decomposed.dequantize_per_tensor.default): 3,
+ ns.call_function(
+ torch.ops.quantized_decomposed.quantize_per_tensor.default
+ ): 2,
+ ns.call_function(
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default
+ ): 3,
}
self.checkGraphModuleNodes(m, expected_node_occurrence=node_occurrence)
@@ -1317,6 +1382,7 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
"""Test to make sure the get_attr node for const propagated weight Tensor gets the correct
metadata (from original get_attr node from weight)
"""
+
class M(torch.nn.Module):
def __init__(self):
super().__init__()
@@ -1336,7 +1402,10 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
)
weight_meta = None
for n in m.graph.nodes:
- if n.op == "get_attr" and next(iter(n.users)).target == torch.ops.aten.linear.default:
+ if (
+ n.op == "get_attr"
+ and next(iter(n.users)).target == torch.ops.aten.linear.default
+ ):
weight_meta = n.meta
break
assert weight_meta is not None, "Expect to find metadata for weight node"
@@ -1352,8 +1421,7 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
self.assertEqual(n.meta[key], weight_meta[key])
def test_save_load(self):
- """Test save/load a quantized model
- """
+ """Test save/load a quantized model"""
m = self._get_pt2e_quantized_linear()
example_inputs = (torch.randn(2, 2),)
ref_res = m(*example_inputs)
@@ -1393,7 +1461,9 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
def test_transform_for_annotation(self):
class TestQuantizer(Quantizer):
- def transform_for_annotation(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule:
+ def transform_for_annotation(
+ self, model: torch.fx.GraphModule
+ ) -> torch.fx.GraphModule:
for n in model.graph.nodes:
if n.target == torch.ops.aten.add.Tensor:
n.target = torch.ops.aten.mul.Tensor
@@ -1423,7 +1493,9 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
def test_composable_quantizer_transform_for_annotation(self):
class TestQuantizer1(Quantizer):
- def transform_for_annotation(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule:
+ def transform_for_annotation(
+ self, model: torch.fx.GraphModule
+ ) -> torch.fx.GraphModule:
for n in model.graph.nodes:
if n.target == torch.ops.aten.add.Tensor:
n.target = torch.ops.aten.mul.Tensor
@@ -1436,7 +1508,9 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
pass
class TestQuantizer2(Quantizer):
- def transform_for_annotation(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule:
+ def transform_for_annotation(
+ self, model: torch.fx.GraphModule
+ ) -> torch.fx.GraphModule:
for n in model.graph.nodes:
if n.target == torch.ops.aten.sub.Tensor:
n.target = torch.ops.aten.div.Tensor
@@ -1453,10 +1527,12 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
return x + y - z
m = M().eval()
- quantizer = ComposableQuantizer(
- [TestQuantizer1(), TestQuantizer2()]
+ quantizer = ComposableQuantizer([TestQuantizer1(), TestQuantizer2()])
+ example_inputs = (
+ torch.randn(1, 2, 3, 3),
+ torch.randn(1, 2, 3, 3),
+ torch.randn(1, 2, 3, 3),
)
- example_inputs = (torch.randn(1, 2, 3, 3), torch.randn(1, 2, 3, 3), torch.randn(1, 2, 3, 3))
m = capture_pre_autograd_graph(m, example_inputs)
m = prepare_pt2e(m, quantizer)
m(*example_inputs)
@@ -1772,7 +1848,9 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
torch.ao.quantization.move_exported_model_to_eval(m)
# Assert that batch norm op is now in eval mode
- bn_node = self._get_node(m, torch.ops.aten._native_batch_norm_legit_no_training.default)
+ bn_node = self._get_node(
+ m, torch.ops.aten._native_batch_norm_legit_no_training.default
+ )
self.assertTrue(bn_node is not None)
# Move to train
@@ -1814,7 +1892,6 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
m.train()
def test_allow_exported_model_train_eval(self):
-
class M(torch.nn.Module):
def __init__(self):
super().__init__()
@@ -1890,8 +1967,12 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
example_inputs = (torch.rand(3, 3, 5, 5),)
exported_gm = capture_pre_autograd_graph(m, example_inputs)
fx_traced_gm = torch.fx.symbolic_trace(m, example_inputs)
- self.assertTrue(torch.ao.quantization.pt2e.export_utils.model_is_exported(exported_gm))
- self.assertFalse(torch.ao.quantization.pt2e.export_utils.model_is_exported(fx_traced_gm))
+ self.assertTrue(
+ torch.ao.quantization.pt2e.export_utils.model_is_exported(exported_gm)
+ )
+ self.assertFalse(
+ torch.ao.quantization.pt2e.export_utils.model_is_exported(fx_traced_gm)
+ )
self.assertFalse(torch.ao.quantization.pt2e.export_utils.model_is_exported(m))
def test_reentrant(self):
@@ -1899,31 +1980,49 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
m = TestHelperModules.ConvBnReLU2dAndLinearReLU()
example_inputs = (torch.randn(3, 3, 10, 10),)
- quantizer = XNNPACKQuantizer().set_global(get_symmetric_quantization_config(is_per_channel=True, is_qat=True))
+ quantizer = XNNPACKQuantizer().set_global(
+ get_symmetric_quantization_config(is_per_channel=True, is_qat=True)
+ )
m.conv_bn_relu = capture_pre_autograd_graph(m.conv_bn_relu, example_inputs)
m.conv_bn_relu = prepare_qat_pt2e(m.conv_bn_relu, quantizer)
m(*example_inputs)
m.conv_bn_relu = convert_pt2e(m.conv_bn_relu)
- quantizer = XNNPACKQuantizer().set_module_type(torch.nn.Linear, get_symmetric_quantization_config(is_per_channel=False))
+ quantizer = XNNPACKQuantizer().set_module_type(
+ torch.nn.Linear, get_symmetric_quantization_config(is_per_channel=False)
+ )
m = capture_pre_autograd_graph(m, example_inputs)
m = prepare_pt2e(m, quantizer)
m = convert_pt2e(m)
node_occurrence = {
- ns.call_function(torch.ops.quantized_decomposed.quantize_per_tensor.default): 4,
+ ns.call_function(
+ torch.ops.quantized_decomposed.quantize_per_tensor.default
+ ): 4,
# one for weight
- ns.call_function(torch.ops.quantized_decomposed.dequantize_per_tensor.default): 5,
- ns.call_function(torch.ops.quantized_decomposed.dequantize_per_channel.default): 1,
+ ns.call_function(
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default
+ ): 5,
+ ns.call_function(
+ torch.ops.quantized_decomposed.dequantize_per_channel.default
+ ): 1,
}
node_list = [
- ns.call_function(torch.ops.quantized_decomposed.dequantize_per_tensor.default),
+ ns.call_function(
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default
+ ),
ns.call_function(torch.ops.aten.conv2d.default),
ns.call_function(torch.ops.aten.relu.default),
- ns.call_function(torch.ops.quantized_decomposed.quantize_per_tensor.default),
- ns.call_function(torch.ops.quantized_decomposed.dequantize_per_tensor.default),
+ ns.call_function(
+ torch.ops.quantized_decomposed.quantize_per_tensor.default
+ ),
+ ns.call_function(
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default
+ ),
ns.call_function(torch.ops.aten.linear.default),
- ns.call_function(torch.ops.quantized_decomposed.quantize_per_tensor.default),
+ ns.call_function(
+ torch.ops.quantized_decomposed.quantize_per_tensor.default
+ ),
]
self.checkGraphModuleNodes(
m, expected_node_occurrence=node_occurrence, expected_node_list=node_list
@@ -1940,9 +2039,12 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
m(*example_inputs)
def test_observer_callback(self):
- from torch.library import Library, impl
+ from torch.library import impl, Library
+
test_lib = Library("test_int4", "DEF") # noqa: TOR901
- test_lib.define("quantize_per_tensor_int4(Tensor input, float scale, int zero_point) -> Tensor")
+ test_lib.define(
+ "quantize_per_tensor_int4(Tensor input, float scale, int zero_point) -> Tensor"
+ )
@impl(test_lib, "quantize_per_tensor_int4", "CompositeExplicitAutograd")
def quantize_per_tensor_int4(
@@ -1951,9 +2053,15 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
zero_point: int,
) -> torch.Tensor:
inv_scale = 1.0 / scale
- return torch.clamp(torch.round(input * inv_scale) + zero_point, 0, 15).to(torch.uint8).view(torch.bits8)
+ return (
+ torch.clamp(torch.round(input * inv_scale) + zero_point, 0, 15)
+ .to(torch.uint8)
+ .view(torch.bits8)
+ )
- test_lib.define("dequantize_per_tensor_int4(Tensor input, float scale, int zero_point) -> Tensor")
+ test_lib.define(
+ "dequantize_per_tensor_int4(Tensor input, float scale, int zero_point) -> Tensor"
+ )
@impl(test_lib, "dequantize_per_tensor_int4", "CompositeExplicitAutograd")
def dequantize_per_tensor_int4(
@@ -1979,9 +2087,15 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
def convert(self, model: torch.fx.GraphModule, observer_node: Node):
with model.graph.inserting_before(observer_node):
q_node = model.graph.call_function(
- torch.ops.test_int4.quantize_per_tensor_int4, (observer_node.args[0], 1.0, 0), {})
+ torch.ops.test_int4.quantize_per_tensor_int4,
+ (observer_node.args[0], 1.0, 0),
+ {},
+ )
dq_node = model.graph.call_function(
- torch.ops.test_int4.dequantize_per_tensor_int4, (q_node, 1.0, 0), {})
+ torch.ops.test_int4.dequantize_per_tensor_int4,
+ (q_node, 1.0, 0),
+ {},
+ )
observer_node.replace_all_uses_with(dq_node)
model.graph.erase_node(observer_node)
@@ -2017,12 +2131,14 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
def validate(self, model: torch.fx.GraphModule) -> None:
pass
-
class M(torch.nn.Module):
def forward(self, x1, x2):
return x1 + x2
- example_inputs = (torch.randn(1, 3, 5, 5), torch.randn(1, 3, 5, 5),)
+ example_inputs = (
+ torch.randn(1, 3, 5, 5),
+ torch.randn(1, 3, 5, 5),
+ )
node_occurrence = {
# two for input of the first conv, one for output for the first conv
torch.ops.test_int4.quantize_per_tensor_int4: 3,
@@ -2057,7 +2173,9 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
is_per_channel=True, is_dynamic=True
)
dynamic_quantizer.set_global(operator_config_dynamic)
- composed_quantizer = ComposableQuantizer([embedding_quantizer, dynamic_quantizer])
+ composed_quantizer = ComposableQuantizer(
+ [embedding_quantizer, dynamic_quantizer]
+ )
prev = time.time()
model = prepare_qat_pt2e(model, composed_quantizer)
cur = time.time()
diff --git a/test/quantization/pt2e/test_x86inductor_quantizer.py b/test/quantization/pt2e/test_x86inductor_quantizer.py
index c9df319bfd..cb7385ad27 100644
--- a/test/quantization/pt2e/test_x86inductor_quantizer.py
+++ b/test/quantization/pt2e/test_x86inductor_quantizer.py
@@ -1,34 +1,35 @@
# Owner(s): ["oncall: quantization"]
import copy
+import itertools
+from enum import Enum
+
import torch
+import torch.ao.quantization.quantizer.x86_inductor_quantizer as xiq
import torch.nn as nn
-from torch.ao.quantization.quantizer.x86_inductor_quantizer import (
- X86InductorQuantizer,
-)
+from torch._export import capture_pre_autograd_graph
+from torch.ao.quantization import ObserverBase
from torch.ao.quantization.quantize_pt2e import (
convert_pt2e,
prepare_pt2e,
prepare_qat_pt2e,
)
+from torch.ao.quantization.quantizer.x86_inductor_quantizer import X86InductorQuantizer
from torch.testing._internal.common_quantization import (
NodeSpec as ns,
QuantizationTestCase,
- skipIfNoX86,
skipIfNoInductorSupport,
+ skipIfNoX86,
)
-from torch.testing._internal.common_utils import skipIfTorchDynamo
from torch.testing._internal.common_quantized import override_quantized_engine
-from enum import Enum
-import itertools
-import torch.ao.quantization.quantizer.x86_inductor_quantizer as xiq
-from torch.ao.quantization import ObserverBase
-from torch._export import capture_pre_autograd_graph
+from torch.testing._internal.common_utils import skipIfTorchDynamo
+
class Conv2DType(Enum):
left = 1
right = 2
both = 3
+
class TestHelperModules:
class SingleConv2dModule(torch.nn.Module):
def __init__(self, with_bn=False) -> None:
@@ -46,7 +47,9 @@ class TestHelperModules:
class Conv2dUnaryModule(torch.nn.Module):
def __init__(self, post_op, use_bias: bool = False, with_bn=False) -> None:
super().__init__()
- self.conv = nn.Conv2d(3, 6, (2, 2), stride=(1, 1), padding=(1, 1), bias=use_bias)
+ self.conv = nn.Conv2d(
+ 3, 6, (2, 2), stride=(1, 1), padding=(1, 1), bias=use_bias
+ )
self.post_op = post_op
self.bn = torch.nn.BatchNorm2d(6)
self.with_bn = with_bn
@@ -61,18 +64,29 @@ class TestHelperModules:
return x
class Conv2dAddModule(torch.nn.Module):
- def __init__(self,
- inplace_add: bool = False,
- conv2d_type: Conv2DType = Conv2DType.left,
- use_bias: bool = False,
- with_bn: bool = False,
- ) -> None:
+ def __init__(
+ self,
+ inplace_add: bool = False,
+ conv2d_type: Conv2DType = Conv2DType.left,
+ use_bias: bool = False,
+ with_bn: bool = False,
+ ) -> None:
super().__init__()
self.conv = torch.nn.Conv2d(
- in_channels=3, out_channels=3, kernel_size=3, stride=1, padding=1, bias=use_bias
+ in_channels=3,
+ out_channels=3,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ bias=use_bias,
)
self.conv2 = torch.nn.Conv2d(
- in_channels=3, out_channels=3, kernel_size=3, stride=1, padding=1, bias=use_bias
+ in_channels=3,
+ out_channels=3,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ bias=use_bias,
)
self.relu = nn.ReLU()
self.inplace_add = inplace_add
@@ -109,19 +123,30 @@ class TestHelperModules:
return self.conv(x) + self.conv2(x)
class Conv2dAddReLUModule(torch.nn.Module):
- def __init__(self,
- inplace_add: bool = False,
- conv2d_type: Conv2DType = Conv2DType.left,
- inplace_relu: bool = False,
- use_bias: bool = False,
- with_bn: bool = False,
- ) -> None:
+ def __init__(
+ self,
+ inplace_add: bool = False,
+ conv2d_type: Conv2DType = Conv2DType.left,
+ inplace_relu: bool = False,
+ use_bias: bool = False,
+ with_bn: bool = False,
+ ) -> None:
super().__init__()
self.conv = torch.nn.Conv2d(
- in_channels=3, out_channels=3, kernel_size=3, stride=1, padding=1, bias=use_bias
+ in_channels=3,
+ out_channels=3,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ bias=use_bias,
)
self.conv2 = torch.nn.Conv2d(
- in_channels=3, out_channels=3, kernel_size=3, stride=1, padding=1, bias=use_bias
+ in_channels=3,
+ out_channels=3,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ bias=use_bias,
)
self.relu = nn.ReLU()
self.inplace_add = inplace_add
@@ -170,21 +195,43 @@ class TestHelperModules:
return torch.pow(x, 2)
class SerialsConv2dAddReLUModule(torch.nn.Module):
- """ Serials of 2 Conv2d -> Add -> ReLU Pattern.
- """
- def __init__(self, ) -> None:
+ """Serials of 2 Conv2d -> Add -> ReLU Pattern."""
+
+ def __init__(
+ self,
+ ) -> None:
super().__init__()
self.conv = torch.nn.Conv2d(
- in_channels=3, out_channels=3, kernel_size=3, stride=1, padding=1, bias=True
+ in_channels=3,
+ out_channels=3,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ bias=True,
)
self.conv2 = torch.nn.Conv2d(
- in_channels=3, out_channels=3, kernel_size=3, stride=1, padding=1, bias=True
+ in_channels=3,
+ out_channels=3,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ bias=True,
)
self.conv3 = torch.nn.Conv2d(
- in_channels=3, out_channels=3, kernel_size=3, stride=1, padding=1, bias=True
+ in_channels=3,
+ out_channels=3,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ bias=True,
)
self.conv4 = torch.nn.Conv2d(
- in_channels=3, out_channels=3, kernel_size=3, stride=1, padding=1, bias=True
+ in_channels=3,
+ out_channels=3,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ bias=True,
)
self.relu = nn.ReLU()
self.relu2 = nn.ReLU()
@@ -196,13 +243,21 @@ class TestHelperModules:
return res2
class Conv2dCatMaxpool2d(torch.nn.Module):
- def __init__(self,):
+ def __init__(
+ self,
+ ):
super().__init__()
- self.conv = torch.nn.Conv2d(3, 16, 7, bias=True, stride=2, padding=3, dilation=1)
- self.conv2 = torch.nn.Conv2d(3, 16, 7, bias=True, stride=2, padding=3, dilation=1)
+ self.conv = torch.nn.Conv2d(
+ 3, 16, 7, bias=True, stride=2, padding=3, dilation=1
+ )
+ self.conv2 = torch.nn.Conv2d(
+ 3, 16, 7, bias=True, stride=2, padding=3, dilation=1
+ )
self.relu = torch.nn.ReLU()
self.maxpool = torch.nn.MaxPool2d(3, stride=2, padding=1)
- self.conv3 = torch.nn.Conv2d(32, 32, 7, bias=True, stride=2, padding=3, dilation=1)
+ self.conv3 = torch.nn.Conv2d(
+ 32, 32, 7, bias=True, stride=2, padding=3, dilation=1
+ )
def forward(self, x):
temp1 = self.relu(self.conv(x))
@@ -213,9 +268,13 @@ class TestHelperModules:
return temp5
class Conv2dAvgPool2d(torch.nn.Module):
- def __init__(self,):
+ def __init__(
+ self,
+ ):
super().__init__()
- self.conv = torch.nn.Conv2d(3, 16, 7, bias=True, stride=2, padding=3, dilation=1)
+ self.conv = torch.nn.Conv2d(
+ 3, 16, 7, bias=True, stride=2, padding=3, dilation=1
+ )
self.avgpool = torch.nn.AvgPool2d(3, stride=2, padding=1)
def forward(self, x):
@@ -223,9 +282,13 @@ class TestHelperModules:
return temp1
class Conv2dCatSameInputs(torch.nn.Module):
- def __init__(self,):
+ def __init__(
+ self,
+ ):
super().__init__()
- self.conv = torch.nn.Conv2d(3, 16, 7, bias=True, stride=2, padding=3, dilation=1)
+ self.conv = torch.nn.Conv2d(
+ 3, 16, 7, bias=True, stride=2, padding=3, dilation=1
+ )
self.relu = torch.nn.ReLU()
def forward(self, x):
@@ -234,9 +297,13 @@ class TestHelperModules:
return temp3
class Conv2dCatSingleInput(torch.nn.Module):
- def __init__(self,):
+ def __init__(
+ self,
+ ):
super().__init__()
- self.conv = torch.nn.Conv2d(3, 16, 7, bias=True, stride=2, padding=3, dilation=1)
+ self.conv = torch.nn.Conv2d(
+ 3, 16, 7, bias=True, stride=2, padding=3, dilation=1
+ )
self.relu = torch.nn.ReLU()
def forward(self, x):
@@ -253,7 +320,9 @@ class TestHelperModules:
return self.linear(x)
class LinearUnaryModule(torch.nn.Module):
- def __init__(self, use_bias, postop, inplace_postop=False, post_op_algo='none') -> None:
+ def __init__(
+ self, use_bias, postop, inplace_postop=False, post_op_algo="none"
+ ) -> None:
super().__init__()
self.linear = nn.Linear(4, 4, bias=use_bias)
if postop == nn.GELU:
@@ -265,9 +334,10 @@ class TestHelperModules:
return self.postop(self.linear(x))
class Conv2dAddModule2(torch.nn.Module):
- def __init__(self,
- inplace_add: bool = False,
- ) -> None:
+ def __init__(
+ self,
+ inplace_add: bool = False,
+ ) -> None:
super().__init__()
self.conv = torch.nn.Conv2d(
in_channels=3, out_channels=3, kernel_size=3, stride=1, padding=1
@@ -301,11 +371,12 @@ class TestHelperModules:
q = self.q_proj(x)
k = self.k_proj(x)
v = self.v_proj(x)
- scores = torch.bmm(q, k.transpose(1, 2)) / (self.input_dim ** 0.5)
+ scores = torch.bmm(q, k.transpose(1, 2)) / (self.input_dim**0.5)
attention = self.softmax(scores)
weighted = torch.bmm(attention, v)
return weighted
+
class X86InductorQuantTestCase(QuantizationTestCase):
def _test_quantizer(
self,
@@ -345,6 +416,7 @@ class X86InductorQuantTestCase(QuantizationTestCase):
)
return export_model, prepare_model, convert_model
+
@skipIfNoInductorSupport
class TestQuantizePT2EX86Inductor(X86InductorQuantTestCase):
@skipIfNoX86
@@ -387,19 +459,41 @@ class TestQuantizePT2EX86Inductor(X86InductorQuantTestCase):
unary_map = {
"relu": [torch.nn.ReLU(inplace=False), torch.ops.aten.relu.default],
"relu_inplace": [torch.nn.ReLU(inplace=True), torch.ops.aten.relu_.default],
- "hardtanh": [torch.nn.Hardtanh(min_val=0.0, max_val=6.0, inplace=False), torch.ops.aten.hardtanh.default],
- "hardtanh_inplace": [torch.nn.Hardtanh(min_val=0.0, max_val=6.0, inplace=True), torch.ops.aten.hardtanh_.default],
+ "hardtanh": [
+ torch.nn.Hardtanh(min_val=0.0, max_val=6.0, inplace=False),
+ torch.ops.aten.hardtanh.default,
+ ],
+ "hardtanh_inplace": [
+ torch.nn.Hardtanh(min_val=0.0, max_val=6.0, inplace=True),
+ torch.ops.aten.hardtanh_.default,
+ ],
"relu6": [torch.nn.ReLU6(inplace=False), torch.ops.aten.hardtanh.default],
- "relu6_inplace": [torch.nn.ReLU6(inplace=True), torch.ops.aten.hardtanh_.default],
- "hardswish": [torch.nn.Hardswish(inplace=False), torch.ops.aten.hardswish.default],
- "hardswish_inplace": [torch.nn.Hardswish(inplace=True), torch.ops.aten.hardswish_.default],
+ "relu6_inplace": [
+ torch.nn.ReLU6(inplace=True),
+ torch.ops.aten.hardtanh_.default,
+ ],
+ "hardswish": [
+ torch.nn.Hardswish(inplace=False),
+ torch.ops.aten.hardswish.default,
+ ],
+ "hardswish_inplace": [
+ torch.nn.Hardswish(inplace=True),
+ torch.ops.aten.hardswish_.default,
+ ],
"swish": [torch.nn.SiLU(inplace=False), torch.ops.aten.silu.default],
- "swish_inplace": [torch.nn.SiLU(inplace=True), torch.ops.aten.silu_.default],
+ "swish_inplace": [
+ torch.nn.SiLU(inplace=True),
+ torch.ops.aten.silu_.default,
+ ],
}
use_bias_list = [True, False]
with override_quantized_engine("x86"), torch.no_grad():
- for unary_op, use_bias in itertools.product(unary_map.keys(), use_bias_list):
- m = TestHelperModules.Conv2dUnaryModule(unary_map[unary_op][0], use_bias=use_bias).eval()
+ for unary_op, use_bias in itertools.product(
+ unary_map.keys(), use_bias_list
+ ):
+ m = TestHelperModules.Conv2dUnaryModule(
+ unary_map[unary_op][0], use_bias=use_bias
+ ).eval()
example_inputs = (torch.randn(2, 3, 16, 16),)
quantizer = X86InductorQuantizer().set_global(
xiq.get_default_x86_inductor_quantization_config()
@@ -476,7 +570,6 @@ class TestQuantizePT2EX86Inductor(X86InductorQuantTestCase):
node_list,
)
-
@skipIfNoX86
def test_conv2d_binary2(self):
"""
@@ -506,7 +599,9 @@ class TestQuantizePT2EX86Inductor(X86InductorQuantTestCase):
torch.ops.quantized_decomposed.dequantize_per_tensor.default,
torch.ops.aten.conv2d.default,
torch.ops.quantized_decomposed.quantize_per_tensor.default,
- torch.ops.aten.add_.Tensor if inplace_add else torch.ops.aten.add.Tensor,
+ torch.ops.aten.add_.Tensor
+ if inplace_add
+ else torch.ops.aten.add.Tensor,
]
self._test_quantizer(
m,
@@ -576,7 +671,9 @@ class TestQuantizePT2EX86Inductor(X86InductorQuantTestCase):
with override_quantized_engine("x86"), torch.no_grad():
m = TestHelperModules.SerialsConv2dAddReLUModule().eval()
example_inputs = (torch.randn(2, 3, 16, 16),)
- quantizer = X86InductorQuantizer().set_global(xiq.get_default_x86_inductor_quantization_config())
+ quantizer = X86InductorQuantizer().set_global(
+ xiq.get_default_x86_inductor_quantization_config()
+ )
node_occurrence = {
torch.ops.quantized_decomposed.quantize_per_tensor.default: 4,
torch.ops.quantized_decomposed.dequantize_per_tensor.default: 6,
@@ -635,10 +732,7 @@ class TestQuantizePT2EX86Inductor(X86InductorQuantTestCase):
)
# Check Maxpool2d has share observer at input and output
for node in prepare_model.graph.nodes:
- if (
- node.op == "call_function"
- and node.target is single_op
- ):
+ if node.op == "call_function" and node.target is single_op:
single_op_node = node
input_obs_of_single_op = getattr(
prepare_model, single_op_node.args[0].target
@@ -658,7 +752,6 @@ class TestQuantizePT2EX86Inductor(X86InductorQuantTestCase):
self.assertTrue(input_obs_of_single_op is output_obs_of_single_op)
self.assertTrue(input_obs_of_single_op is not input_obs_of_conv)
-
@skipIfNoX86
def test_maxpool2d_recipe(self):
r"""
@@ -671,7 +764,6 @@ class TestQuantizePT2EX86Inductor(X86InductorQuantTestCase):
torch.ops.aten.max_pool2d.default,
)
-
@skipIfNoX86
def test_adaptive_avg_pool2d_recipe(self):
r"""
@@ -679,12 +771,13 @@ class TestQuantizePT2EX86Inductor(X86InductorQuantTestCase):
Since adaptive_avg_pool2d is a int8_in_int8_out_op, there is obs between adaptive_avg_pool2d and pow.
"""
self._single_op_share_observer_recipe_test_helper(
- TestHelperModules.Conv2dSingleOpPowModule(nn.AdaptiveAvgPool2d((1, 1))).eval(),
+ TestHelperModules.Conv2dSingleOpPowModule(
+ nn.AdaptiveAvgPool2d((1, 1))
+ ).eval(),
torch.rand(1, 2, 14, 14),
torch.ops.aten.adaptive_avg_pool2d.default,
)
-
@skipIfNoX86
def test_flatten_recipe(self):
r"""
@@ -692,12 +785,13 @@ class TestQuantizePT2EX86Inductor(X86InductorQuantTestCase):
Since flatten is a int8_in_int8_out_op, there is obs between flatten and pow.
"""
self._single_op_share_observer_recipe_test_helper(
- TestHelperModules.Conv2dSingleOpPowModule(lambda x: torch.flatten(x, 1)).eval(),
+ TestHelperModules.Conv2dSingleOpPowModule(
+ lambda x: torch.flatten(x, 1)
+ ).eval(),
torch.rand(1, 2, 14, 14),
torch.ops.aten.flatten.using_ints,
)
-
@skipIfNoX86
def test_cat_recipe(self):
r"""
@@ -739,19 +833,10 @@ class TestQuantizePT2EX86Inductor(X86InductorQuantTestCase):
)
# Check Cat/Maxpool2d has share observer at input and output
for node in prepare_model.graph.nodes:
- if (
- node.op == "call_function"
- and node.target == torch.ops.aten.cat.default
- ):
- cat_act_obs0 = getattr(
- prepare_model, node.all_input_nodes[0].target
- )
- cat_act_obs1 = getattr(
- prepare_model, node.all_input_nodes[1].target
- )
- cat_out_obs = getattr(
- prepare_model, next(iter(node.users)).target
- )
+ if node.op == "call_function" and node.target == torch.ops.aten.cat.default:
+ cat_act_obs0 = getattr(prepare_model, node.all_input_nodes[0].target)
+ cat_act_obs1 = getattr(prepare_model, node.all_input_nodes[1].target)
+ cat_out_obs = getattr(prepare_model, next(iter(node.users)).target)
elif (
node.op == "call_function"
and node.target is torch.ops.aten.max_pool2d.default
@@ -811,19 +896,10 @@ class TestQuantizePT2EX86Inductor(X86InductorQuantTestCase):
)
# Check Cat has share observer at input and output
for node in prepare_model.graph.nodes:
- if (
- node.op == "call_function"
- and node.target == torch.ops.aten.cat.default
- ):
- cat_act_obs0 = getattr(
- prepare_model, node.args[0][0].target
- )
- cat_act_obs1 = getattr(
- prepare_model, node.args[0][1].target
- )
- cat_out_obs = getattr(
- prepare_model, next(iter(node.users)).target
- )
+ if node.op == "call_function" and node.target == torch.ops.aten.cat.default:
+ cat_act_obs0 = getattr(prepare_model, node.args[0][0].target)
+ cat_act_obs1 = getattr(prepare_model, node.args[0][1].target)
+ cat_out_obs = getattr(prepare_model, next(iter(node.users)).target)
self.assertTrue(isinstance(cat_act_obs0, ObserverBase))
self.assertTrue(isinstance(cat_act_obs1, ObserverBase))
self.assertTrue(isinstance(cat_out_obs, ObserverBase))
@@ -868,16 +944,9 @@ class TestQuantizePT2EX86Inductor(X86InductorQuantTestCase):
)
# Check Cat has share observer at input and output
for node in prepare_model.graph.nodes:
- if (
- node.op == "call_function"
- and node.target == torch.ops.aten.cat.default
- ):
- cat_act_obs0 = getattr(
- prepare_model, node.args[0][0].target
- )
- cat_out_obs = getattr(
- prepare_model, next(iter(node.users)).target
- )
+ if node.op == "call_function" and node.target == torch.ops.aten.cat.default:
+ cat_act_obs0 = getattr(prepare_model, node.args[0][0].target)
+ cat_out_obs = getattr(prepare_model, next(iter(node.users)).target)
self.assertTrue(isinstance(cat_act_obs0, ObserverBase))
self.assertTrue(isinstance(cat_out_obs, ObserverBase))
self.assertTrue(cat_act_obs0 is cat_out_obs)
@@ -935,7 +1004,9 @@ class TestQuantizePT2EX86Inductor(X86InductorQuantTestCase):
and node.target is torch.ops.aten.conv2d.default
):
conv_node = node
- output_obs_of_conv = getattr(prepare_model, next(iter(conv_node.users)).target)
+ output_obs_of_conv = getattr(
+ prepare_model, next(iter(conv_node.users)).target
+ )
self.assertTrue(isinstance(input_obs_of_avgpool, ObserverBase))
self.assertTrue(isinstance(output_obs_of_avgpool, ObserverBase))
self.assertTrue(isinstance(output_obs_of_conv, ObserverBase))
@@ -986,11 +1057,16 @@ class TestQuantizePT2EX86Inductor(X86InductorQuantTestCase):
cases = itertools.product(use_bias_list, inplace_list, postop_list)
post_op_map = {
nn.ReLU: [torch.ops.aten.relu_.default, torch.ops.aten.relu.default],
- nn.LeakyReLU: [torch.ops.aten.leaky_relu_.default, torch.ops.aten.leaky_relu.default],
+ nn.LeakyReLU: [
+ torch.ops.aten.leaky_relu_.default,
+ torch.ops.aten.leaky_relu.default,
+ ],
}
with override_quantized_engine("x86"), torch.no_grad():
for use_bias, inplace, postop in cases:
- m = TestHelperModules.LinearUnaryModule(use_bias=use_bias, postop=postop, inplace_postop=inplace).eval()
+ m = TestHelperModules.LinearUnaryModule(
+ use_bias=use_bias, postop=postop, inplace_postop=inplace
+ ).eval()
example_inputs = (torch.randn(2, 4),)
quantizer = X86InductorQuantizer().set_global(
xiq.get_default_x86_inductor_quantization_config()
@@ -1024,11 +1100,13 @@ class TestQuantizePT2EX86Inductor(X86InductorQuantTestCase):
"""
use_bias_list = [True, False]
postop = nn.GELU
- post_op_algorithm = ['none', 'tanh']
+ post_op_algorithm = ["none", "tanh"]
cases = itertools.product(use_bias_list, post_op_algorithm)
with override_quantized_engine("x86"), torch.no_grad():
for use_bias, post_op_algo in cases:
- m = TestHelperModules.LinearUnaryModule(use_bias=use_bias, postop=postop, post_op_algo=post_op_algo).eval()
+ m = TestHelperModules.LinearUnaryModule(
+ use_bias=use_bias, postop=postop, post_op_algo=post_op_algo
+ ).eval()
example_inputs = (torch.randn(2, 4),)
quantizer = X86InductorQuantizer().set_global(
xiq.get_default_x86_inductor_quantization_config()
@@ -1103,19 +1181,39 @@ class TestQuantizePT2EX86Inductor(X86InductorQuantTestCase):
unary_map = {
"relu": [torch.nn.ReLU(inplace=False), torch.ops.aten.relu.default],
"relu_inplace": [torch.nn.ReLU(inplace=True), torch.ops.aten.relu_.default],
- "hardtanh": [torch.nn.Hardtanh(min_val=0.0, max_val=6.0, inplace=False), torch.ops.aten.hardtanh.default],
- "hardtanh_inplace": [torch.nn.Hardtanh(min_val=0.0, max_val=6.0, inplace=True), torch.ops.aten.hardtanh_.default],
+ "hardtanh": [
+ torch.nn.Hardtanh(min_val=0.0, max_val=6.0, inplace=False),
+ torch.ops.aten.hardtanh.default,
+ ],
+ "hardtanh_inplace": [
+ torch.nn.Hardtanh(min_val=0.0, max_val=6.0, inplace=True),
+ torch.ops.aten.hardtanh_.default,
+ ],
"relu6": [torch.nn.ReLU6(inplace=False), torch.ops.aten.hardtanh.default],
- "relu6_inplace": [torch.nn.ReLU6(inplace=True), torch.ops.aten.hardtanh_.default],
- "hardswish": [torch.nn.Hardswish(inplace=False), torch.ops.aten.hardswish.default],
- "hardswish_inplace": [torch.nn.Hardswish(inplace=True), torch.ops.aten.hardswish_.default],
+ "relu6_inplace": [
+ torch.nn.ReLU6(inplace=True),
+ torch.ops.aten.hardtanh_.default,
+ ],
+ "hardswish": [
+ torch.nn.Hardswish(inplace=False),
+ torch.ops.aten.hardswish.default,
+ ],
+ "hardswish_inplace": [
+ torch.nn.Hardswish(inplace=True),
+ torch.ops.aten.hardswish_.default,
+ ],
"swish": [torch.nn.SiLU(inplace=False), torch.ops.aten.silu.default],
- "swish_inplace": [torch.nn.SiLU(inplace=True), torch.ops.aten.silu_.default],
+ "swish_inplace": [
+ torch.nn.SiLU(inplace=True),
+ torch.ops.aten.silu_.default,
+ ],
}
with override_quantized_engine("x86"):
for unary_op in unary_map.keys():
- m = TestHelperModules.Conv2dUnaryModule(unary_map[unary_op][0], with_bn=True)
+ m = TestHelperModules.Conv2dUnaryModule(
+ unary_map[unary_op][0], with_bn=True
+ )
example_inputs = (torch.randn(2, 3, 16, 16),)
quantizer = X86InductorQuantizer().set_global(
xiq.get_default_x86_inductor_quantization_config(is_qat=True)
@@ -1160,7 +1258,9 @@ class TestQuantizePT2EX86Inductor(X86InductorQuantTestCase):
)
with override_quantized_engine("x86"):
for inplace_add in [True, False]:
- m = TestHelperModules.Conv2dAddModule(inplace_add=inplace_add, with_bn=True)
+ m = TestHelperModules.Conv2dAddModule(
+ inplace_add=inplace_add, with_bn=True
+ )
node_occurrence = {
# one for input and weight of the conv
# one for output for the add
@@ -1177,7 +1277,9 @@ class TestQuantizePT2EX86Inductor(X86InductorQuantTestCase):
torch.ops.quantized_decomposed.quantize_per_tensor.default,
torch.ops.quantized_decomposed.dequantize_per_tensor.default,
torch.ops.aten.conv2d.default,
- torch.ops.aten.add_.Tensor if inplace_add else torch.ops.aten.add.Tensor,
+ torch.ops.aten.add_.Tensor
+ if inplace_add
+ else torch.ops.aten.add.Tensor,
torch.ops.quantized_decomposed.quantize_per_tensor.default,
torch.ops.quantized_decomposed.dequantize_per_tensor.default,
]
@@ -1222,7 +1324,9 @@ class TestQuantizePT2EX86Inductor(X86InductorQuantTestCase):
torch.ops.quantized_decomposed.dequantize_per_tensor.default,
torch.ops.aten.conv2d.default,
torch.ops.quantized_decomposed.quantize_per_tensor.default,
- torch.ops.aten.add_.Tensor if inplace_add else torch.ops.aten.add.Tensor,
+ torch.ops.aten.add_.Tensor
+ if inplace_add
+ else torch.ops.aten.add.Tensor,
]
self._test_quantizer(
m,
@@ -1319,8 +1423,7 @@ class TestQuantizePT2EX86Inductor(X86InductorQuantTestCase):
example_inputs = (torch.randn(1, 4, 64),)
quantizer = X86InductorQuantizer().set_global(
xiq.get_default_x86_inductor_quantization_config(
- is_qat=True,
- is_dynamic=True
+ is_qat=True, is_dynamic=True
)
)
node_occurrence = {
|
2.41.0
|
5331aade57725b03c36d5cc6c683f6a6bc0692d
|
Sat, 13 Apr 2024 18:35:02 +0000
|
[PATCH 0144/1000] Simplify ATen sparse semi-structured operators based on CUTLASS (#123473)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/123473 Approved by: https://github.com/cpuhrsch
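For context, a minimal usage sketch of the two operators introduced in the diff below (`torch._sparse_semi_structured_mm` and `torch._sparse_semi_structured_addmm`, computing `alpha * mat1 @ mat2 + beta * input`), mirroring the test added by this patch. The shapes, the 2:4 pattern, and the `_FORCE_CUTLASS` toggle are illustrative assumptions; a CUDA device with compute capability 8.x is required:
```python
import torch
from torch.sparse import SparseSemiStructuredTensor, to_sparse_semi_structured

SparseSemiStructuredTensor._FORCE_CUTLASS = True  # exercise the CUTLASS backend (assumption for this sketch)

# 64x128 fp16 matrix with a 2:4 sparsity pattern (two non-zeros per group of four).
mat1 = torch.Tensor([0, 0, 1, 1]).tile((64, 32)).half().cuda()
mat2 = torch.randn(128, 64, dtype=torch.half, device="cuda")
bias = torch.randn(64, dtype=torch.half, device="cuda")  # one element per row of mat1

compressed = to_sparse_semi_structured(mat1)
packed, meta = compressed.values(), compressed.indices()

# mat1 @ mat2 using the compressed representation
out_mm = torch._sparse_semi_structured_mm(packed, meta, mat2)
# alpha * mat1 @ mat2 + beta * bias, with bias broadcast across columns of the product
out_addmm = torch._sparse_semi_structured_addmm(bias, packed, meta, mat2, alpha=1.0, beta=1.0)
```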
|
diff --git a/aten/src/ATen/native/native_functions.yaml b/aten/src/ATen/native/native_functions.yaml
index 02e7c5caa2..6e96a8a6aa 100644
--- a/aten/src/ATen/native/native_functions.yaml
+++ b/aten/src/ATen/native/native_functions.yaml
@@ -3342,10 +3342,19 @@
dispatch:
CUDA: _cslt_sparse_mm_search
+# DEPRECATED: Use torch.__sparse_semi_structured_mm/torch._sparse_semi_structured_addmm instead
- func: _sparse_semi_structured_linear(Tensor input, Tensor weight, Tensor meta, *, Tensor? bias=None, str? activation=None, ScalarType? out_dtype=None) -> Tensor
dispatch:
CUDA: _sparse_semi_structured_linear
+- func: _sparse_semi_structured_mm(Tensor mat1, Tensor mat1_meta, Tensor mat2, *, ScalarType? out_dtype=None) -> Tensor
+ dispatch:
+ CUDA: _sparse_semi_structured_mm
+
+- func: _sparse_semi_structured_addmm(Tensor input, Tensor mat1, Tensor mat1_meta, Tensor mat2, *, Scalar alpha=1, Scalar beta=1, ScalarType? out_dtype=None) -> Tensor
+ dispatch:
+ CUDA: _sparse_semi_structured_addmm
+
- func: _mixed_dtypes_linear(Tensor input, Tensor weight, Tensor scale, *, Tensor? bias=None, str? activation=None) -> Tensor
dispatch:
CUDA: _mixed_dtypes_linear
diff --git a/aten/src/ATen/native/sparse/cuda/SparseSemiStructuredLinear.cu b/aten/src/ATen/native/sparse/cuda/SparseSemiStructuredLinear.cu
index e997f49f3f..47ee1568be 100644
--- a/aten/src/ATen/native/sparse/cuda/SparseSemiStructuredLinear.cu
+++ b/aten/src/ATen/native/sparse/cuda/SparseSemiStructuredLinear.cu
@@ -603,6 +603,10 @@ Tensor _sparse_semi_structured_linear(
const Tensor& meta, const c10::optional<Tensor>& bias_opt,
const c10::optional<c10::string_view> activation_opt,
const c10::optional<c10::ScalarType> out_dtype_opt) {
+ TORCH_WARN_ONCE("_sparse_semi_structured_linear is deprecated and will be "
+ "removed in a future PyTorch release. Please use "
+ "_sparse_semi_structured_mm/_sparse_semi_structured_addmm "
+ "instead.");
#if defined(USE_ROCM) || defined(_MSC_VER) || (defined(CUDA_VERSION) && CUDA_VERSION < 11080)
AT_ERROR("_sparse_semi_structured_linear: CUTLASS not supported");
return Tensor{};
@@ -893,177 +897,3 @@ Tensor _sparse_semi_structured_linear(
}
} // namespace at::native
-
-// Following is just for testing purposes.
-namespace at::native {
-
-#if defined(USE_ROCM) || defined(_MSC_VER) || (defined(CUDA_VERSION) && CUDA_VERSION < 11080)
-#else
-// Copied from tools/util/include/host_reorder.h, from CUTLASS source
-// tree. This is for simplicity - namely, this file is not under
-// include/cutlass in this tree, as other CUTLASS include files
-// needed, so it would require changing PyTorch CMake configuration;
-// furthermore, including this file produces build errors in PyTorch
-// at the moment.
-template <typename Element, typename LayoutDest, typename LayoutSrc>
-static void reorder_meta(cutlass::TensorRef<Element, LayoutDest> dest,
- cutlass::TensorRef<Element, LayoutSrc> src,
- const int problem_size_m, const int problem_size_k) {
- for (int m = 0; m < problem_size_m; m++) {
- for (int k = 0; k < problem_size_k; k++) {
- // First reorder the rows.
- int group = (sizeof(Element) == 2) ? 32 : 16;
- int interweave = (sizeof(Element) == 2) ? 4 : 2;
-
- int dest_row = m / group * group + (m % 8) * interweave + (m % group) / 8;
- int dest_col = k;
-
- // Next swizzle the 2x2 blocks from Z to N.
- if (((dest_row % 2) == 0) && ((dest_col % 2) == 1)) {
- ++dest_row;
- --dest_col;
- } else if (((dest_row % 2) == 1) && ((dest_col % 2) == 0)) {
- --dest_row;
- ++dest_col;
- }
-
- dest.at({dest_row, dest_col}) = src.at({m, k});
- }
- }
-}
-#endif
-
-std::tuple<Tensor, Tensor>
-_to_sparse_semi_structured(const Tensor& dense) {
-#if defined(USE_ROCM) || defined(_MSC_VER) || (defined(CUDA_VERSION) && CUDA_VERSION < 11080)
- AT_ERROR("_to_sparse_semi_structured: CUTLASS not supported");
- return std::make_tuple(Tensor{}, Tensor{});
-#else
- // Check dimensions of the dense matrix.
- TORCH_CHECK(dense.dim() == 2,
- "_to_sparse_semi_structured: Expected dense argument to be 2D "
- "tensor, got ", dense.dim(), " dims");
-
- // Determine PyTorch datatype for the metadata matrix.
- auto meta_dtype = at::kChar;
- auto ksparse = 0;
- auto dense_elems_per_meta_elem = 0;
- if (dense.dtype() == at::kChar) {
- meta_dtype = at::kInt;
- ksparse = 4;
- dense_elems_per_meta_elem = 32;
- } else if (dense.dtype() == at::kHalf || dense.dtype() == at::kBFloat16) {
- meta_dtype = at::kShort;
- ksparse = 4;
- dense_elems_per_meta_elem = 16;
- } else if (dense.dtype() == at::kFloat) {
- meta_dtype = at::kShort;
- ksparse = 2;
- dense_elems_per_meta_elem = 8;
- } else {
- AT_ERROR("_to_sparse_semi_structured: Invalid dense argument datatype ",
- dense.dtype(), " encountered");
- }
-
- const auto dense_nrows = dense.size(0);
- const auto dense_ncols = dense.size(1);
-
- if (dense_nrows % (meta_dtype == at::kShort ? 32 : 16) != 0) {
- AT_ERROR("_to_sparse_semi_structured: Number of rows of dense matrix must "
- "be divisible by ", (meta_dtype == at::kShort ? 32 : 16),
- ", but it is ", dense_nrows);
- }
- if (dense_ncols % dense_elems_per_meta_elem != 0) {
- AT_ERROR("_to_sparse_semi_structured: Number of columns of dense matrix "
- "must be divisible by ", dense_elems_per_meta_elem, ", but it is ",
- dense_ncols);
- }
-
- const auto dense_cpu = dense.to("cpu");
-
- const auto mask_cpu = dense_cpu != at::zeros({1}, dense_cpu.options());
-
- const auto sparse_cpu =
- dense_cpu.masked_select(mask_cpu).view({dense_nrows, dense_ncols / 2});
-
- const auto meta_nrows = dense_nrows;
- const auto meta_ncols = dense_ncols / dense_elems_per_meta_elem;
- auto meta_cpu = dense_cpu.new_empty({meta_nrows, meta_ncols},
- at::TensorOptions().dtype(meta_dtype));
-
- auto* mask_cpu_ptr = mask_cpu.data_ptr<bool>();
- for (auto i = 0; i < meta_nrows; ++i) {
- for (auto j = 0; j < meta_ncols; ++j) {
- uint64_t meta_val = 0;
- for (auto k = 0; k < dense_elems_per_meta_elem / ksparse; ++k, mask_cpu_ptr += ksparse) {
- const auto mask_elems =
- (ksparse == 4) ? std::make_tuple(mask_cpu_ptr[0], mask_cpu_ptr[1],
- mask_cpu_ptr[2], mask_cpu_ptr[3])
- : std::make_tuple(mask_cpu_ptr[0], mask_cpu_ptr[0],
- mask_cpu_ptr[1], mask_cpu_ptr[1]);
- auto meta_quadruple = 0;
- if (mask_elems == std::make_tuple(1, 1, 0, 0)) {
- meta_quadruple = 4; // 0100
- } else if (mask_elems == std::make_tuple(1, 0, 1, 0)) {
- meta_quadruple = 8; // 1000
- } else if (mask_elems == std::make_tuple(0, 1, 1, 0)) {
- meta_quadruple = 9; // 1001
- } else if (mask_elems == std::make_tuple(1, 0, 0, 1)) {
- meta_quadruple = 12; // 1100
- } else if (mask_elems == std::make_tuple(0, 1, 0, 1)) {
- meta_quadruple = 13; // 1101
- } else if (mask_elems == std::make_tuple(0, 0, 1, 1)) {
- meta_quadruple = 14; // 1110
- } else {
- AT_ERROR("_to_sparse_semi_structured: dense argument does not match ",
- (dense.dtype() != at::kFloat) ? "2:4" : "1:2",
- "sparsity pattern");
- }
- meta_val = meta_val | (meta_quadruple << (4 * k));
- }
- const auto idx = i * meta_ncols + j;
- if (meta_dtype == at::kShort) {
- using MetaElement = int16_t;
- const auto meta_cpu_ptr = meta_cpu.data_ptr<MetaElement>();
- meta_cpu_ptr[idx] = (MetaElement)meta_val;
- } else if (meta_dtype == at::kInt) {
- using MetaElement = int32_t;
- const auto meta_cpu_ptr = meta_cpu.data_ptr<MetaElement>();
- meta_cpu_ptr[idx] = (MetaElement)meta_val;
- }
- }
- }
-
- auto meta_reordered_cpu = meta_cpu.new_empty({meta_nrows, meta_ncols});
- using MetaLayout = cutlass::layout::RowMajor;
- using MetaReorderedLayout = cutlass::layout::ColumnMajorInterleaved<2>;
- if (meta_dtype == at::kShort) {
- using MetaElement = int16_t;
- auto meta_cpu_ref =
- cutlass::TensorRef<MetaElement, MetaLayout>(
- meta_cpu.data_ptr<MetaElement>(),
- MetaLayout::packed({meta_nrows, meta_ncols}));
- auto meta_reordered_cpu_ref =
- cutlass::TensorRef<MetaElement, MetaReorderedLayout>(
- meta_reordered_cpu.data_ptr<MetaElement>(),
- MetaReorderedLayout::packed({meta_nrows, meta_ncols}));
- reorder_meta(meta_reordered_cpu_ref, meta_cpu_ref, meta_nrows, meta_ncols);
- } else if (meta_dtype == at::kInt) {
- using MetaElement = int32_t;
- auto meta_cpu_ref =
- cutlass::TensorRef<MetaElement, MetaLayout>(
- meta_cpu.data_ptr<MetaElement>(),
- MetaLayout::packed({meta_nrows, meta_ncols}));
- auto meta_reordered_cpu_ref =
- cutlass::TensorRef<MetaElement, MetaReorderedLayout>(
- meta_reordered_cpu.data_ptr<MetaElement>(),
- MetaReorderedLayout::packed({meta_nrows, meta_ncols}));
- reorder_meta(meta_reordered_cpu_ref, meta_cpu_ref, meta_nrows, meta_ncols);
- }
-
- return std::make_tuple(sparse_cpu.to(dense.device()),
- meta_reordered_cpu.to(dense.device()));
-#endif
-}
-
-} // namespace at::native
diff --git a/aten/src/ATen/native/sparse/cuda/SparseSemiStructuredOps.cu b/aten/src/ATen/native/sparse/cuda/SparseSemiStructuredOps.cu
new file mode 100644
index 0000000000..8c05acc66b
--- /dev/null
+++ b/aten/src/ATen/native/sparse/cuda/SparseSemiStructuredOps.cu
@@ -0,0 +1,979 @@
+#include <ATen/ATen.h>
+#include <ATen/core/Tensor.h>
+#include <ATen/cuda/CUDAUtils.h>
+#include <ATen/Dispatch.h>
+
+#if defined(USE_ROCM) || defined(_MSC_VER) || (defined(CUDA_VERSION) && CUDA_VERSION < 11080)
+#else
+#include <cuda_runtime.h>
+#include <cutlass/cutlass.h>
+#include <cutlass/layout/layout.h>
+#include <cutlass/tensor_ref.h>
+#include <cutlass/gemm/device/gemm_sparse_with_visitor.h>
+#include <cutlass/epilogue/threadblock/fusion/visitors.hpp>
+#endif
+
+#include <type_traits>
+#include <tuple>
+
+#if defined(USE_ROCM) || defined(_MSC_VER) || (defined(CUDA_VERSION) && CUDA_VERSION < 11080)
+#else
+#define CUTLASS_STATUS_CHECK(status) \
+ { \
+ TORCH_CHECK(status == cutlass::Status::kSuccess, \
+ __func__, " : CUTLASS error: ", \
+ cutlassGetStatusString(status)); \
+ }
+#endif
+
+namespace at::native {
+
+#if defined(USE_ROCM) || defined(_MSC_VER) || (defined(CUDA_VERSION) && CUDA_VERSION < 11080)
+#else
+// Wrapper function for CUTLASS sparse GEMM implementation, used
+// solely to simplify dispatching from
+// sparse_semi_structured_mad_op() function below.
+template <
+ typename ElementInputA,
+ typename ElementInputB,
+ typename ElementOutput,
+ typename ElementAccumulator,
+ typename ThreadblockShape,
+ typename WarpShape,
+ typename InstructionShape,
+ typename LayoutInputA,
+ typename LayoutInputB,
+ bool use_tensor_c>
+void spgemm_cutlass(
+ const Tensor& tensor_a, const at::IntArrayRef::value_type& tensor_a_stride,
+ const Tensor& tensor_b, const at::IntArrayRef::value_type& tensor_b_stride,
+ const Tensor& tensor_c, const Tensor& tensor_e, const Scalar& alpha,
+ const Scalar& beta, Tensor& tensor_d) {
+ // Fix CUTLASS sparse GEMM template arguments that are not
+ // provided as template argument of this function, and create an
+ // alias for particular instantiation of this template.
+ using LayoutOutput = cutlass::layout::RowMajor; // Result of the operation will be provided in row-major format.
+ using MMAOp = cutlass::arch::OpClassTensorOp; // Tensor cores are to be used for maximum performance.
+ using SmArch = cutlass::arch::Sm80; // Only CC 8.x devices are supported at the moment.
+ using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // This choice provides good performance across wide range of operand sizes.
+ constexpr int NumStages = 3; // This choice provides good performance across wide range of operand sizes.
+ using Operator = cutlass::arch::OpMultiplyAdd;
+ constexpr int NumEVTEpilogueStages = 1;
+
+ constexpr int AlignmentInputA = 128 / cutlass::sizeof_bits<ElementInputA>::value;
+ constexpr int AlignmentInputB = 128 / cutlass::sizeof_bits<ElementInputB>::value;
+ constexpr int AlignmentOutput = 128 / cutlass::sizeof_bits<ElementOutput>::value;
+
+ using ElementComputeEpilogue = ElementAccumulator; // Typically slightly slower, but more precise than if ElementOutput used.
+ constexpr int AlignmentComputeEpilogue = 128 / cutlass::sizeof_bits<ElementComputeEpilogue>::value;
+ using ElementC = ElementOutput;
+ using LayoutC = LayoutOutput;
+ constexpr int AlignmentC = 128 / cutlass::sizeof_bits<ElementC>::value;
+
+ using TensorCTileThreadMap = cutlass::epilogue::threadblock::OutputTileThreadLayout<
+ ThreadblockShape,
+ WarpShape,
+ ElementC,
+ AlignmentC,
+ NumEVTEpilogueStages>;
+ using OutputTileThreadMap = cutlass::epilogue::threadblock::OutputTileThreadLayout<
+ ThreadblockShape,
+ WarpShape,
+ ElementOutput,
+ AlignmentOutput,
+ NumEVTEpilogueStages>;
+
+ using Accum = cutlass::epilogue::threadblock::VisitorAccFetch;
+
+ using Alpha =
+ cutlass::epilogue::threadblock::VisitorScalarBroadcast<ElementComputeEpilogue>;
+ using AlphaArguments = typename Alpha::Arguments;
+
+ using ApplyAlpha = cutlass::epilogue::threadblock::VisitorCompute<
+ cutlass::multiplies, ElementComputeEpilogue, ElementComputeEpilogue,
+ cutlass::FloatRoundStyle::round_to_nearest>;
+ using EVTApplyAlpha = cutlass::epilogue::threadblock::Sm80EVT<
+ ApplyAlpha,
+ Alpha,
+ Accum>;
+
+ using Beta =
+ cutlass::epilogue::threadblock::VisitorScalarBroadcast<ElementComputeEpilogue>;
+ using BetaArguments = typename Beta::Arguments;
+
+ using TensorCScalar =
+ cutlass::epilogue::threadblock::VisitorScalarBroadcast<ElementC>;
+ using TensorCTensor =
+ cutlass::epilogue::threadblock::VisitorColBroadcast<
+ TensorCTileThreadMap,
+ ElementC,
+ cute::Stride<cute::_1, cute::_0, int64_t>>;
+ using TensorC = std::conditional_t<use_tensor_c, TensorCTensor, TensorCScalar>;
+ using TensorCArguments = typename TensorC::Arguments;
+
+ using ApplyBeta = cutlass::epilogue::threadblock::VisitorCompute<
+ cutlass::multiplies, ElementComputeEpilogue, ElementComputeEpilogue,
+ cutlass::FloatRoundStyle::round_to_nearest>;
+ using EVTApplyBeta = cutlass::epilogue::threadblock::Sm80EVT<
+ ApplyBeta,
+ Beta,
+ TensorC>;
+
+ using ApplySum = cutlass::epilogue::threadblock::VisitorCompute<
+ cutlass::plus, ElementComputeEpilogue, ElementComputeEpilogue,
+ cutlass::FloatRoundStyle::round_to_nearest>;
+ using EVTApplySum = cutlass::epilogue::threadblock::Sm80EVT<
+ ApplySum,
+ EVTApplyAlpha,
+ EVTApplyBeta>;
+
+ using Output = cutlass::epilogue::threadblock::VisitorAuxStore<
+ OutputTileThreadMap, ElementOutput, cutlass::FloatRoundStyle::round_to_nearest,
+ cute::Stride<int64_t, cute::_1, int64_t>>;
+
+ using EVTOutput = cutlass::epilogue::threadblock::Sm80EVT<
+ Output,
+ EVTApplySum>;
+
+ using Gemm = cutlass::gemm::device::SparseGemmWithVisitor<
+ ElementInputA,
+ LayoutInputA,
+ ElementInputB,
+ LayoutInputB,
+ ElementC,
+ LayoutC,
+ ElementAccumulator,
+ MMAOp,
+ SmArch,
+ ThreadblockShape,
+ WarpShape,
+ InstructionShape,
+ EVTOutput,
+ SwizzleThreadBlock,
+ NumStages,
+ AlignmentInputA,
+ AlignmentInputB,
+ Operator,
+ NumEVTEpilogueStages>;
+
+ // Datatype and layout of metadata matrix are inferred from sparse
+ // GEMM template.
+ using ElementInputE = typename Gemm::ElementE;
+ using LayoutInputE = cutlass::layout::RowMajor;
+ using ReorderedLayoutInputE = typename Gemm::LayoutE;
+ static_assert(
+ std::is_same<ReorderedLayoutInputE,
+ cutlass::layout::ColumnMajorInterleaved<2>>::value,
+ "Matrix layout used by CUTLASS for reordered metadata for sparse GEMM "
+ "change, thus code doing conversions from/to dense matrix has to be "
+ "updated.");
+
+ constexpr auto kSparse = Gemm::kSparse;
+ constexpr int kElementsPerElementE = Gemm::kElementsPerElementE;
+
+ // Operand sizes.
+ const int length_m = tensor_a.size(0);
+ const int length_k = tensor_b.size(0);
+ const int length_n = tensor_b.size(1);
+ const auto tensor_e_ncols = length_k / kSparse / kElementsPerElementE;
+
+ // Determine PyTorch datatype for the metadata matrix.
+ auto tensor_e_dtype = at::kChar;
+ switch (sizeof(ElementInputE)) {
+ case 2:
+ tensor_e_dtype = at::kShort;
+ break;
+ case 4:
+ tensor_e_dtype = at::kInt;
+ break;
+ default:
+ AT_ERROR(__func__, ": invalid size of meta tensor datatype "
+ "encountered");
+ }
+ TORCH_CHECK(tensor_e.dtype() == tensor_e_dtype,
+ __func__, " : Expected meta datatype ", tensor_e_dtype,
+ ", but got ", tensor_e.dtype());
+
+ // Prepare arguments for CUTLASS sparse GEMM kernel.
+ cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k);
+ LayoutInputA layout_a(tensor_a_stride);
+ LayoutInputB layout_b(tensor_b_stride);
+ auto tensor_a_device_ref =
+ cutlass::TensorRef<ElementInputA, LayoutInputA>(
+ (ElementInputA*)tensor_a.data_ptr(), layout_a);
+ auto tensor_b_device_ref =
+ cutlass::TensorRef<ElementInputB, LayoutInputB>(
+ (ElementInputB*)tensor_b.data_ptr(), layout_b);
+ auto tensor_e_reordered_device_ref =
+ cutlass::TensorRef<ElementInputE, ReorderedLayoutInputE>(
+ (ElementInputE*)tensor_e.data_ptr(),
+ ReorderedLayoutInputE::packed({length_m, tensor_e_ncols}));
+
+ AlphaArguments alpha_arguments{
+ [&]() -> AlphaArguments {
+ if constexpr (std::is_same<ElementComputeEpilogue, cutlass::half_t>::value ||
+ std::is_same<ElementComputeEpilogue, cutlass::bfloat16_t>::value) {
+ return {ElementComputeEpilogue{alpha.to<float>()}};
+ } else {
+ return {alpha.to<ElementComputeEpilogue>()};
+ }
+ }()
+ };
+ BetaArguments beta_arguments{
+ [&]() -> BetaArguments {
+ if constexpr (std::is_same<ElementComputeEpilogue, cutlass::half_t>::value ||
+ std::is_same<ElementComputeEpilogue, cutlass::bfloat16_t>::value) {
+ return {ElementComputeEpilogue{beta.to<float>()}};
+ } else {
+ return {beta.to<ElementComputeEpilogue>()};
+ }
+ }()
+ };
+ TensorCArguments tensor_c_arguments{
+ [&]() -> TensorCArguments {
+ if constexpr (use_tensor_c) {
+ return {(ElementC*)tensor_c.data_ptr(),
+ ElementC(0),
+ {cute::_1{}, cute::_0{}, problem_size.m()}};
+ } else {
+ return {ElementC(0)};
+ }
+ }()
+ };
+ typename Output::Arguments output_arguments{
+ (ElementOutput*)tensor_d.data_ptr(),
+ {problem_size.n(), cute::_1{}, problem_size.mn().product()}
+ };
+ typename EVTOutput::Arguments callback_arguments{
+ {
+ {
+ alpha_arguments, // Alpha
+ {}, // Accum
+ {} // ApplyAlpha
+ }, // EVTApplyAlpha
+ {
+ beta_arguments, // Beta
+ tensor_c_arguments, // TensorC
+ {} // ApplyBeta
+ }, // EVTApplyBeta
+ {} // ApplySum
+ }, // EVTApplySum
+ output_arguments // Output
+ }; // EVTOutput
+
+ // Create a tuple of CUTLASS sparse GEMM kernel arguments.
+ typename Gemm::Arguments arguments{
+ problem_size,
+ tensor_a_device_ref,
+ tensor_b_device_ref,
+ tensor_e_reordered_device_ref,
+ callback_arguments};
+
+ cutlass::Status status;
+
+ // Create CUTLASS sparse GEMM kernel object.
+ Gemm gemm_op;
+
+ // Verify that sparse GEMM operation with given arguments can be
+ // performed by CUTLASS.
+ status = gemm_op.can_implement(arguments);
+ CUTLASS_STATUS_CHECK(status);
+
+ // Allocate workspace for CUTLASS sparse GEMM kernel.
+ const auto workspace_size = Gemm::get_workspace_size(arguments);
+ auto workspace = tensor_a.new_empty({(int64_t)workspace_size},
+ at::TensorOptions().dtype(at::kByte));
+
+ // Initialize CUTLASS sparse GEMM object.
+ status = gemm_op.initialize(arguments, workspace.data_ptr(),
+ at::cuda::getCurrentCUDAStream());
+ CUTLASS_STATUS_CHECK(status);
+
+ // Perform sparse GEMM operation.
+ status = gemm_op.run(at::cuda::getCurrentCUDAStream());
+ CUTLASS_STATUS_CHECK(status);
+
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
+}
+
+// Dispatch according to the input tensors layouts combination.
+template <
+ typename ElementInputA,
+ typename ElementInputB,
+ typename ElementOutput,
+ typename ElementAccumulator,
+ typename ThreadblockShape,
+ typename WarpShape,
+ typename InstructionShape,
+ bool EnableRowMajorRowMajorLayouts,
+ bool EnableRowMajorColumnMajorLayouts,
+ bool EnableColumnMajorRowMajorLayouts,
+ bool EnableColumnMajorColumnMajorLayouts,
+ bool use_tensor_c>
+void spgemm_cutlass_dispatch_layouts(
+ const Tensor& tensor_a, const Tensor& tensor_b, const Tensor& tensor_c,
+ const Tensor& tensor_e, const Scalar& alpha, const Scalar& beta,
+ Tensor& tensor_d) {
+ // Determine layouts (row-major or column-major) of input tensors.
+ const auto strides_a = tensor_a.strides();
+ auto tensor_a_row_major = strides_a[1] == 1;
+ auto tensor_a_stride = tensor_a_row_major ? strides_a[0] : strides_a[1];
+ const auto strides_b = tensor_b.strides();
+ auto tensor_b_row_major = strides_b[1] == 1;
+ auto tensor_b_stride = tensor_b_row_major ? strides_b[0] : strides_b[1];
+
+ // Perform dispatching.
+ if constexpr (EnableRowMajorRowMajorLayouts) {
+ if (tensor_a_row_major && tensor_b_row_major) {
+ spgemm_cutlass<
+ ElementInputA,
+ ElementInputB,
+ ElementOutput,
+ ElementAccumulator,
+ ThreadblockShape,
+ WarpShape,
+ InstructionShape,
+ cutlass::layout::RowMajor,
+ cutlass::layout::RowMajor,
+ use_tensor_c>(
+ tensor_a,
+ tensor_a_stride,
+ tensor_b,
+ tensor_b_stride,
+ tensor_c,
+ tensor_e,
+ alpha,
+ beta,
+ tensor_d);
+ return;
+ }
+ }
+ if constexpr (EnableRowMajorColumnMajorLayouts) {
+ if (tensor_a_row_major && !tensor_b_row_major) {
+ spgemm_cutlass<
+ ElementInputA,
+ ElementInputB,
+ ElementOutput,
+ ElementAccumulator,
+ ThreadblockShape,
+ WarpShape,
+ InstructionShape,
+ cutlass::layout::RowMajor,
+ cutlass::layout::ColumnMajor,
+ use_tensor_c>(
+ tensor_a,
+ tensor_a_stride,
+ tensor_b,
+ tensor_b_stride,
+ tensor_c,
+ tensor_e,
+ alpha,
+ beta,
+ tensor_d);
+ return;
+ }
+ }
+ if constexpr (EnableColumnMajorRowMajorLayouts) {
+ if (!tensor_a_row_major && tensor_b_row_major) {
+ spgemm_cutlass<
+ ElementInputA,
+ ElementInputB,
+ ElementOutput,
+ ElementAccumulator,
+ ThreadblockShape,
+ WarpShape,
+ InstructionShape,
+ cutlass::layout::ColumnMajor,
+ cutlass::layout::RowMajor,
+ use_tensor_c>(
+ tensor_a,
+ tensor_a_stride,
+ tensor_b,
+ tensor_b_stride,
+ tensor_c,
+ tensor_e,
+ alpha,
+ beta,
+ tensor_d);
+ return;
+ }
+ }
+ if constexpr (EnableColumnMajorColumnMajorLayouts) {
+ if (!tensor_a_row_major && !tensor_b_row_major) {
+ spgemm_cutlass<
+ ElementInputA,
+ ElementInputB,
+ ElementOutput,
+ ElementAccumulator,
+ ThreadblockShape,
+ WarpShape,
+ InstructionShape,
+ cutlass::layout::ColumnMajor,
+ cutlass::layout::ColumnMajor,
+ use_tensor_c>(
+ tensor_a,
+ tensor_a_stride,
+ tensor_b,
+ tensor_b_stride,
+ tensor_c,
+ tensor_e,
+ alpha,
+ beta,
+ tensor_d);
+ return;
+ }
+ }
+
+ AT_ERROR(__func__, "_dispatch_layouts: Combination of ",
+ tensor_a_row_major ? "row-major" : "column_major", " and ",
+ tensor_b_row_major ? "row-major" : "column_major",
+ " layouts for input tensors is not supported");
+}
+
+// Dispatch according to the tensor_c tensor being provided or not.
+template <
+ typename ElementInputA,
+ typename ElementInputB,
+ typename ElementOutput,
+ typename ElementAccumulator,
+ typename ThreadblockShape,
+ typename WarpShape,
+ typename InstructionShape,
+ bool EnableRowMajorRowMajorLayouts,
+ bool EnableRowMajorColumnMajorLayouts,
+ bool EnableColumnMajorRowMajorLayouts,
+ bool EnableColumnMajorColumnMajorLayouts>
+void spgemm_cutlass_dispatch_layouts_tensor_c(
+ const Tensor& tensor_a, const Tensor& tensor_b, const Tensor& tensor_c,
+ const Tensor& tensor_e, const Scalar& alpha, const Scalar& beta,
+ Tensor& tensor_d) {
+ if (tensor_c.numel() > 0) {
+ spgemm_cutlass_dispatch_layouts<
+ ElementInputA,
+ ElementInputB,
+ ElementOutput,
+ ElementAccumulator,
+ ThreadblockShape,
+ WarpShape,
+ InstructionShape,
+ EnableRowMajorRowMajorLayouts,
+ EnableRowMajorColumnMajorLayouts,
+ EnableColumnMajorRowMajorLayouts,
+ EnableColumnMajorColumnMajorLayouts,
+ true>(
+ tensor_a,
+ tensor_b,
+ tensor_c,
+ tensor_e,
+ alpha,
+ beta,
+ tensor_d);
+ } else {
+ spgemm_cutlass_dispatch_layouts<
+ ElementInputA,
+ ElementInputB,
+ ElementOutput,
+ ElementAccumulator,
+ ThreadblockShape,
+ WarpShape,
+ InstructionShape,
+ EnableRowMajorRowMajorLayouts,
+ EnableRowMajorColumnMajorLayouts,
+ EnableColumnMajorRowMajorLayouts,
+ EnableColumnMajorColumnMajorLayouts,
+ false>(
+ tensor_a,
+ tensor_b,
+ tensor_c,
+ tensor_e,
+ alpha,
+ beta,
+ tensor_d);
+ }
+}
+#endif
+
+// Perform multiply-add operation, using corresponding CUTLASS
+// sparse GEMM kernel, to given arguments:
+// result = alpha * mat1 @ mat2 + beta * input
+// The "mat2" tensor is a dense tensor, while the "mat1" tensor is a
+// sparse semi-structured matrix. The "input" tensor is optional; if
+// provided, it should be a vector, with the number of elements equal
+// to the number of rows of "mat1" matrix. It is assumed that "mat1"
+// and "mat2" are 2D tensors, supplied either in row-major or
+// column-major layouts (different layouts between these two tensors
+// are OK, but not all combinations of formats are supported for some
+// datatypes of these matrices). The "mat1_meta" argument contains
+// sparse semi-structured metadata.
+//
+// There exists numerous limitations of CUTLASS sparse GEMM kernel,
+// with regards to sizes and alignments of input tensors, their
+// layouts and datatypes, and so on; this is the reason for large
+// number of checks throughout the code.
+//
+// TODO: The "input" tensor has to be a vector, such that it could be
+// broadcasted to columns of mat1 * mat2. The case of broadcasting to
+// rows of mat1 * mat2 could be also supported, if "input" tensor is a
+// vector of corresponding length; and same for the case when "input"
+// tensor is a matrix of same size as mat1 * mat2 product. If these
+// updates made here, then remember to update corresponding bits in
+// the Inductor code that are handling meta registrations and
+// lowerings of aten._sparse_semi_structured_mm and
+// aten._sparse_semi_structured_addmm operators.
+Tensor sparse_semi_structured_mad_op(
+ const Tensor& mat1, const Tensor& mat1_meta, const Tensor& mat2,
+ const c10::optional<Tensor>& input_opt, const Scalar& alpha,
+ const Scalar& beta, const c10::optional<c10::ScalarType> out_dtype_opt) {
+#if defined(USE_ROCM) || defined(_MSC_VER) || (defined(CUDA_VERSION) && CUDA_VERSION < 11080)
+ AT_ERROR(__func__, " : CUTLASS not supported");
+ return Tensor{};
+#else
+ // No need to check that all tensors are on CUDA device, as this
+ // is provided by dispatch.
+
+ const auto& input = input_opt.value_or(Tensor{});
+ const auto out_dtype = out_dtype_opt.value_or(mat2.scalar_type());
+
+ // For now, only CC 8.x devices are supported.
+ const auto dprops = at::cuda::getCurrentDeviceProperties();
+ const auto is_sm8x = dprops->major == 8;
+ TORCH_CHECK(is_sm8x,
+ __func__, " : Supported only on GPUs with compute capability "
+ "8.x");
+
+ // Validate datatypes of input tensors.
+ TORCH_CHECK(mat2.dtype() == at::kChar ||
+ mat2.dtype() == at::kHalf ||
+ mat2.dtype() == at::kBFloat16 ||
+ mat2.dtype() == at::kFloat,
+ __func__, " : The mat2 datatype ", mat2.dtype(),
+ " is not supported");
+ TORCH_CHECK(mat1.dtype() == mat2.dtype(),
+ __func__, " : Expected mat1 datatype ", mat2.dtype(),
+ ", but got ", mat1.dtype());
+ if (input.numel() != 0) {
+ TORCH_CHECK(input.dtype() == out_dtype,
+ __func__, " : Expected input datatype ", out_dtype,
+ ", but got ", input.dtype());
+ }
+
+ // Validate layouts of input tensors.
+ TORCH_CHECK(mat1.layout() == Layout::Strided,
+ __func__, " : Expected mat1 argument to be strided, but got "
+ "layout ", mat1.layout());
+ TORCH_CHECK(mat1.dim() == 2,
+ __func__, " : Expected mat1 argument to be 2D tensor, got ",
+ mat1.dim(), " dims");
+ const auto strides_a = mat1.strides();
+ TORCH_CHECK(strides_a[0] == 1 || strides_a[1] == 1,
+ __func__, " : Invalid strides for mat1 argument: row stride = ",
+ strides_a[0], ", column stride = ", strides_a[1]);
+ TORCH_CHECK(mat2.layout() == Layout::Strided,
+ __func__, " : Expected mat2 argument to be "
+ "strided, but got layout ", mat2.layout());
+ TORCH_CHECK(mat2.dim() == 2,
+ __func__, " : Expected mat2 argument to be 2D tensor, got ",
+ mat2.dim(), " dims");
+ const auto strides_b = mat2.strides();
+ TORCH_CHECK(strides_b[0] == 1 || strides_b[1] == 1,
+ __func__, " : Invalid strides for mat2 argument: row stride = ",
+ strides_b[0], ", column stride = ", strides_b[1]);
+ if (input.numel() != 0) {
+ TORCH_CHECK(input.layout() == Layout::Strided,
+ __func__, " : Expected input argument to be strided, but "
+ "got layout ", input.layout());
+ TORCH_CHECK(input.dim() == 1,
+ __func__, " : Expected input argument to be 1D tensor, "
+ "got ", input.dim(), " dims");
+ }
+
+ // Validate sizes of input tensors.
+ TORCH_CHECK(mat1.size(1) == mat2.size(0) / 2,
+ __func__, " : Expected mat1 argument to have ",
+ mat2.size(0) / 2, " columns, but got ", mat1.size(1));
+ if (input.numel() != 0) {
+ TORCH_CHECK(input.size(0) == mat1.size(0),
+ __func__, " : Expected input argument to have ",
+ mat1.size(0), " elements, but got ", input.size(0));
+ }
+
+ // Introduce alias names for arguments, according to the CUTLASS
+ // naming conventions.
+ const auto& tensor_a = mat1;
+ const auto& tensor_b = mat2;
+ const auto& tensor_c = input;
+ const auto& tensor_e = mat1_meta;
+
+ // Create output tensor.
+ Tensor tensor_d =
+ tensor_b.new_empty({tensor_a.size(0), tensor_b.size(1)},
+ at::TensorOptions().dtype(out_dtype));
+
+ // Call wrapper function for CUTLASS sparse GEMM, dispatching on
+ // the input datatype, and then on input tensors layouts.
+ // According to the input tensors datatypes and layouts,
+ // corresponding template arguments are supplied for instantiating
+ // the wrapper function. The tile sizes template arguments are
+ // selected according to the CUTLASS profiler results, for number
+ // of runs.
+ AT_DISPATCH_SWITCH(
+ tensor_a.scalar_type(),
+ "sparse_semi_structured_mad_op",
+ AT_DISPATCH_CASE(
+ at::ScalarType::Char,
+ [&]() {
+ using ElementInputA = int8_t;
+ using ElementInputB = int8_t;
+ using ElementAccumulator = int32_t;
+ using ThreadblockShape =
+ cutlass::gemm::GemmShape<128, 128, 128>;
+ using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>;
+ using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
+ const auto EnableRowMajorRowMajorLayouts = false;
+ const auto EnableRowMajorColumnMajorLayouts = true;
+ const auto EnableColumnMajorRowMajorLayouts = false;
+ const auto EnableColumnMajorColumnMajorLayouts = false;
+ if (out_dtype == at::kInt) {
+ using ElementOutput = int32_t;
+ spgemm_cutlass_dispatch_layouts_tensor_c<
+ ElementInputA,
+ ElementInputB,
+ ElementOutput,
+ ElementAccumulator,
+ ThreadblockShape,
+ WarpShape,
+ InstructionShape,
+ EnableRowMajorRowMajorLayouts,
+ EnableRowMajorColumnMajorLayouts,
+ EnableColumnMajorRowMajorLayouts,
+ EnableColumnMajorColumnMajorLayouts>(
+ tensor_a,
+ tensor_b,
+ tensor_c,
+ tensor_e,
+ alpha,
+ beta,
+ tensor_d);
+ } else if (out_dtype == at::kChar) {
+ using ElementOutput = int8_t;
+ spgemm_cutlass_dispatch_layouts_tensor_c<
+ ElementInputA,
+ ElementInputB,
+ ElementOutput,
+ ElementAccumulator,
+ ThreadblockShape,
+ WarpShape,
+ InstructionShape,
+ EnableRowMajorRowMajorLayouts,
+ EnableRowMajorColumnMajorLayouts,
+ EnableColumnMajorRowMajorLayouts,
+ EnableColumnMajorColumnMajorLayouts>(
+ tensor_a,
+ tensor_b,
+ tensor_c,
+ tensor_e,
+ alpha,
+ beta,
+ tensor_d);
+ }
+ })
+ AT_DISPATCH_CASE(
+ at::ScalarType::Half,
+ [&]() {
+ using ElementInputA = cutlass::half_t;
+ using ElementInputB = cutlass::half_t;
+ using ElementOutput = cutlass::half_t;
+ using ElementAccumulator = float;
+ using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>;
+ using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
+ using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
+ const auto EnableRowMajorRowMajorLayouts = true;
+ const auto EnableRowMajorColumnMajorLayouts = true;
+ const auto EnableColumnMajorRowMajorLayouts = true;
+ const auto EnableColumnMajorColumnMajorLayouts = true;
+ spgemm_cutlass_dispatch_layouts_tensor_c<
+ ElementInputA,
+ ElementInputB,
+ ElementOutput,
+ ElementAccumulator,
+ ThreadblockShape,
+ WarpShape,
+ InstructionShape,
+ EnableRowMajorRowMajorLayouts,
+ EnableRowMajorColumnMajorLayouts,
+ EnableColumnMajorRowMajorLayouts,
+ EnableColumnMajorColumnMajorLayouts>(
+ tensor_a,
+ tensor_b,
+ tensor_c,
+ tensor_e,
+ alpha,
+ beta,
+ tensor_d);
+ })
+ AT_DISPATCH_CASE(
+ at::ScalarType::BFloat16,
+ [&]() {
+ using ElementInputA = cutlass::bfloat16_t;
+ using ElementInputB = cutlass::bfloat16_t;
+ using ElementOutput = cutlass::bfloat16_t;
+ using ElementAccumulator = float;
+ using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>;
+ using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
+ using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
+ const auto EnableRowMajorRowMajorLayouts = true;
+ const auto EnableRowMajorColumnMajorLayouts = true;
+ const auto EnableColumnMajorRowMajorLayouts = true;
+ const auto EnableColumnMajorColumnMajorLayouts = true;
+ spgemm_cutlass_dispatch_layouts_tensor_c<
+ ElementInputA,
+ ElementInputB,
+ ElementOutput,
+ ElementAccumulator,
+ ThreadblockShape,
+ WarpShape,
+ InstructionShape,
+ EnableRowMajorRowMajorLayouts,
+ EnableRowMajorColumnMajorLayouts,
+ EnableColumnMajorRowMajorLayouts,
+ EnableColumnMajorColumnMajorLayouts>(
+ tensor_a,
+ tensor_b,
+ tensor_c,
+ tensor_e,
+ alpha,
+ beta,
+ tensor_d);
+ })
+ AT_DISPATCH_CASE(
+ at::ScalarType::Float,
+ [&]() {
+ using ElementInputA = float;
+ using ElementInputB = float;
+ using ElementOutput = float;
+ using ElementAccumulator = float;
+ using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 32>;
+ using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
+ using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
+ const auto EnableRowMajorRowMajorLayouts = true;
+ const auto EnableRowMajorColumnMajorLayouts = true;
+ const auto EnableColumnMajorRowMajorLayouts = true;
+ const auto EnableColumnMajorColumnMajorLayouts = true;
+ spgemm_cutlass_dispatch_layouts_tensor_c<
+ ElementInputA,
+ ElementInputB,
+ ElementOutput,
+ ElementAccumulator,
+ ThreadblockShape,
+ WarpShape,
+ InstructionShape,
+ EnableRowMajorRowMajorLayouts,
+ EnableRowMajorColumnMajorLayouts,
+ EnableColumnMajorRowMajorLayouts,
+ EnableColumnMajorColumnMajorLayouts>(
+ tensor_a,
+ tensor_b,
+ tensor_c,
+ tensor_e,
+ alpha,
+ beta,
+ tensor_d);
+ }));
+
+ return tensor_d;
+#endif
+}
+
+// Implementation of aten._sparse_semi_structured_mm operator.
+Tensor _sparse_semi_structured_mm(
+ const Tensor& mat1, const Tensor& mat1_meta, const Tensor& mat2,
+ const c10::optional<c10::ScalarType> out_dtype_opt) {
+ return sparse_semi_structured_mad_op(mat1, mat1_meta, mat2,
+ c10::optional<Tensor>(), 1, 0,
+ out_dtype_opt);
+}
+
+// Implementation of aten._sparse_semi_structured_addmm operator.
+Tensor _sparse_semi_structured_addmm(
+ const Tensor& input, const Tensor& mat1, const Tensor& mat1_meta,
+ const Tensor& mat2, const Scalar& alpha, const Scalar& beta,
+ const c10::optional<c10::ScalarType> out_dtype_opt) {
+ return sparse_semi_structured_mad_op(mat1, mat1_meta, mat2, input, alpha,
+ beta, out_dtype_opt);
+}
+
+} // namespace at::native
+
+// Following is just for testing purposes.
+namespace at::native {
+
+#if defined(USE_ROCM) || defined(_MSC_VER) || (defined(CUDA_VERSION) && CUDA_VERSION < 11080)
+#else
+// Copied from tools/util/include/host_reorder.h, from CUTLASS source
+// tree. This is for simplicity - namely, this file is not under
+// include/cutlass in this tree, as other CUTLASS include files
+// needed, so it would require changing PyTorch CMake configuration;
+// furthermore, including this file produces build errors in PyTorch
+// at the moment.
+template <typename Element, typename LayoutDest, typename LayoutSrc>
+static void reorder_meta(cutlass::TensorRef<Element, LayoutDest> dest,
+ cutlass::TensorRef<Element, LayoutSrc> src,
+ const int problem_size_m, const int problem_size_k) {
+ for (int m = 0; m < problem_size_m; m++) {
+ for (int k = 0; k < problem_size_k; k++) {
+ // First reorder the rows.
+ int group = (sizeof(Element) == 2) ? 32 : 16;
+ int interweave = (sizeof(Element) == 2) ? 4 : 2;
+
+ int dest_row = m / group * group + (m % 8) * interweave + (m % group) / 8;
+ int dest_col = k;
+
+ // Next swizzle the 2x2 blocks from Z to N.
+ if (((dest_row % 2) == 0) && ((dest_col % 2) == 1)) {
+ ++dest_row;
+ --dest_col;
+ } else if (((dest_row % 2) == 1) && ((dest_col % 2) == 0)) {
+ --dest_row;
+ ++dest_col;
+ }
+
+ dest.at({dest_row, dest_col}) = src.at({m, k});
+ }
+ }
+}
+#endif
+
+std::tuple<Tensor, Tensor>
+_to_sparse_semi_structured(const Tensor& dense) {
+#if defined(USE_ROCM) || defined(_MSC_VER) || (defined(CUDA_VERSION) && CUDA_VERSION < 11080)
+ AT_ERROR(__func__, " : CUTLASS not supported");
+ return std::make_tuple(Tensor{}, Tensor{});
+#else
+ // Check dimensions of the dense matrix.
+ TORCH_CHECK(dense.dim() == 2,
+ __func__, " : Expected dense argument to be 2D tensor, got ",
+ dense.dim(), " dims");
+
+ // Determine PyTorch datatype for the metadata matrix.
+ auto meta_dtype = at::kChar;
+ auto ksparse = 0;
+ auto dense_elems_per_meta_elem = 0;
+ if (dense.dtype() == at::kChar) {
+ meta_dtype = at::kInt;
+ ksparse = 4;
+ dense_elems_per_meta_elem = 32;
+ } else if (dense.dtype() == at::kHalf || dense.dtype() == at::kBFloat16) {
+ meta_dtype = at::kShort;
+ ksparse = 4;
+ dense_elems_per_meta_elem = 16;
+ } else if (dense.dtype() == at::kFloat) {
+ meta_dtype = at::kShort;
+ ksparse = 2;
+ dense_elems_per_meta_elem = 8;
+ } else {
+ AT_ERROR("_to_sparse_semi_structured: Invalid dense argument datatype ",
+ dense.dtype(), " encountered");
+ }
+
+ const auto dense_nrows = dense.size(0);
+ const auto dense_ncols = dense.size(1);
+
+ if (dense_nrows % (meta_dtype == at::kShort ? 32 : 16) != 0) {
+ AT_ERROR("_to_sparse_semi_structured: Number of rows of dense matrix must "
+ "be divisible by ", (meta_dtype == at::kShort ? 32 : 16),
+ ", but it is ", dense_nrows);
+ }
+ if (dense_ncols % dense_elems_per_meta_elem != 0) {
+ AT_ERROR("_to_sparse_semi_structured: Number of columns of dense matrix "
+ "must be divisible by ", dense_elems_per_meta_elem, ", but it is ",
+ dense_ncols);
+ }
+
+ const auto dense_cpu = dense.to("cpu");
+
+ const auto mask_cpu = dense_cpu != at::zeros({1}, dense_cpu.options());
+
+ const auto sparse_cpu =
+ dense_cpu.masked_select(mask_cpu).view({dense_nrows, dense_ncols / 2});
+
+ const auto meta_nrows = dense_nrows;
+ const auto meta_ncols = dense_ncols / dense_elems_per_meta_elem;
+ auto meta_cpu = dense_cpu.new_empty({meta_nrows, meta_ncols},
+ at::TensorOptions().dtype(meta_dtype));
+
+ auto* mask_cpu_ptr = mask_cpu.data_ptr<bool>();
+ for (auto i = 0; i < meta_nrows; ++i) {
+ for (auto j = 0; j < meta_ncols; ++j) {
+ uint64_t meta_val = 0;
+ for (auto k = 0; k < dense_elems_per_meta_elem / ksparse; ++k, mask_cpu_ptr += ksparse) {
+ const auto mask_elems =
+ (ksparse == 4) ? std::make_tuple(mask_cpu_ptr[0], mask_cpu_ptr[1],
+ mask_cpu_ptr[2], mask_cpu_ptr[3])
+ : std::make_tuple(mask_cpu_ptr[0], mask_cpu_ptr[0],
+ mask_cpu_ptr[1], mask_cpu_ptr[1]);
+ auto meta_quadruple = 0;
+ if (mask_elems == std::make_tuple(1, 1, 0, 0)) {
+ meta_quadruple = 4; // 0100
+ } else if (mask_elems == std::make_tuple(1, 0, 1, 0)) {
+ meta_quadruple = 8; // 1000
+ } else if (mask_elems == std::make_tuple(0, 1, 1, 0)) {
+ meta_quadruple = 9; // 1001
+ } else if (mask_elems == std::make_tuple(1, 0, 0, 1)) {
+ meta_quadruple = 12; // 1100
+ } else if (mask_elems == std::make_tuple(0, 1, 0, 1)) {
+ meta_quadruple = 13; // 1101
+ } else if (mask_elems == std::make_tuple(0, 0, 1, 1)) {
+ meta_quadruple = 14; // 1110
+ } else {
+ AT_ERROR("_to_sparse_semi_structured: dense argument does not match ",
+ (dense.dtype() != at::kFloat) ? "2:4" : "1:2",
+ "sparsity pattern");
+ }
+ meta_val = meta_val | (meta_quadruple << (4 * k));
+ }
+ const auto idx = i * meta_ncols + j;
+ if (meta_dtype == at::kShort) {
+ using MetaElement = int16_t;
+ const auto meta_cpu_ptr = meta_cpu.data_ptr<MetaElement>();
+ meta_cpu_ptr[idx] = (MetaElement)meta_val;
+ } else if (meta_dtype == at::kInt) {
+ using MetaElement = int32_t;
+ const auto meta_cpu_ptr = meta_cpu.data_ptr<MetaElement>();
+ meta_cpu_ptr[idx] = (MetaElement)meta_val;
+ }
+ }
+ }
+
+ auto meta_reordered_cpu = meta_cpu.new_empty({meta_nrows, meta_ncols});
+ using MetaLayout = cutlass::layout::RowMajor;
+ using MetaReorderedLayout = cutlass::layout::ColumnMajorInterleaved<2>;
+ if (meta_dtype == at::kShort) {
+ using MetaElement = int16_t;
+ auto meta_cpu_ref =
+ cutlass::TensorRef<MetaElement, MetaLayout>(
+ meta_cpu.data_ptr<MetaElement>(),
+ MetaLayout::packed({meta_nrows, meta_ncols}));
+ auto meta_reordered_cpu_ref =
+ cutlass::TensorRef<MetaElement, MetaReorderedLayout>(
+ meta_reordered_cpu.data_ptr<MetaElement>(),
+ MetaReorderedLayout::packed({meta_nrows, meta_ncols}));
+ reorder_meta(meta_reordered_cpu_ref, meta_cpu_ref, meta_nrows, meta_ncols);
+ } else if (meta_dtype == at::kInt) {
+ using MetaElement = int32_t;
+ auto meta_cpu_ref =
+ cutlass::TensorRef<MetaElement, MetaLayout>(
+ meta_cpu.data_ptr<MetaElement>(),
+ MetaLayout::packed({meta_nrows, meta_ncols}));
+ auto meta_reordered_cpu_ref =
+ cutlass::TensorRef<MetaElement, MetaReorderedLayout>(
+ meta_reordered_cpu.data_ptr<MetaElement>(),
+ MetaReorderedLayout::packed({meta_nrows, meta_ncols}));
+ reorder_meta(meta_reordered_cpu_ref, meta_cpu_ref, meta_nrows, meta_ncols);
+ }
+
+ return std::make_tuple(sparse_cpu.to(dense.device()),
+ meta_reordered_cpu.to(dense.device()));
+#endif
+}
+
+} // namespace at::native
diff --git a/test/expect/HasDecompTest.test_has_decomposition.expect b/test/expect/HasDecompTest.test_has_decomposition.expect
index 0be956c17f..79a3455713 100644
--- a/test/expect/HasDecompTest.test_has_decomposition.expect
+++ b/test/expect/HasDecompTest.test_has_decomposition.expect
@@ -523,7 +523,9 @@ aten::_sparse_mask_projection
aten::_sparse_mask_projection.out
aten::_sparse_mm_reduce_impl
aten::_sparse_mm_reduce_impl_backward
+aten::_sparse_semi_structured_addmm
aten::_sparse_semi_structured_linear
+aten::_sparse_semi_structured_mm
aten::_sparse_softmax
aten::_sparse_softmax.out
aten::_sparse_softmax_backward_data
diff --git a/test/forward_backward_compatibility/check_forward_backward_compatibility.py b/test/forward_backward_compatibility/check_forward_backward_compatibility.py
index 65c4a1196e..5a4aac572c 100644
--- a/test/forward_backward_compatibility/check_forward_backward_compatibility.py
+++ b/test/forward_backward_compatibility/check_forward_backward_compatibility.py
@@ -134,7 +134,6 @@ ALLOW_LIST = [
("aten::batch_norm_backward_elemt", datetime.date(2023, 12, 31)),
("aten::sym_constrain_range", datetime.date(2023, 12, 31)),
("aten::_efficient_attention_forward", datetime.date(2024, 1, 15)),
- ("aten::_sparse_semi_structured_linear", datetime.date(2024, 1, 15)),
("onednn::qconv1d_pointwise", datetime.date(2023, 12, 31)),
("onednn::qconv2d_pointwise", datetime.date(2023, 12, 31)),
("onednn::qconv3d_pointwise", datetime.date(2023, 12, 31)),
diff --git a/test/test_sparse_semi_structured.py b/test/test_sparse_semi_structured.py
index fcb316ee30..a09e2647eb 100644
--- a/test/test_sparse_semi_structured.py
+++ b/test/test_sparse_semi_structured.py
@@ -157,7 +157,7 @@ class SparseSemiStructuredTensorCompileTest(torch._dynamo.test_case.TestCase):
"""
Test nn.Linear + .contiguous() + nn.ReLU with SparseSemiStructuredTensor + torch.compile
We expect:
- (1) The sparse tensor subclass should turn nn.Linear into `aten._structured_sparse_linear` + `aten.contiguous()`
+ (1) The sparse tensor subclass should turn nn.Linear into `aten._structured_sparse_addmm` + `aten.contiguous()`
(2) Inductor should fuse the .contiguous() call into the relu
"""
@@ -207,7 +207,7 @@ class SparseSemiStructuredTensorCompileTest(torch._dynamo.test_case.TestCase):
@unittest.skipIf(IS_WINDOWS, "torch.compile not supported on windows")
def test_mlp_contiguous_relu_compile_cutlass(self):
"""
- test for CUTLASS meta registrations (_sparse_semi_structured_linear) + torch.compile
+ test for CUTLASS meta registrations (_sparse_semi_structured_addmm) + torch.compile
"""
for dense_input_shape in [(1, 128), (64, 128), (128, 128), (64, 128, 128)]:
SparseSemiStructuredTensorCompileTest._test_mlp_contiguous_relu_compile("cutlass", dense_input_shape)
@@ -258,7 +258,7 @@ class TestSparseSemiStructured(TestCase):
if dtype is torch.int8:
# This should fail
if backend == "cutlass":
- with self.assertRaisesRegex(RuntimeError, "two_four_sgemm_dispatch_layouts"):
+ with self.assertRaisesRegex(RuntimeError, "spgemm_cutlass_dispatch_layouts"):
sparse_result = torch.mm(A_sparse, B)
else:
with self.assertRaisesRegex(RuntimeError,
@@ -291,7 +291,7 @@ class TestSparseSemiStructured(TestCase):
# padding with int8 throws an error because transposing B yields a contiguous output
# and row-row 2:4 sparse @ dense with NN is not supported by cuSPARSELt or CUTLASS.
if backend == "cutlass":
- with self.assertRaisesRegex(RuntimeError, "two_four_sgemm_dispatch_layouts"):
+ with self.assertRaisesRegex(RuntimeError, "spgemm_cutlass_dispatch_layouts"):
sparse_result = torch.mm(A_sparse, B.t())
else:
with self.assertRaisesRegex(RuntimeError,
@@ -575,6 +575,73 @@ class TestSparseSemiStructured(TestCase):
torch.backends.cuda.matmul.allow_tf32 = orig
+ @unittest.skipIf(TEST_WITH_ROCM or IS_WINDOWS, "ROCm and Windows doesn't support CUTLASS")
+ @parametrize("backend", ["cutlass"])
+ @dtypes(*SEMI_STRUCTURED_SUPPORTED_DTYPES)
+ def test_sparse_semi_structured_ops_cutlass(self, device, dtype, backend):
+ SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass")
+ if backend == "cutlass" and IS_WINDOWS:
+ self.skipTest("CUTLASS not supported on Windows")
+
+ def run_test(m, n, k, device, dtype, dtype_out, use_input, rtol, atol):
+ mat1 = rand_sparse_semi_structured(m, k, dtype, device)
+ # mat2 transposed as int8 case supports only row-major/column-major combination
+ mat2 = make_tensor((n, k), dtype=dtype, device=device).t()
+ input = make_tensor((m,), dtype=dtype_out, device=device) if use_input else None
+
+ if use_input:
+ if dtype.is_floating_point:
+ alpha = 1.3
+ beta = -0.7
+ else:
+ alpha = 2
+ beta = -3
+
+ dtype_dense = torch.float32
+ mat1_dense = mat1.to(dtype_dense)
+ mat2_dense = mat2.to(dtype_dense)
+ if not use_input:
+ output0 = torch.mm(mat1_dense, mat2_dense)
+ else:
+ input_dense = input.to(dtype_dense)[:, None]
+ output0 = torch.addmm(input_dense, mat1_dense, mat2_dense, alpha=alpha, beta=beta)
+
+ compressed = to_sparse_semi_structured(mat1)
+
+ mat1_sparse = compressed.values()
+ mat1_meta = compressed.indices()
+
+ if not use_input:
+ output1 = torch._sparse_semi_structured_mm(mat1_sparse, mat1_meta, mat2, out_dtype=dtype_out)
+ else:
+ output1 = torch._sparse_semi_structured_addmm(
+ input, mat1_sparse, mat1_meta, mat2, alpha=alpha, beta=beta, out_dtype=dtype_out
+ )
+ torch.testing.assert_close(output1.to(dtype_dense), output0, rtol=rtol, atol=atol)
+
+ if dtype == torch.float32:
+ # Inputs are converted to TF32 internally for sparse GEMM,
+ # so make dense GEMM to do the same for matching results.
+ orig = torch.backends.cuda.matmul.allow_tf32
+ torch.backends.cuda.matmul.allow_tf32 = True
+
+ dtype_out = {torch.int8: torch.int32, torch.half: torch.half, torch.bfloat16: torch.bfloat16, torch.float32: torch.float32}
+ rtol, atol = 1e-3, 1e-3
+ if dtype == torch.bfloat16:
+ rtol, atol = 5e-3, 5e-3
+ elif dtype == torch.float32:
+ rtol, atol = 1e-3, 75e-2
+ for m, n, k, use_input in \
+ itertools.product(range(3), range(3), range(3), (False, True)):
+ m = 2 ** m * 32
+ n = 2 ** n * 32
+ k = 2 ** k * 128
+ run_test(m, n, k, device, dtype, dtype_out[dtype], use_input, rtol, atol)
+
+ if dtype == torch.float32:
+ torch.backends.cuda.matmul.allow_tf32 = orig
+
+
@unittest.skipIf(not has_triton(), "Test needs triton and recent GPU arch")
@parametrize("backend", ["cutlass"])
@dtypes(*SEMI_STRUCTURED_SUPPORTED_DTYPES)
diff --git a/torch/_dynamo/trace_rules.py b/torch/_dynamo/trace_rules.py
index a16333cd7f..393c649133 100644
--- a/torch/_dynamo/trace_rules.py
+++ b/torch/_dynamo/trace_rules.py
@@ -1542,7 +1542,9 @@ torch_c_binding_in_graph_functions = dict.fromkeys(
"torch._sparse_csr_prod",
"torch._sparse_csr_sum",
"torch._sparse_log_softmax_backward_data",
+ "torch._sparse_semi_structured_addmm",
"torch._sparse_semi_structured_linear",
+ "torch._sparse_semi_structured_mm",
"torch._sparse_softmax_backward_data",
"torch._sparse_sparse_matmul",
"torch._sparse_sum",
diff --git a/torch/_meta_registrations.py b/torch/_meta_registrations.py
index fd69525b6d..70e91dc3f2 100644
--- a/torch/_meta_registrations.py
+++ b/torch/_meta_registrations.py
@@ -431,6 +431,66 @@ def meta_sparse_structured_linear(
return output
+@register_meta(aten._sparse_semi_structured_mm)
+def meta_sparse_structured_mm(
+ mat1: Tensor,
+ mat1_meta: Tensor,
+ mat2: Tensor,
+ out_dtype: Optional[torch.dtype] = None,
+):
+ assert len(mat1.shape) == 2
+ assert len(mat1_meta.shape) == 2
+ assert len(mat2.shape) == 2
+ assert mat1.size(1) == mat2.size(0) / 2
+ output_sizes = [mat1.size(0), mat2.size(1)]
+
+ if out_dtype is not None:
+ assert (
+ mat2.dtype == torch.int8 and out_dtype == torch.int32
+ ), "out_dtype is only supported for i8i8->i32 linear operator"
+ output = mat2.new_empty(
+ output_sizes,
+ dtype=mat2.dtype if out_dtype is None else out_dtype,
+ )
+
+ return output
+
+
+@register_meta(aten._sparse_semi_structured_addmm)
+def meta_sparse_structured_addmm(
+ input: Tensor,
+ mat1: Tensor,
+ mat1_meta: Tensor,
+ mat2: Tensor,
+ *,
+ alpha=1,
+ beta=1,
+ out_dtype: Optional[torch.dtype] = None,
+):
+ assert (
+ len(input.shape) == 1
+ ), "only input broadcasted to columns of mat1 * mat2 product is supported"
+ assert len(mat1.shape) == 2
+ assert len(mat1_meta.shape) == 2
+ assert len(mat2.shape) == 2
+ assert input.size(0) == mat1.size(
+ 0
+ ), "only input broadcasted to columns of mat1 * mat2 product is supported"
+ assert mat1.size(1) == mat2.size(0) / 2
+ output_sizes = [mat1.size(0), mat2.size(1)]
+
+ if out_dtype is not None:
+ assert (
+ mat2.dtype == torch.int8 and out_dtype == torch.int32
+ ), "out_dtype is only supported for i8i8->i32 linear operator"
+ output = mat2.new_empty(
+ output_sizes,
+ dtype=mat2.dtype if out_dtype is None else out_dtype,
+ )
+
+ return output
+
+
@register_meta(aten._cslt_sparse_mm)
def meta__cslt_sparse_mm(
compressed_A: torch.Tensor,
diff --git a/torch/sparse/semi_structured.py b/torch/sparse/semi_structured.py
index 03c15c0eee..7c86b0d43b 100644
--- a/torch/sparse/semi_structured.py
+++ b/torch/sparse/semi_structured.py
@@ -47,7 +47,7 @@ class SparseSemiStructuredTensor(torch.Tensor):
-`_DTYPE_SHAPE_CONSTRAINTS` - A dictionary holding backend specific dense/sparse min shape constraints
- `def from_dense()` - backend specific compression routines
- - `def _mm()` - backend specifc mm op (either torch._cslt_sparse_mm or torch._sparse_semi_structured_linear)
+ - `def _mm()` - backend specifc mm op (either torch._cslt_sparse_mm or torch._sparse_semi_structured_(mm|addmm))
"""
_DEFAULT_ALG_ID: int = 0
@@ -371,11 +371,12 @@ class SparseSemiStructuredTensorCUTLASS(SparseSemiStructuredTensor):
"""
This class implements semi-structured sparsity for the CUTLASS backend.
+
In this implementation, the specified elements and metadata are stored seprately,
in packed and meta respectively.
- When _FORCE_CUTLASS is set, or when cuSPARSELt is not available, this subclass calls into _sparse_semi_structured_linear
- and sparse_semi_structured_from_dense for conversion to the compressed format.
+ When _FORCE_CUTLASS is set, or when cuSPARSELt is not available, this subclass calls into _sparse_semi_structured_(mm|addmm) and
+ sparse_semi_structured_from_dense for conversion to the compressed format.
"""
_DTYPE_SHAPE_CONSTRAINTS = {
@@ -436,9 +437,14 @@ class SparseSemiStructuredTensorCUTLASS(SparseSemiStructuredTensor):
f"`{cls_name}` matmul: operation is not supported"
)
else:
- res = torch._sparse_semi_structured_linear(
- B.t(), self.packed, self.meta, bias=bias
- ).t()
+ if bias is None:
+ res = torch._sparse_semi_structured_mm(
+ self.packed, self.meta, B
+ )
+ else:
+ res = torch._sparse_semi_structured_addmm(
+ bias, self.packed, self.meta, B
+ )
return res[: self.shape[0]]
|
2.41.0
|
096e99a5d59a9b22bfaa7ea15492fd454592c48
|
Sun, 14 Apr 2024 08:13:52 -0700
|
[PATCH 0146/1000] Enable int8mm kernel for float16 (#124022)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124022 Approved by: https://github.com/mikekgfb
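The PR body carries only links, so for context here is a hedged CPU usage sketch of the path being extended to float16. The symmetric per-output-channel quantization recipe and the dense reference line are assumptions inferred from the checks visible in the diff, not part of the patch:
```python
import torch

M, K, N = 4, 64, 8
x = torch.randn(M, K, dtype=torch.float16)   # activations; fp16 now accepted alongside bf16
w = torch.randn(N, K, dtype=torch.float16)

# Assumed symmetric per-output-channel int8 quantization of the weight.
scales = w.abs().amax(dim=1) / 127.0
w_int8 = torch.round(w / scales[:, None]).to(torch.int8)

out = torch.ops.aten._weight_int8pack_mm(x, w_int8, scales)
# Roughly the dense equivalent (up to rounding):
ref = x @ (w_int8.to(torch.float16) * scales[:, None]).t()
```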
|
diff --git a/aten/src/ATen/native/LinearAlgebra.cpp b/aten/src/ATen/native/LinearAlgebra.cpp
index 5824cfa23d..26242b4b70 100644
--- a/aten/src/ATen/native/LinearAlgebra.cpp
+++ b/aten/src/ATen/native/LinearAlgebra.cpp
@@ -3516,8 +3516,8 @@ Tensor _weight_int8pack_mm_cpu(
auto N = B.size(0);
auto K = A.size(1);
- TORCH_CHECK(A.dtype() == kBFloat16,
- __func__, " : expect A to be bfloat16 tensor.");
+ TORCH_CHECK(A.dtype() == kBFloat16 || A.dtype() == kHalf,
+ __func__, " : expect A to be float16 or bfloat16 tensor.");
TORCH_CHECK(A.is_contiguous(),
__func__, " : expect A to be contiguous.");
TORCH_CHECK(A.dim() == 2,
diff --git a/aten/src/ATen/native/cpu/int8mm_kernel.cpp b/aten/src/ATen/native/cpu/int8mm_kernel.cpp
index f7b2222945..3645bae3a6 100644
--- a/aten/src/ATen/native/cpu/int8mm_kernel.cpp
+++ b/aten/src/ATen/native/cpu/int8mm_kernel.cpp
@@ -180,15 +180,15 @@ inline void tinygemm_kernel(
c10::ForcedUnroll<ROWS * COLS>{}(storec);
}
-#else
+#endif
// non-vectorized version
-template <int BLOCK_M, int BLOCK_N>
+template <int BLOCK_M, int BLOCK_N, typename T>
inline void tinygemm_kernel(
- const BFloat16* RESTRICT A,
+ const T* RESTRICT A,
const int8_t* RESTRICT B,
- const BFloat16* RESTRICT scales,
- BFloat16* RESTRICT C,
+ const T* RESTRICT scales,
+ T* RESTRICT C,
int lda,
int ldb,
int ldc,
@@ -208,8 +208,6 @@ inline void tinygemm_kernel(
}
}
-#endif
-
#define LAUNCH_TINYGEMM_KERNEL(MB_SIZE, NB_SIZE) \
tinygemm_kernel<MB_SIZE, NB_SIZE>( \
A_ptr, B_ptr, S_ptr, C_ptr, \
@@ -234,16 +232,17 @@ inline void tinygemm_kernel(
break; \
}
-void int8pack_mm_kernel(
+template<typename T>
+void int8pack_mm_kernel_(
const Tensor& C,
const Tensor& A,
const Tensor& B,
const Tensor& scales) {
- const auto* A_data = A.data_ptr<BFloat16>();
+ const auto* A_data = A.data_ptr<T>();
const auto* B_data = B.data_ptr<int8_t>();
- auto* C_data = C.data_ptr<BFloat16>();
- const auto* S_data = scales.data_ptr<BFloat16>();
+ auto* C_data = C.data_ptr<T>();
+ const auto* S_data = scales.data_ptr<T>();
int M = A.size(0);
int N = B.size(0);
@@ -295,6 +294,18 @@ void int8pack_mm_kernel(
});
}
+void int8pack_mm_kernel(
+ const Tensor& C,
+ const Tensor& A,
+ const Tensor& B,
+ const Tensor& scales) {
+ if (C.dtype() == kHalf) {
+ int8pack_mm_kernel_<Half>(C, A, B, scales);
+ } else {
+ int8pack_mm_kernel_<BFloat16>(C, A, B, scales);
+ }
+}
+
} // anonymous namespace
ALSO_REGISTER_AVX512_DISPATCH(int8pack_mm_stub, &int8pack_mm_kernel);
diff --git a/torch/_meta_registrations.py b/torch/_meta_registrations.py
index 70e91dc3f2..b7fad332d2 100644
--- a/torch/_meta_registrations.py
+++ b/torch/_meta_registrations.py
@@ -3545,8 +3545,8 @@ def meta__weight_int4pack_mm(x, w, q_group_size, q_scale_and_zeros):
def meta__weight_int8pack_mm(x, w, q_scales):
torch._check(x.dim() == 2, lambda: "x must be a 2D tensor")
torch._check(
- x.dtype is torch.bfloat16,
- lambda: f"expected x to be bf16, got {x.dtype}",
+ x.dtype in [torch.float16, torch.bfloat16],
+ lambda: f"expected x to be f16/bf16, got {x.dtype}",
)
torch._check(w.dim() == 2, lambda: "w must be a 2D tensor")
torch._check(
|
2.41.0
|
9f50333e91e9e8b20a78517becd74bca70c7d46
|
Fri, 12 Apr 2024 15:34:17 -0400
|
[PATCH 0147/1000] Improve assert message for unbacked symint not written out (#123965)
|
Signed-off-by: Edward Z. Yang <ezyang@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/123965 Approved by: https://github.com/Skylion007
|
diff --git a/torch/_inductor/ir.py b/torch/_inductor/ir.py
index 860b71c545..ba0a726a26 100644
--- a/torch/_inductor/ir.py
+++ b/torch/_inductor/ir.py
@@ -3069,7 +3069,7 @@ class Buffer(IRNode):
symbols_to_define.remove(s)
assert (
not symbols_to_define
- ), f"unbacked symint {s} not written out, check comment above"
+ ), f"unbacked symint {symbols_to_define} not written out, check comment above"
def realize(self):
pass
|
2.41.0
|
3a05e791aabe4f2e3938626bff849641dc101ea
|
Sat, 13 Apr 2024 10:54:01 -0700
|
[PATCH 0148/1000] Don't add non-integer Triton kernel arg 1 to equal_to_1 (#123886)
|
Summary: The Triton compiler adds the constant argument 1 to `equal_to_1` [only when it's an int](https://github.com/openai/triton/blob/8c5e33c77ef83e0cb99c744e58842930e602df31/python/triton/runtime/jit.py#L275). Here we restrict Inductor's `equal_to_1` in the same way. Test Plan: ``` $ python test/inductor/test_triton_kernels.py -k test_triton_kernel_equal_to_1_float_arg ... ---------------------------------------------------------------------- Ran 1 test in 6.528s OK $ python test/inductor/test_triton_kernels.py -k test_triton_kernel_equal_to_1_arg ... ---------------------------------------------------------------------- Ran 2 tests in 10.142s OK ``` Pull Request resolved: https://github.com/pytorch/pytorch/pull/123886 Approved by: https://github.com/oulgen ghstack dependencies: #123703
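For illustration, a minimal standalone sketch of the rule described above, assuming only the `sympy` package. It is not the actual Inductor code (the real check goes through `V.graph.sizevars.statically_known_equals`, as the diff below shows), and the helper name `is_equal_to_1_candidate` is hypothetical: only integer-valued constants equal to 1 become `equal_to_1` candidates, while a float `1.0`, whether a Python literal or a `sympy.Float`, keeps its `fp32` signature.
```python
# Hedged sketch, not the real Inductor implementation; the helper name is hypothetical.
import sympy

def is_equal_to_1_candidate(arg) -> bool:
    # Stand-in for V.graph.sizevars.statically_known_equals(arg, 1): for
    # concrete constants a direct comparison is enough in this sketch.
    return isinstance(arg, (int, sympy.Integer)) and int(arg) == 1

assert is_equal_to_1_candidate(1)                     # plain int 1: folded into equal_to_1
assert is_equal_to_1_candidate(sympy.Integer(1))      # symbolic int 1: folded into equal_to_1
assert not is_equal_to_1_candidate(1.0)               # float literal 1.0: stays an fp32 argument
assert not is_equal_to_1_candidate(sympy.Float(1.0))  # symbolic float 1.0: stays an fp32 argument
```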
|
diff --git a/test/inductor/test_aot_inductor.py b/test/inductor/test_aot_inductor.py
index 5de6d91a0b..2a41afbbe4 100644
--- a/test/inductor/test_aot_inductor.py
+++ b/test/inductor/test_aot_inductor.py
@@ -43,6 +43,7 @@ if HAS_CUDA:
add_kernel_2d_autotuned,
add_kernel_autotuned,
add_kernel_with_optional_param,
+ add_kernel_with_scaling,
)
if IS_WINDOWS and IS_CI:
@@ -1846,6 +1847,44 @@ class AOTInductorTestsTemplate:
self.check_model(Model(), example_inputs)
+ @skipIfRocm
+ @common_utils.parametrize("dynamic", [False, True])
+ def test_triton_kernel_equal_to_1_float_arg(self, dynamic):
+ if self.device != "cuda":
+ raise unittest.SkipTest("requires CUDA")
+
+ class Model(torch.nn.Module):
+ def forward(self, x, y):
+ out = torch.empty_like(x)
+ n_elements = x.numel()
+ scaling_factor = (n_elements**0) / 1.0
+ add_kernel_with_scaling[(n_elements,)](
+ x,
+ y,
+ out,
+ n_elements,
+ scaling_factor,
+ BLOCK_SIZE=16,
+ )
+ return out
+
+ dynamic_shapes = None
+ if dynamic:
+ dim0_xy = Dim("s0", min=2, max=1024)
+ dynamic_shapes = {
+ "x": {0: dim0_xy, 1: None},
+ "y": {0: dim0_xy, 1: None},
+ }
+ example_inputs = (
+ torch.randn(2, device=self.device),
+ torch.randn(2, device=self.device),
+ )
+ self.check_model(
+ Model(),
+ example_inputs,
+ dynamic_shapes=dynamic_shapes,
+ )
+
def test_shifted_constraint_ranges(self):
class Model(torch.nn.Module):
def __init__(self):
diff --git a/test/inductor/test_triton_kernels.py b/test/inductor/test_triton_kernels.py
index 162728b3a3..73dfff74fc 100644
--- a/test/inductor/test_triton_kernels.py
+++ b/test/inductor/test_triton_kernels.py
@@ -1012,6 +1012,36 @@ def forward(self, x_1, output_1):
self.assertTrue("equal_to_1=(3,)" in sources[0])
self.assertEqual(compiled_out, eager_out)
+ @requires_cuda
+ @skipIfRocm
+ @common_utils.parametrize("dynamic", [False, True])
+ def test_triton_kernel_equal_to_1_float_arg(self, dynamic):
+ def f(x, y):
+ out = torch.empty_like(x)
+ n_elements = x.numel()
+ scaling_factor = (n_elements**0) / 1.0
+ add_kernel_with_scaling[(n_elements,)](
+ x,
+ y,
+ out,
+ n_elements,
+ scaling_factor,
+ BLOCK_SIZE=16,
+ )
+ return out
+
+ x = torch.randn(2, device="cuda")
+ y = torch.randn(2, device="cuda")
+ eager_out = f(x, y)
+ compiled_out, sources = run_and_get_code(
+ torch.compile(f, dynamic=dynamic), x, y
+ )
+
+ # float 1.0 (both literal or symbolic)
+ # should not be added to equal_to_1
+ self.assertTrue("equal_to_1=()" in sources[0])
+ self.assertEqual(compiled_out, eager_out)
+
@requires_cuda
@skipIfRocm
def test_triton_kernel_with_imported_symbol(self):
diff --git a/torch/_inductor/codegen/cpp_wrapper_cuda.py b/torch/_inductor/codegen/cpp_wrapper_cuda.py
index 688289aa86..e0a3baff06 100644
--- a/torch/_inductor/codegen/cpp_wrapper_cuda.py
+++ b/torch/_inductor/codegen/cpp_wrapper_cuda.py
@@ -198,6 +198,8 @@ class CppWrapperCuda(CppWrapperCpu):
var_name = f"var_{next(self.arg_var_id)}"
if isinstance(arg, (sympy.Integer, sympy.Symbol, SymbolicCallArg)):
self.writeline(f"auto {var_name} = {arg};")
+ elif isinstance(arg, sympy.Float):
+ self.writeline(f"float {var_name} = {self.expr_printer(arg)};")
elif isinstance(arg, sympy.Expr):
self.writeline(f"auto {var_name} = {self.expr_printer(arg)};")
elif is_int(arg):
diff --git a/torch/_inductor/codegen/triton_utils.py b/torch/_inductor/codegen/triton_utils.py
index d1f58187ca..c95e699bcd 100644
--- a/torch/_inductor/codegen/triton_utils.py
+++ b/torch/_inductor/codegen/triton_utils.py
@@ -1,5 +1,7 @@
from typing import Any, Dict, List, Optional
+import sympy
+
import torch
from .. import config
@@ -36,7 +38,7 @@ def signature_of(arg: KernelArgType, *, size_dtype: str) -> str:
# From triton/runtime/jit.py
# `None` is nullptr. Implicitly convert to *i8.
return "*i8"
- elif isinstance(arg.expr, float):
+ elif isinstance(arg.expr, (float, sympy.Float)):
return "fp32"
if size_dtype == "tl.int32":
return "i32"
@@ -118,7 +120,7 @@ def config_of(
i
for i, arg in zip(indices, args)
if isinstance(arg, SizeArg)
- and arg.expr is not None
+ and isinstance(arg.expr, (int, sympy.Integer))
and V.graph.sizevars.statically_known_equals(arg.expr, 1) # type: ignore[arg-type]
)
# ids_of_folded_args is set from equal_to_1
diff --git a/torch/_inductor/codegen/wrapper.py b/torch/_inductor/codegen/wrapper.py
index a754f3522e..09b6328e26 100644
--- a/torch/_inductor/codegen/wrapper.py
+++ b/torch/_inductor/codegen/wrapper.py
@@ -1068,7 +1068,11 @@ class WrapperCodeGen(CodeGen):
)
else:
signature.append(SizeArg(key, arg))
- if arg is not None and V.graph.sizevars.statically_known_equals(arg, 1): # type: ignore[arg-type]
+ if isinstance(
+ arg, (int, sympy.Integer)
+ ) and V.graph.sizevars.statically_known_equals(
+ arg, 1 # type: ignore[arg-type]
+ ):
equal_to_1_arg_idx.append(idx)
index_dtype = "tl.int32"
triton_meta = {
diff --git a/torch/testing/_internal/triton_utils.py b/torch/testing/_internal/triton_utils.py
index 5478aea36e..dd1ab9e6d6 100644
--- a/torch/testing/_internal/triton_utils.py
+++ b/torch/testing/_internal/triton_utils.py
@@ -116,6 +116,24 @@ if HAS_CUDA:
tmp2 = tmp0 + tmp1
tl.store(out_ptr + (x1 + (x_elements * y0)), tmp2, xmask & ymask)
+ @triton.jit
+ def add_kernel_with_scaling(
+ in_ptr0,
+ in_ptr1,
+ out_ptr,
+ n_elements,
+ scaling_factor,
+ BLOCK_SIZE: "tl.constexpr",
+ ):
+ pid = tl.program_id(axis=0)
+ block_start = pid * BLOCK_SIZE
+ offsets = block_start + tl.arange(0, BLOCK_SIZE)
+ mask = offsets < n_elements
+ x = tl.load(in_ptr0 + offsets, mask=mask)
+ y = tl.load(in_ptr1 + offsets, mask=mask)
+ output = (x + y) * scaling_factor
+ tl.store(out_ptr + offsets, output, mask=mask)
+
@triton.jit
def mul2_kernel(
in_ptr0,
|
2.41.0
|
3ac61587aa368c613ef01df1f328a396b64cd5d
|
Mon, 15 Apr 2024 06:21:52 +0000
|
[PATCH 0149/1000] Enable UFMT on `test/functorch` (#123541)
|
Partially addresses #123062 Ran lintrunner on: - `test/functorch` Co-authored-by: Edward Z. Yang <ezyang@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/123541 Approved by: https://github.com/zou3519, https://github.com/ezyang
|
diff --git a/.lintrunner.toml b/.lintrunner.toml
index 65a4c936e2..e223d1a069 100644
--- a/.lintrunner.toml
+++ b/.lintrunner.toml
@@ -1144,24 +1144,6 @@ exclude_patterns = [
'test/distributed/test_pg_wrapper.py',
'test/distributed/test_store.py',
'test/expect/__init__.py',
- 'test/functorch/attn_ft.py',
- 'test/functorch/attn_positional.py',
- 'test/functorch/common_utils.py',
- 'test/functorch/discover_coverage.py',
- 'test/functorch/functorch_additional_op_db.py',
- 'test/functorch/test_aotdispatch.py',
- 'test/functorch/test_control_flow.py',
- 'test/functorch/test_dims.py',
- 'test/functorch/test_eager_transforms.py',
- 'test/functorch/test_logging.py',
- 'test/functorch/test_memory_efficient_fusion.py',
- 'test/functorch/test_minifier.py',
- 'test/functorch/test_ops.py',
- 'test/functorch/test_parsing.py',
- 'test/functorch/test_rearrange.py',
- 'test/functorch/test_vmap.py',
- 'test/functorch/test_vmap_registrations.py',
- 'test/functorch/xfail_suggester.py',
'test/jit/__init__.py',
'test/jit/_imported_class_test/__init__.py',
'test/jit/_imported_class_test/bar.py',
diff --git a/test/functorch/attn_ft.py b/test/functorch/attn_ft.py
index 7a81c791aa..ee46566319 100644
--- a/test/functorch/attn_ft.py
+++ b/test/functorch/attn_ft.py
@@ -3,10 +3,11 @@
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
+import math
+
import torch
+from functorch.dim import cat, dimlists, dims, softmax
from torch import nn
-from functorch.dim import dims, dimlists, softmax, cat
-import math
class Linear(nn.Linear):
@@ -16,10 +17,17 @@ class Linear(nn.Linear):
result = (input[b, ci] * self.weight[co, ci]).sum(ci) + self.bias[co]
return result.order(b, co)
+
class BertSelfAttention(nn.Module):
- def __init__(self, hidden_size, num_attention_heads,
- attention_probs_dropout_prob, position_embedding_type=None,
- max_position_embeddings=None, linear=Linear):
+ def __init__(
+ self,
+ hidden_size,
+ num_attention_heads,
+ attention_probs_dropout_prob,
+ position_embedding_type=None,
+ max_position_embeddings=None,
+ linear=Linear,
+ ):
super().__init__()
if hidden_size % num_attention_heads != 0:
raise ValueError(
@@ -41,7 +49,9 @@ class BertSelfAttention(nn.Module):
if self.position_embedding_type is not None:
assert max_position_embeddings is not None
self.max_position_embeddings = max_position_embeddings
- self.distance_embedding = nn.Embedding(2 * max_position_embeddings - 1, self.attention_head_size)
+ self.distance_embedding = nn.Embedding(
+ 2 * max_position_embeddings - 1, self.attention_head_size
+ )
def forward(
self,
@@ -70,7 +80,6 @@ class BertSelfAttention(nn.Module):
k = k[batch, key_sequence, [heads, features]]
v = v[batch, key_sequence, [heads, features]]
-
# this option allows the model to attend to not just the elements of the current sequence
# but the previous elements as well as additional tokens.
if past_key_value is not None:
@@ -85,7 +94,6 @@ class BertSelfAttention(nn.Module):
# key_sequence
key_sequence = extended_key_sequence
-
# Take the dot product between "query" and "key" to get the raw attention scores.
# The actual outer-product and summation are explicitly represented here,
# and like einsum, will be pattern matched to an efficient matrix multiply op.
@@ -112,7 +120,9 @@ class BertSelfAttention(nn.Module):
# this form of indirect indexing is more straightforward than either advanced indexing or torch.gather which both
# have a lot of dependencies on the positions of indexing tensors.
- positional_embedding = self.distance_embedding.weight[self.max_position_embeddings - 1 + distance, features]
+ positional_embedding = self.distance_embedding.weight[
+ self.max_position_embeddings - 1 + distance, features
+ ]
if self.position_embedding_type == "relative_key":
# these were einsum ops in the positional code because they are not easy to fit to existing matmul operators
@@ -120,16 +130,24 @@ class BertSelfAttention(nn.Module):
relative_position_scores = (q * positional_embedding).sum(features)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == "relative_key_query":
- relative_position_scores_query = (q * positional_embedding).sum(features)
+ relative_position_scores_query = (q * positional_embedding).sum(
+ features
+ )
relative_position_scores_key = (k * positional_embedding).sum(features)
- attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
+ attention_scores = (
+ attention_scores
+ + relative_position_scores_query
+ + relative_position_scores_key
+ )
attention_probs = attention_scores
# Normalize the attention scores to probabilities.
attention_probs = softmax(attention_scores, dim=key_sequence)
# # This is actually dropping out entire tokens to attend to, which might
# # seem a bit unusual, but is taken from the original Transformer paper.
- attention_probs = torch.nn.functional.dropout(attention_probs, p=self.dropout_prob)
+ attention_probs = torch.nn.functional.dropout(
+ attention_probs, p=self.dropout_prob
+ )
# similarly, we can replace the matmul with a direct listing of the outer product, which makes it clear
# we are weighting the values v across all keys with the attention scores.
diff --git a/test/functorch/attn_positional.py b/test/functorch/attn_positional.py
index b10130751f..c75e8f3ec9 100644
--- a/test/functorch/attn_positional.py
+++ b/test/functorch/attn_positional.py
@@ -3,14 +3,21 @@
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
+import math
+
import torch
from torch import nn
-import math
+
class BertSelfAttention(nn.Module):
- def __init__(self, hidden_size, num_attention_heads,
- attention_probs_dropout_prob,
- position_embedding_type=None, max_position_embeddings=None):
+ def __init__(
+ self,
+ hidden_size,
+ num_attention_heads,
+ attention_probs_dropout_prob,
+ position_embedding_type=None,
+ max_position_embeddings=None,
+ ):
super().__init__()
if hidden_size % num_attention_heads != 0:
raise ValueError(
@@ -32,10 +39,15 @@ class BertSelfAttention(nn.Module):
if self.position_embedding_type is not None:
assert max_position_embeddings is not None
self.max_position_embeddings = max_position_embeddings
- self.distance_embedding = nn.Embedding(2 * max_position_embeddings - 1, self.attention_head_size)
+ self.distance_embedding = nn.Embedding(
+ 2 * max_position_embeddings - 1, self.attention_head_size
+ )
def transpose_for_scores(self, x):
- new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ new_x_shape = x.size()[:-1] + (
+ self.num_attention_heads,
+ self.attention_head_size,
+ )
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
@@ -56,26 +68,43 @@ class BertSelfAttention(nn.Module):
k = torch.cat([past_key_value[0], k], dim=2)
v = torch.cat([past_key_value[1], v], dim=2)
-
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(q, k.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if self.position_embedding_type is not None:
seq_length = hidden_states.size()[1]
- position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
- position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
+ position_ids_l = torch.arange(
+ seq_length, dtype=torch.long, device=hidden_states.device
+ ).view(-1, 1)
+ position_ids_r = torch.arange(
+ seq_length, dtype=torch.long, device=hidden_states.device
+ ).view(1, -1)
distance = position_ids_l - position_ids_r
- positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
- positional_embedding = positional_embedding.to(dtype=q.dtype) # fp16 compatibility
+ positional_embedding = self.distance_embedding(
+ distance + self.max_position_embeddings - 1
+ )
+ positional_embedding = positional_embedding.to(
+ dtype=q.dtype
+ ) # fp16 compatibility
if self.position_embedding_type == "relative_key":
- relative_position_scores = torch.einsum("bhld,lrd->bhlr", q, positional_embedding)
+ relative_position_scores = torch.einsum(
+ "bhld,lrd->bhlr", q, positional_embedding
+ )
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == "relative_key_query":
- relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", q, positional_embedding)
- relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", k, positional_embedding)
- attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
+ relative_position_scores_query = torch.einsum(
+ "bhld,lrd->bhlr", q, positional_embedding
+ )
+ relative_position_scores_key = torch.einsum(
+ "bhrd,lrd->bhlr", k, positional_embedding
+ )
+ attention_scores = (
+ attention_scores
+ + relative_position_scores_query
+ + relative_position_scores_key
+ )
attention_probs = attention_scores
# Normalize the attention scores to probabilities.
@@ -84,7 +113,6 @@ class BertSelfAttention(nn.Module):
# # seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
-
context_layer = torch.matmul(attention_probs, v)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
diff --git a/test/functorch/common_utils.py b/test/functorch/common_utils.py
index 46555f5d51..fed53d645c 100644
--- a/test/functorch/common_utils.py
+++ b/test/functorch/common_utils.py
@@ -5,20 +5,20 @@
# LICENSE file in the root directory of this source tree.
import itertools
+import os
+import unittest
+from collections import namedtuple
+
import torch
-from functorch import vmap
import torch.utils._pytree as pytree
+from functorch import vmap
from functorch_additional_op_db import additional_op_db
-from torch.testing._internal.common_methods_invocations import DecorateInfo
-from torch.testing._internal.common_methods_invocations import op_db
-from torch.testing._internal.common_modules import module_db
-import os
-import unittest
-from torch.testing._internal.common_device_type import toleranceOverride
from torch.testing._internal.autograd_function_db import autograd_function_db
-from collections import namedtuple
+from torch.testing._internal.common_device_type import toleranceOverride
+from torch.testing._internal.common_methods_invocations import DecorateInfo, op_db
+from torch.testing._internal.common_modules import module_db
-IS_FBCODE = os.getenv('FUNCTORCH_TEST_FBCODE') == '1'
+IS_FBCODE = os.getenv("FUNCTORCH_TEST_FBCODE") == "1"
def loop(op, in_dims, out_dim, batch_size, *batched_args, **kwarg_values):
@@ -28,7 +28,10 @@ def loop(op, in_dims, out_dim, batch_size, *batched_args, **kwarg_values):
flat_args, args_spec = pytree.tree_flatten(batched_args)
flat_dims, dims_spec = pytree.tree_flatten(in_dims)
assert args_spec == dims_spec
- new_args = [a.select(in_dim, idx) if in_dim is not None else a for a, in_dim in zip(flat_args, flat_dims)]
+ new_args = [
+ a.select(in_dim, idx) if in_dim is not None else a
+ for a, in_dim in zip(flat_args, flat_dims)
+ ]
out = op(*pytree.tree_unflatten(new_args, args_spec), **kwarg_values)
flat_out, out_spec = pytree.tree_flatten(out)
outs.append(flat_out)
@@ -40,7 +43,17 @@ def loop(op, in_dims, out_dim, batch_size, *batched_args, **kwarg_values):
# Like loop helper function but for 2 levels of vmap. If we need more levels than this, probably possible
# to generalize the loops function but it seemed too complicated for this
-def loop2(op, in_dims1, in_dims2, out_dim1, out_dim2, batch_size1, batch_size2, *batched_args, **kwarg_values):
+def loop2(
+ op,
+ in_dims1,
+ in_dims2,
+ out_dim1,
+ out_dim2,
+ batch_size1,
+ batch_size2,
+ *batched_args,
+ **kwarg_values,
+):
outs = []
flat_args, args_spec = pytree.tree_flatten(batched_args)
flat_dims1, dims_spec1 = pytree.tree_flatten(in_dims1)
@@ -50,9 +63,15 @@ def loop2(op, in_dims1, in_dims2, out_dim1, out_dim2, batch_size1, batch_size2,
assert len(flat_dims1) == len(flat_dims2)
for idx1 in range(batch_size1):
out_split = []
- arg_split = [a.select(in_dim1, idx1) if in_dim1 is not None else a for a, in_dim1 in zip(flat_args, flat_dims1)]
+ arg_split = [
+ a.select(in_dim1, idx1) if in_dim1 is not None else a
+ for a, in_dim1 in zip(flat_args, flat_dims1)
+ ]
for idx2 in range(batch_size2):
- new_args = [a.select(in_dim, idx2) if in_dim is not None else a for a, in_dim in zip(arg_split, flat_dims2)]
+ new_args = [
+ a.select(in_dim, idx2) if in_dim is not None else a
+ for a, in_dim in zip(arg_split, flat_dims2)
+ ]
out = op(*pytree.tree_unflatten(new_args, args_spec), **kwarg_values)
out_split.append(out)
outs.append(out_split)
@@ -103,6 +122,7 @@ def memoize(fn):
if args not in memo:
memo[args] = fn(*args)
return memo[args]
+
return wrapped
@@ -123,10 +143,13 @@ def get_bdim_choices(num_tensors):
assert choices[-1] == (None,) * num_tensors
return tuple(choices[:-1])
+
# NB: This is O(2 ** num_tensors).
# num_tensors ranges from 1 to 10, with 2-4 being most common.
# Try not to extravagate it if you're modifying it.
-def get_bdim_choices_batch_norm(num_tensors, _, running_mean=None, running_var=None, *args):
+def get_bdim_choices_batch_norm(
+ num_tensors, _, running_mean=None, running_var=None, *args
+):
choices = []
options = (-1, None)
@@ -176,13 +199,20 @@ def construct_in_dims(bdim_choice_for_tensors, is_tensors):
def is_batch_norm_training(op_name, kwarg_values):
- batch_norm_fns = ("nn.functional.batch_norm", "nn.functional.instance_norm") # instance norm calls batch norm
+ batch_norm_fns = (
+ "nn.functional.batch_norm",
+ "nn.functional.instance_norm",
+ ) # instance norm calls batch norm
if op_name not in batch_norm_fns:
return False
# batch norm and instance norm require the value to be a plain bool
- default_training = op_name == "nn.functional.instance_norm" # instance norm defaults to training, batch norm doesn't
- is_training = tuple(arg for arg in tuple(kwarg_values.values()) if isinstance(arg, bool))
+ default_training = (
+ op_name == "nn.functional.instance_norm"
+ ) # instance norm defaults to training, batch norm doesn't
+ is_training = tuple(
+ arg for arg in tuple(kwarg_values.values()) if isinstance(arg, bool)
+ )
if len(is_training) == 0:
return default_training
else:
@@ -190,7 +220,9 @@ def is_batch_norm_training(op_name, kwarg_values):
return is_training[0]
-def generate_vmap_inputs(arg_values, kwarg_values, is_batch_norm_and_training=False, batch_size=2):
+def generate_vmap_inputs(
+ arg_values, kwarg_values, is_batch_norm_and_training=False, batch_size=2
+):
flat_args, arg_spec = pytree.tree_flatten(tuple(arg_values))
is_tensors = [isinstance(a, torch.Tensor) for a in flat_args]
num_tensors = sum(is_tensors)
@@ -198,8 +230,11 @@ def generate_vmap_inputs(arg_values, kwarg_values, is_batch_norm_and_training=Fa
# batch it since running_mean/var will be seen as unbatched tensors
if num_tensors == 1 and is_batch_norm_and_training:
return
- bdim_choices = get_bdim_choices_batch_norm(
- num_tensors, *arg_values) if is_batch_norm_and_training else get_bdim_choices(num_tensors)
+ bdim_choices = (
+ get_bdim_choices_batch_norm(num_tensors, *arg_values)
+ if is_batch_norm_and_training
+ else get_bdim_choices(num_tensors)
+ )
@memoize
def get_batched_arg(arg, bdim):
@@ -211,8 +246,10 @@ def generate_vmap_inputs(arg_values, kwarg_values, is_batch_norm_and_training=Fa
for bdim_choice in bdim_choices:
flat_in_dims = construct_in_dims(bdim_choice, is_tensors)
- flat_batched_args = tuple(arg if in_dim is None else get_batched_arg(arg, in_dim)
- for arg, in_dim in zip(flat_args, flat_in_dims))
+ flat_batched_args = tuple(
+ arg if in_dim is None else get_batched_arg(arg, in_dim)
+ for arg, in_dim in zip(flat_args, flat_in_dims)
+ )
batched_args = pytree.tree_unflatten(flat_batched_args, arg_spec)
in_dims = pytree.tree_unflatten(flat_in_dims, arg_spec)
yield batched_args, in_dims, kwarg_values
@@ -223,13 +260,19 @@ def clone_if_tensor(x):
return x.clone()
return x
+
# Helper function to compare output of `vmap` against the
# `for-loop` version.
def _compute_quantities_for_vmap_test(
- op, orig_batched_args, orig_kwarg_values, in_dims,
- out_dim, batch_size, compute_loop_out=True,
- clone_inputs=False):
-
+ op,
+ orig_batched_args,
+ orig_kwarg_values,
+ in_dims,
+ out_dim,
+ batch_size,
+ compute_loop_out=True,
+ clone_inputs=False,
+):
def maybe_clone_inputs():
if clone_inputs:
batched_args = pytree.tree_map(clone_if_tensor, orig_batched_args)
@@ -251,7 +294,9 @@ def _compute_quantities_for_vmap_test(
# t = make_fx(vmap(f, in_dims=in_dims, out_dims=out_dim))(*batched_args, **kwarg_values)
# print(in_dims, [arg.shape for arg in batched_args], kwarg_values)
batched_args, kwarg_values = maybe_clone_inputs()
- batched_out = vmap(op, in_dims=in_dims, out_dims=out_dim)(*batched_args, **kwarg_values)
+ batched_out = vmap(op, in_dims=in_dims, out_dims=out_dim)(
+ *batched_args, **kwarg_values
+ )
# Tests case where we dispatch to a batching rule with no bdims
# This should be handled by autogenerated plumbing. For vmap support
@@ -270,7 +315,9 @@ def _compute_quantities_for_vmap_test(
inner_in_dims = (0,) + pytree.tree_map(lambda x: None, in_dims)
outer_in_dims = (0,) + in_dims
batched_args, kwarg_values = maybe_clone_inputs()
- vmapvmap_output = vmap(vmap(f, inner_in_dims), outer_in_dims)(dummy, *batched_args, **kwarg_values)
+ vmapvmap_output = vmap(vmap(f, inner_in_dims), outer_in_dims)(
+ dummy, *batched_args, **kwarg_values
+ )
yield (batched_out, loop_out, vmapvmap_output, vmapvmap_expected)
@@ -278,16 +325,36 @@ def _compute_quantities_for_vmap_test(
# Function with more friendly return types
# compared to `_compute_quantities_for_vmap_test`
def compute_quantities_for_vmap_test(
- op, orig_batched_args, orig_kwarg_values, in_dims,
- out_dim=0, batch_size=2, compute_loop_out=True,
- clone_inputs=False):
- for quantities in _compute_quantities_for_vmap_test(op, orig_batched_args, orig_kwarg_values, in_dims,
- out_dim, batch_size, compute_loop_out, clone_inputs):
+ op,
+ orig_batched_args,
+ orig_kwarg_values,
+ in_dims,
+ out_dim=0,
+ batch_size=2,
+ compute_loop_out=True,
+ clone_inputs=False,
+):
+ for quantities in _compute_quantities_for_vmap_test(
+ op,
+ orig_batched_args,
+ orig_kwarg_values,
+ in_dims,
+ out_dim,
+ batch_size,
+ compute_loop_out,
+ clone_inputs,
+ ):
yield (quantities[0], quantities[1])
yield (quantities[2], quantities[3])
-def get_fallback_and_vmap_exhaustive(op, arg_values, kwarg_values, is_batch_norm_and_training=False, compute_loop_out=True):
+def get_fallback_and_vmap_exhaustive(
+ op,
+ arg_values,
+ kwarg_values,
+ is_batch_norm_and_training=False,
+ compute_loop_out=True,
+):
out_dim = 0
batch_size = 2
@@ -303,60 +370,82 @@ def get_fallback_and_vmap_exhaustive(op, arg_values, kwarg_values, is_batch_norm
# expand it based on the `out_dim` and `batch_size`.
expected_unbatched = op(*arg_values, **kwarg_values)
expected_batched = pytree.tree_map(make_batched, expected_unbatched)
- generator = generate_vmap_inputs(arg_values, kwarg_values, is_batch_norm_and_training)
+ generator = generate_vmap_inputs(
+ arg_values, kwarg_values, is_batch_norm_and_training
+ )
for batched_args, in_dims, kwarg_values in generator:
for quantities in _compute_quantities_for_vmap_test(
- op, batched_args, kwarg_values, in_dims, out_dim, batch_size,
- compute_loop_out=False):
+ op,
+ batched_args,
+ kwarg_values,
+ in_dims,
+ out_dim,
+ batch_size,
+ compute_loop_out=False,
+ ):
assert quantities[1] is None
yield (quantities[0], expected_batched)
yield (quantities[2], quantities[3])
def opinfo_in_dict(opinfo, d):
- return (opinfo.name in d) or (f'{opinfo.name}.{opinfo.variant_test_name}' in d)
+ return (opinfo.name in d) or (f"{opinfo.name}.{opinfo.variant_test_name}" in d)
-DecorateMeta = namedtuple("DecorateMeta", [
- "op_name",
- "variant_name",
- "decorator",
- "device_type",
- "dtypes",
-])
+DecorateMeta = namedtuple(
+ "DecorateMeta",
+ [
+ "op_name",
+ "variant_name",
+ "decorator",
+ "device_type",
+ "dtypes",
+ ],
+)
-def decorate(op_name, variant_name='', *, decorator=None, device_type=None, dtypes=None):
+def decorate(
+ op_name, variant_name="", *, decorator=None, device_type=None, dtypes=None
+):
assert decorator is not None
- return DecorateMeta(op_name=op_name,
- variant_name=variant_name,
- decorator=decorator,
- device_type=device_type,
- dtypes=dtypes)
-
-
-def xfail(op_name, variant_name='', *, device_type=None, dtypes=None):
- return decorate(op_name=op_name,
- variant_name=variant_name,
- decorator=unittest.expectedFailure,
- device_type=device_type,
- dtypes=dtypes)
-
-
-def skip(op_name, variant_name='', *, device_type=None, dtypes=None):
- return decorate(op_name=op_name,
- variant_name=variant_name,
- decorator=unittest.skip("Skipped!"),
- device_type=device_type,
- dtypes=dtypes)
+ return DecorateMeta(
+ op_name=op_name,
+ variant_name=variant_name,
+ decorator=decorator,
+ device_type=device_type,
+ dtypes=dtypes,
+ )
+
+
+def xfail(op_name, variant_name="", *, device_type=None, dtypes=None):
+ return decorate(
+ op_name=op_name,
+ variant_name=variant_name,
+ decorator=unittest.expectedFailure,
+ device_type=device_type,
+ dtypes=dtypes,
+ )
+
+
+def skip(op_name, variant_name="", *, device_type=None, dtypes=None):
+ return decorate(
+ op_name=op_name,
+ variant_name=variant_name,
+ decorator=unittest.skip("Skipped!"),
+ device_type=device_type,
+ dtypes=dtypes,
+ )
def skipOps(test_case_name, base_test_name, to_skip):
all_opinfos = op_db + additional_op_db + autograd_function_db
for decorate_meta in to_skip:
- matching_opinfos = [o for o in all_opinfos
- if o.name == decorate_meta.op_name and
- o.variant_test_name == decorate_meta.variant_name]
+ matching_opinfos = [
+ o
+ for o in all_opinfos
+ if o.name == decorate_meta.op_name
+ and o.variant_test_name == decorate_meta.variant_name
+ ]
assert len(matching_opinfos) > 0, f"Couldn't find OpInfo for {decorate_meta}"
assert len(matching_opinfos) == 1, (
"OpInfos should be uniquely determined by their (name, variant_name). "
@@ -364,39 +453,55 @@ def skipOps(test_case_name, base_test_name, to_skip):
)
opinfo = matching_opinfos[0]
decorators = list(opinfo.decorators)
- new_decorator = DecorateInfo(decorate_meta.decorator,
- test_case_name, base_test_name,
- device_type=decorate_meta.device_type,
- dtypes=decorate_meta.dtypes)
+ new_decorator = DecorateInfo(
+ decorate_meta.decorator,
+ test_case_name,
+ base_test_name,
+ device_type=decorate_meta.device_type,
+ dtypes=decorate_meta.dtypes,
+ )
decorators.append(new_decorator)
opinfo.decorators = tuple(decorators)
# This decorator doesn't modify fn in any way
def wrapped(fn):
return fn
+
return wrapped
def decorateForModules(decorator, module_classes, device_type=None, dtypes=None):
-
# This decorator doesn't modify fn in any way
- def wrapped(fn, module_classes=module_classes, decorator=decorator,
- device_type=device_type, dtypes=dtypes):
- name_parts = fn.__qualname__.split('.')
- assert len(name_parts) == 2, "Decorator only applies to a test function of a test class"
+ def wrapped(
+ fn,
+ module_classes=module_classes,
+ decorator=decorator,
+ device_type=device_type,
+ dtypes=dtypes,
+ ):
+ name_parts = fn.__qualname__.split(".")
+ assert (
+ len(name_parts) == 2
+ ), "Decorator only applies to a test function of a test class"
test_case_name, base_test_name = name_parts
for module_cls in module_classes:
matching_module_infos = [m for m in module_db if m.module_cls == module_cls]
- assert len(matching_module_infos) == 1, f"Couldn't find single ModuleInfo for {module_cls}"
+ assert (
+ len(matching_module_infos) == 1
+ ), f"Couldn't find single ModuleInfo for {module_cls}"
module_info = matching_module_infos[0]
decorators = list(module_info.decorators)
- new_decorator = DecorateInfo(decorator,
- test_case_name, base_test_name,
- device_type=device_type,
- dtypes=dtypes)
+ new_decorator = DecorateInfo(
+ decorator,
+ test_case_name,
+ base_test_name,
+ device_type=device_type,
+ dtypes=dtypes,
+ )
decorators.append(new_decorator)
module_info.decorators = tuple(decorators)
return fn
+
return wrapped
@@ -405,6 +510,7 @@ def expectedFailureIf(condition):
if condition:
return unittest.expectedFailure(fn)
return fn
+
return decorator
@@ -413,26 +519,35 @@ def tol2(op_name, variant_name, override_dct, *, device_type=None):
def tol1(op_name, override_dct, *, device_type=None):
- return tol2(op_name, '', override_dct, device_type=device_type)
+ return tol2(op_name, "", override_dct, device_type=device_type)
def opsToleranceOverride(test_case_name, base_test_name, overrides):
all_opinfos = op_db + additional_op_db
for override in overrides:
op_name, variant_name, override, device_type = override
- matching_opinfos = [o for o in all_opinfos
- if o.name == op_name and o.variant_test_name == variant_name]
+ matching_opinfos = [
+ o
+ for o in all_opinfos
+ if o.name == op_name and o.variant_test_name == variant_name
+ ]
assert len(matching_opinfos) == 1, f"Couldn't find OpInfo for {override}"
opinfo = matching_opinfos[0]
decorators = list(opinfo.decorators)
- decorators.append(DecorateInfo(
- toleranceOverride(override),
- test_case_name, base_test_name, device_type=device_type))
+ decorators.append(
+ DecorateInfo(
+ toleranceOverride(override),
+ test_case_name,
+ base_test_name,
+ device_type=device_type,
+ )
+ )
opinfo.decorators = tuple(decorators)
# This decorator doesn't modify fn in any way
def wrapped(fn):
return fn
+
return wrapped
diff --git a/test/functorch/discover_coverage.py b/test/functorch/discover_coverage.py
index 80cacddec6..bbe3922b62 100644
--- a/test/functorch/discover_coverage.py
+++ b/test/functorch/discover_coverage.py
@@ -1,83 +1,86 @@
-import torch
import copy
-from torch.testing._internal.common_methods_invocations import op_db
-from functorch_additional_op_db import additional_op_db
-from enum import Enum
-import torch._functorch.top_operators_github_usage as top_ops
+import enum
import pprint
import unittest
-import enum
-from torch.testing._internal.common_device_type import toleranceOverride
+from enum import Enum
# Importing these files make modifications to the op_db that we need
import test_ops # noqa: F401
import test_vmap # noqa: F401
+import torch
+import torch._functorch.top_operators_github_usage as top_ops
+from functorch_additional_op_db import additional_op_db
+from torch.testing._internal.common_device_type import toleranceOverride
+from torch.testing._internal.common_methods_invocations import op_db
all_overridable = list(torch.overrides.get_testing_overrides().keys())
public_docs = [
- (torch.nn.functional, 'torch.nn.functional', 'docs/source/nn.functional.rst'),
- (torch.fft, 'torch.fft', 'docs/source/fft.rst'),
- (torch.special, 'torch.special', 'docs/source/special.rst'),
- (torch.linalg, 'torch.linalg', 'docs/source/linalg.rst'),
- (torch, 'torch', 'docs/source/torch.rst'),
- (torch.Tensor, 'torch.Tensor', 'docs/source/tensors.rst'),
+ (torch.nn.functional, "torch.nn.functional", "docs/source/nn.functional.rst"),
+ (torch.fft, "torch.fft", "docs/source/fft.rst"),
+ (torch.special, "torch.special", "docs/source/special.rst"),
+ (torch.linalg, "torch.linalg", "docs/source/linalg.rst"),
+ (torch, "torch", "docs/source/torch.rst"),
+ (torch.Tensor, "torch.Tensor", "docs/source/tensors.rst"),
]
# torch.abs, Tensor.abs, Tensor.abs_ are all considered to be different
-def get_public_overridable_apis(pytorch_root='/raid/rzou/pt/debug-cpu'):
+def get_public_overridable_apis(pytorch_root="/raid/rzou/pt/debug-cpu"):
results = {}
all_overridable_apis = set(torch.overrides.get_testing_overrides().keys())
for module, module_name, src in public_docs:
- with open(f'{pytorch_root}/{src}') as f:
+ with open(f"{pytorch_root}/{src}") as f:
lines = f.readlines()
# APIs eitehr begin with 4 spaces or ".. autofunction::"
- api_lines1 = [line.strip() for line in lines if line.startswith(' ' * 4)]
- api_lines2 = [line.strip()[len('.. autofunction:: '):]
- for line in lines if line.startswith('.. autofunction::')]
+ api_lines1 = [line.strip() for line in lines if line.startswith(" " * 4)]
+ api_lines2 = [
+ line.strip()[len(".. autofunction:: ") :]
+ for line in lines
+ if line.startswith(".. autofunction::")
+ ]
lines = api_lines1 + api_lines2
- lines = [line[7:] if line.startswith('Tensor.') else line for line in lines]
+ lines = [line[7:] if line.startswith("Tensor.") else line for line in lines]
lines = [line for line in lines if hasattr(module, line)]
for line in lines:
api = getattr(module, line)
if api in all_overridable_apis:
- results[f'{module_name}.{line}'] = api
+ results[f"{module_name}.{line}"] = api
return results
denylist = {
- 'torch.Tensor.data_ptr',
- 'torch.Tensor.dim',
- 'torch.Tensor.element_size',
- 'torch.Tensor.backward',
- 'torch.Tensor.as_strided',
- 'torch.Tensor.register_hook',
- 'torch.Tensor.record_stream',
- 'torch.Tensor.qscheme',
- 'torch.Tensor.ndimension',
- 'torch.Tensor.smm',
- 'torch.Tensor.sspaddmm',
- 'torch.Tensor.retain_grad',
- 'torch.Tensor.sparse_mask',
- 'torch.Tensor.sparse_dim',
- 'torch.Tensor.dense_dim',
- 'torch.Tensor.values',
- 'torch.Tensor.indices',
- 'torch.Tensor.numel',
- 'torch.Tensor.size',
- 'torch.Tensor.nelement',
- 'torch.Tensor.q_scale',
- 'torch.Tensor.q_zero_point',
- 'torch.Tensor.q_per_channel_scales',
- 'torch.Tensor.q_per_channel_zero_points',
- 'torch.Tensor.q_per_channel_axis',
- 'torch.Tensor.int_repr',
- 'torch.Tensor.to_sparse',
- 'torch.Tensor.is_inference',
- 'torch.Tensor.storage',
- 'torch.Tensor.storage_type',
+ "torch.Tensor.data_ptr",
+ "torch.Tensor.dim",
+ "torch.Tensor.element_size",
+ "torch.Tensor.backward",
+ "torch.Tensor.as_strided",
+ "torch.Tensor.register_hook",
+ "torch.Tensor.record_stream",
+ "torch.Tensor.qscheme",
+ "torch.Tensor.ndimension",
+ "torch.Tensor.smm",
+ "torch.Tensor.sspaddmm",
+ "torch.Tensor.retain_grad",
+ "torch.Tensor.sparse_mask",
+ "torch.Tensor.sparse_dim",
+ "torch.Tensor.dense_dim",
+ "torch.Tensor.values",
+ "torch.Tensor.indices",
+ "torch.Tensor.numel",
+ "torch.Tensor.size",
+ "torch.Tensor.nelement",
+ "torch.Tensor.q_scale",
+ "torch.Tensor.q_zero_point",
+ "torch.Tensor.q_per_channel_scales",
+ "torch.Tensor.q_per_channel_zero_points",
+ "torch.Tensor.q_per_channel_axis",
+ "torch.Tensor.int_repr",
+ "torch.Tensor.to_sparse",
+ "torch.Tensor.is_inference",
+ "torch.Tensor.storage",
+ "torch.Tensor.storage_type",
}
@@ -85,18 +88,19 @@ def get_method_only_ops_we_care_about():
apis = get_public_overridable_apis()
result = []
for key in apis.keys():
- if not key.startswith('torch.Tensor'):
+ if not key.startswith("torch.Tensor"):
continue
if key in denylist:
continue
- api = key.split('.')[2]
+ api = key.split(".")[2]
# filter out in-place
- if api.endswith('_'):
+ if api.endswith("_"):
continue
- if f'torch.{api}' not in apis.keys():
+ if f"torch.{api}" not in apis.keys():
result.append(api)
return result
+
# Deduplicates torch.abs and Tensor.abs
@@ -104,10 +108,10 @@ def get_public_overridable_ops():
results = get_public_overridable_apis()
cpy = copy.deepcopy(results)
for key in cpy.keys():
- if not key.startswith('torch.Tensor'):
+ if not key.startswith("torch.Tensor"):
continue
- api = key.split('.')[2]
- if f'torch.{api}' in results.keys():
+ api = key.split(".")[2]
+ if f"torch.{api}" in results.keys():
del results[key]
return results
@@ -117,7 +121,7 @@ def get_public_overridable_outplace_ops():
cpy = copy.deepcopy(results)
for key in cpy.keys():
# NB: there are no dunder methods bcs we don't document those
- if key.endswith('_'):
+ if key.endswith("_"):
del results[key]
return results
@@ -127,22 +131,23 @@ def get_public_overridable_outplace_we_care_about():
cpy = copy.deepcopy(results)
for key in cpy.keys():
# quantization
- if 'quant' in key or '.q_' in key:
+ if "quant" in key or ".q_" in key:
del results[key]
# is_cpu, etc. It doesn't make sense to have OpInfos for these
- if '.is_' in key:
+ if ".is_" in key:
del results[key]
if key in denylist and key in results:
del results[key]
return results
+
# e.g. nn.functional.softmax
def get_op(dotted_name):
- names = dotted_name.split('.')
+ names = dotted_name.split(".")
mod = torch
for name in names:
if not hasattr(mod, name):
@@ -150,6 +155,7 @@ def get_op(dotted_name):
mod = getattr(mod, name)
return mod
+
# Maps function -> [OpInfo]
@@ -176,56 +182,92 @@ def get_ops_covered_by_opinfos():
factory_fns = {
- 'tensor', 'zeros', 'ones', 'randn', 'arange', 'rand', 'empty', 'randperm',
- 'linspace', 'logspace', 'hann_window', 'full', 'eye', 'blackman_window',
- 'bartlett_window', 'randint', 'range',
+ "tensor",
+ "zeros",
+ "ones",
+ "randn",
+ "arange",
+ "rand",
+ "empty",
+ "randperm",
+ "linspace",
+ "logspace",
+ "hann_window",
+ "full",
+ "eye",
+ "blackman_window",
+ "bartlett_window",
+ "randint",
+ "range",
}
def get_top_ops(torch_threshold, nn_fn_threshold, with_counts=False):
- denylist = set({
- # These are either not real "operators", factory functions
- # that trivially work, or not-documented ops.
- 'load', 'no_grad', 'save', 'from_numpy',
- 'manual_seed', 'set_grad_enabled',
- 'set_default_tensor_type', 'set_num_threads',
- 'set_printoptions', 'numel',
- 'set_default_dtype', 'sparse_coo_tensor', 'set_rng_state',
- 'get_rng_state', 'get_default_dtype', 'initial_seed',
- 'get_num_threads', 'quantize_per_tensor',
- 'hann_window', 'is_tensor', 'as_tensor',
- 'equal', 'enable_grad', 'seed', 'is_storage',
- 'is_floating_point', 'nn.functional.torch',
- 'set_flush_denormal', 'set_num_interop_threads', 'dequantize',
- 'get_num_interop_threads', 'nn.functional.math',
- 'nn.functional.threshold_',
- 'nn.functional.selu_',
- 'nn.functional.elu_',
- 'nn.functional.rrelu_',
- 'nn.functional.leaky_relu_',
- 'nn.functional.hardtanh_',
- 'nn.functional.has_torch_function',
- 'nn.functional.has_torch_function_unary',
- 'nn.functional.has_torch_function_variadic',
- 'nn.functional.handle_torch_function',
- 'nn.functional.adaptive_max_pool1d_with_indices',
- 'nn.functional.adaptive_max_pool2d_with_indices',
- 'nn.functional.adaptive_max_pool3d_with_indices',
- 'nn.functional.fractional_max_pool2d_with_indices',
- 'nn.functional.fractional_max_pool3d_with_indices',
- 'is_complex',
- 'grad',
- 'quantize_per_channel',
- 'nn.functional.max_pool2d_with_indices',
- 'nn.functional.max_pool3d_with_indices',
- 'nn.functional.max_pool1d_with_indices',
- 'nn.functional.celu_',
- 'nn.functional.grad',
- 'nn.functional.relu_',
- 'nn.functional.boolean_dispatch',
- 'nn.functional.assert_int_or_pair',
- 'fft', # is namespace
- })
+ denylist = set(
+ {
+ # These are either not real "operators", factory functions
+ # that trivially work, or not-documented ops.
+ "load",
+ "no_grad",
+ "save",
+ "from_numpy",
+ "manual_seed",
+ "set_grad_enabled",
+ "set_default_tensor_type",
+ "set_num_threads",
+ "set_printoptions",
+ "numel",
+ "set_default_dtype",
+ "sparse_coo_tensor",
+ "set_rng_state",
+ "get_rng_state",
+ "get_default_dtype",
+ "initial_seed",
+ "get_num_threads",
+ "quantize_per_tensor",
+ "hann_window",
+ "is_tensor",
+ "as_tensor",
+ "equal",
+ "enable_grad",
+ "seed",
+ "is_storage",
+ "is_floating_point",
+ "nn.functional.torch",
+ "set_flush_denormal",
+ "set_num_interop_threads",
+ "dequantize",
+ "get_num_interop_threads",
+ "nn.functional.math",
+ "nn.functional.threshold_",
+ "nn.functional.selu_",
+ "nn.functional.elu_",
+ "nn.functional.rrelu_",
+ "nn.functional.leaky_relu_",
+ "nn.functional.hardtanh_",
+ "nn.functional.has_torch_function",
+ "nn.functional.has_torch_function_unary",
+ "nn.functional.has_torch_function_variadic",
+ "nn.functional.handle_torch_function",
+ "nn.functional.adaptive_max_pool1d_with_indices",
+ "nn.functional.adaptive_max_pool2d_with_indices",
+ "nn.functional.adaptive_max_pool3d_with_indices",
+ "nn.functional.fractional_max_pool2d_with_indices",
+ "nn.functional.fractional_max_pool3d_with_indices",
+ "is_complex",
+ "grad",
+ "quantize_per_channel",
+ "nn.functional.max_pool2d_with_indices",
+ "nn.functional.max_pool3d_with_indices",
+ "nn.functional.max_pool1d_with_indices",
+ "nn.functional.celu_",
+ "nn.functional.grad",
+ "nn.functional.relu_",
+ "nn.functional.boolean_dispatch",
+ "nn.functional.assert_int_or_pair",
+ "fft", # is namespace
+ }
+ )
torch_ops = top_ops.top_torch
nn_fn_ops = top_ops.get_nn_functional_top_list()
@@ -246,7 +288,7 @@ def get_ops_percentage(torch_threshold, nn_fn_threshold):
def get_num_usages(opname):
# Ignore this, this is heavily inflated
- if opname == 't':
+ if opname == "t":
return 0
result = [op[1] for op in data if op[0] == opname]
assert len(result) == 1
@@ -296,13 +338,13 @@ class Status(Enum):
tests = {
- 'test_vmap_exhaustive',
- 'test_op_has_batch_rule',
- 'test_vjp',
- 'test_vmapvjp',
- 'test_vmapvjp_has_batch_rule',
- 'test_jvp',
- 'test_vmapjvp',
+ "test_vmap_exhaustive",
+ "test_op_has_batch_rule",
+ "test_vjp",
+ "test_vmapvjp",
+ "test_vmapvjp_has_batch_rule",
+ "test_jvp",
+ "test_vmapjvp",
}
@@ -336,7 +378,7 @@ def get_skipped_or_xfailed_ops_for(test_name):
opinfos = op_to_opinfo[op]
for opinfo in opinfos:
for decorator in opinfo.decorators:
- if not hasattr(decorator, 'test_name'):
+ if not hasattr(decorator, "test_name"):
continue
if decorator.test_name != test_name:
continue
@@ -363,7 +405,7 @@ def get_statuses(for_subset=None, invert=False):
result = copy.deepcopy(tests)
for opinfo in opinfos:
for decorator in opinfo.decorators:
- if not hasattr(decorator, 'test_name'):
+ if not hasattr(decorator, "test_name"):
continue
if decorator.test_name in tests and decorator.test_name in result:
result.remove(decorator.test_name)
@@ -404,7 +446,9 @@ overridable_outplace_ops = get_public_overridable_outplace_ops()
overridable_outplace_we_care_about = get_public_overridable_outplace_we_care_about()
tested_overridable_outplace_ops = get_covered_ops(overridable_outplace_we_care_about)
-untested_overridable_outplace_ops = get_covered_ops(overridable_outplace_we_care_about, invert=True)
+untested_overridable_outplace_ops = get_covered_ops(
+ overridable_outplace_we_care_about, invert=True
+)
# print("List of OpInfos we need:")
# for key in untested_overridable_outplace_ops.keys():
@@ -412,15 +456,19 @@ untested_overridable_outplace_ops = get_covered_ops(overridable_outplace_we_care
# print("-" * 80)
# print("")
-print(f'Overridable public APIs: {len(overridable_apis)}')
-print(f'Overridable public ops: {len(overridable_ops)}')
-print(f'Overridable public outplace ops: {len(overridable_outplace_ops)}')
-print(f'Overridable public outplace ops we care about: {len(overridable_outplace_we_care_about)}')
-print(f'OpInfo-tested overridable public outplace ops: {len(tested_overridable_outplace_ops)}')
+print(f"Overridable public APIs: {len(overridable_apis)}")
+print(f"Overridable public ops: {len(overridable_ops)}")
+print(f"Overridable public outplace ops: {len(overridable_outplace_ops)}")
+print(
+ f"Overridable public outplace ops we care about: {len(overridable_outplace_we_care_about)}"
+)
+print(
+ f"OpInfo-tested overridable public outplace ops: {len(tested_overridable_outplace_ops)}"
+)
def remove_torch(name):
- assert name[:6] == 'torch.'
+ assert name[:6] == "torch."
return name[6:]
@@ -430,19 +478,19 @@ def get_list_of_all_tests():
mytest = {
- 'test_vmap_exhaustive',
- 'test_op_has_batch_rule',
- 'test_vjp',
- 'test_vmapvjp',
- 'test_vmapvjp_has_batch_rule',
+ "test_vmap_exhaustive",
+ "test_op_has_batch_rule",
+ "test_vjp",
+ "test_vmapvjp",
+ "test_vmapvjp_has_batch_rule",
}
-print('*' * 80)
+print("*" * 80)
all_tests = get_list_of_all_tests()
for test in mytest:
result = get_skipped_or_xfailed_ops_for(test)
diff = len(all_tests - result)
- print(f'{test}: {diff}')
+ print(f"{test}: {diff}")
def get_jvp_coverage(subset=None):
@@ -452,33 +500,42 @@ def get_jvp_coverage(subset=None):
op_to_opinfo = get_ops_covered_by_opinfos()
ops_dct = tested_overridable_outplace_ops
if subset is not None:
- ops_dct = {name: op for name, op in ops_dct.items()
- if remove_torch(name) in subset}
- supports_autograd_ops_dct = {name: op_to_opinfo[fn] for name, fn in ops_dct.items()
- if op_to_opinfo[fn][0].supports_autograd}
- supports_forwardad_ops_dct = {name: op_to_opinfo[fn] for name, fn in ops_dct.items()
- if op_to_opinfo[fn][0].supports_forward_ad}
+ ops_dct = {
+ name: op for name, op in ops_dct.items() if remove_torch(name) in subset
+ }
+ supports_autograd_ops_dct = {
+ name: op_to_opinfo[fn]
+ for name, fn in ops_dct.items()
+ if op_to_opinfo[fn][0].supports_autograd
+ }
+ supports_forwardad_ops_dct = {
+ name: op_to_opinfo[fn]
+ for name, fn in ops_dct.items()
+ if op_to_opinfo[fn][0].supports_forward_ad
+ }
ops = {remove_torch(test) for test in list(ops_dct.keys())}
- supports_autograd = {remove_torch(test)
- for test in list(supports_autograd_ops_dct.keys())}
- supports_forward_ad = {remove_torch(test)
- for test in list(supports_forwardad_ops_dct.keys())}
+ supports_autograd = {
+ remove_torch(test) for test in list(supports_autograd_ops_dct.keys())
+ }
+ supports_forward_ad = {
+ remove_torch(test) for test in list(supports_forwardad_ops_dct.keys())
+ }
assert supports_forward_ad.issubset(supports_autograd)
assert supports_autograd.issubset(ops)
- failed_ops = get_skipped_or_xfailed_ops_for('test_jvp')
+ failed_ops = get_skipped_or_xfailed_ops_for("test_jvp")
coverage = len(supports_forward_ad - failed_ops)
no_forward_ad = len(supports_autograd) - len(supports_forward_ad)
- print(f'test_jvp, {coverage}, {no_forward_ad}, {len(ops)}')
+ print(f"test_jvp, {coverage}, {no_forward_ad}, {len(ops)}")
get_jvp_coverage()
get_jvp_coverage(get_top_ops(100, 25))
for op in get_top_ops(100, 25):
print(op)
-print('*' * 80)
+print("*" * 80)
# result = get_skipped_or_xfailed_ops_for('test_vmap_exhaustive')
# result = get_skipped_or_xfailed_ops_for('test_op_has_batch_rule')
@@ -489,16 +546,16 @@ print('*' * 80)
statuses = transpose_statuses()
for test in tests:
- print(f'{test} coverage {len(statuses[test])}')
+ print(f"{test} coverage {len(statuses[test])}")
method_only_ops = get_method_only_ops_we_care_about()
# for op in method_only_ops:
# print(f' {op},')
top_ops_not_covered_by_opinfo = get_top_ops_not_covered_by_opinfo(100, 25)
-print('=' * 80)
+print("=" * 80)
for op in top_ops_not_covered_by_opinfo:
- print(f'{op}, {top_ops.usage_count[op]}')
+ print(f"{op}, {top_ops.usage_count[op]}")
# print("top ops not covered by opinfo: ")
# top_ops_not_covered_by_opinfo = get_top_ops_not_covered_by_opinfo(200, 50)
@@ -523,58 +580,60 @@ def remove_from_set(parent, to_remove):
def print_coverage_info(th=100, nn=25):
- print('=' * 80)
+ print("=" * 80)
print(f"top {th}, {nn} coverage")
statuses = transpose_statuses(get_top_ops(th, nn), invert=True)
top_ops_not_covered_by_opinfo = get_top_ops_not_covered_by_opinfo(th, nn)
# testing problems
exemptions = {
- 'torch.nn.functional.dropout', # randomness
+ "torch.nn.functional.dropout", # randomness
}
# Allowed exemptions
vmap_exemptions = {
- 'torch.randn_like', # randomness
- 'torch.rand_like', # randomness
- 'torch.allclose', # number output
- 'torch.unique', # dynamic
- 'torch.nonzero', # dynamic
- 'torch.masked_select', # dynamic
- 'torch.prod', # dynamic (backward)
- 'torch.norm', # norm with nuc is not commonly used; we support the other cases.
- 'torch.svd', # There isn't a bug, it is just nondeterministic so we can't test it.
- 'torch.nn.functional.embedding', # We support everything except the sparse option.
+ "torch.randn_like", # randomness
+ "torch.rand_like", # randomness
+ "torch.allclose", # number output
+ "torch.unique", # dynamic
+ "torch.nonzero", # dynamic
+ "torch.masked_select", # dynamic
+ "torch.prod", # dynamic (backward)
+ "torch.norm", # norm with nuc is not commonly used; we support the other cases.
+ "torch.svd", # There isn't a bug, it is just nondeterministic so we can't test it.
+ "torch.nn.functional.embedding", # We support everything except the sparse option.
}
- remove_from_set(statuses['test_vmap_exhaustive'], vmap_exemptions)
- remove_from_set(statuses['test_vmapvjp'], vmap_exemptions)
- remove_from_set(statuses['test_vmapvjp_has_batch_rule'], vmap_exemptions)
- remove_from_set(statuses['test_op_has_batch_rule'], vmap_exemptions)
- remove_from_set(statuses['test_vmapjvp'], vmap_exemptions)
+ remove_from_set(statuses["test_vmap_exhaustive"], vmap_exemptions)
+ remove_from_set(statuses["test_vmapvjp"], vmap_exemptions)
+ remove_from_set(statuses["test_vmapvjp_has_batch_rule"], vmap_exemptions)
+ remove_from_set(statuses["test_op_has_batch_rule"], vmap_exemptions)
+ remove_from_set(statuses["test_vmapjvp"], vmap_exemptions)
for test in tests:
remove_from_set(statuses[test], exemptions)
print(f"total ops in set: {th + nn}")
print(f"tested by OpInfo: {th + nn - len(top_ops_not_covered_by_opinfo)}")
for test in tests:
- if test in {'test_jvp', 'test_vmapjvp'}:
+ if test in {"test_jvp", "test_vmapjvp"}:
continue
- print(f'{test} failing coverage {len(statuses[test])}')
+ print(f"{test} failing coverage {len(statuses[test])}")
# We don't care about these yet
- del statuses['test_jvp']
- del statuses['test_vmapjvp']
+ del statuses["test_jvp"]
+ del statuses["test_vmapjvp"]
pprint.pprint(statuses)
def get_name_to_opinfo_map():
dct = {}
- for op in (op_db + additional_op_db):
+ for op in op_db + additional_op_db:
+
def add(name, op):
if name not in dct:
dct[name] = []
dct[name].append(op)
+
add(op.name, op)
for alias in op.aliases:
add(alias.name, op)
@@ -591,42 +650,54 @@ class Support(enum.Enum):
FACTORY_FNS = {
- 'tensor', 'zeros', 'ones', 'randn', 'arange', 'rand', 'empty', 'range',
- 'full', 'randperm', 'eye', 'randint', 'linspace', 'logspace',
+ "tensor",
+ "zeros",
+ "ones",
+ "randn",
+ "arange",
+ "rand",
+ "empty",
+ "range",
+ "full",
+ "randperm",
+ "eye",
+ "randint",
+ "linspace",
+ "logspace",
}
VJP_EXEMPTIONS = {
- 'nn.functional.dropout', # not actually problem, randomness testing artifact
- 'nn.functional.dropout2d', # not actually problem, randomness testing artifact
- 'nn.functional.rrelu', # not actually problem, randomness testing artifact
- 'bernoulli', # not actually problem, randomness testing artifact
- 'normal', # not actually problem, randomness testing artifact
+ "nn.functional.dropout", # not actually problem, randomness testing artifact
+ "nn.functional.dropout2d", # not actually problem, randomness testing artifact
+ "nn.functional.rrelu", # not actually problem, randomness testing artifact
+ "bernoulli", # not actually problem, randomness testing artifact
+ "normal", # not actually problem, randomness testing artifact
}
VMAP_EXEMPTIONS = {
- 'randn_like', # randomness
- 'rand_like', # randomness
- 'allclose', # number output
- 'unique', # dynamic
- 'nonzero', # dynamic
- 'masked_select', # dynamic
- 'prod', # dynamic (backward)
- 'norm', # norm with nuc is not commonly used; we support the other cases.
- 'svd', # There isn't a bug, it is just nondeterministic so we can't test it.
- 'nn.functional.embedding', # We support everything except the sparse option.
- 'nn.functional.dropout', # randomness
- 'nn.functional.dropout2d', # randomness
- 'bernoulli', # randomness
- 'multinomial', # randomness
- 'normal', # randomness
+ "randn_like", # randomness
+ "rand_like", # randomness
+ "allclose", # number output
+ "unique", # dynamic
+ "nonzero", # dynamic
+ "masked_select", # dynamic
+ "prod", # dynamic (backward)
+ "norm", # norm with nuc is not commonly used; we support the other cases.
+ "svd", # There isn't a bug, it is just nondeterministic so we can't test it.
+ "nn.functional.embedding", # We support everything except the sparse option.
+ "nn.functional.dropout", # randomness
+ "nn.functional.dropout2d", # randomness
+ "bernoulli", # randomness
+ "multinomial", # randomness
+ "normal", # randomness
}
JVP_EXEMPTIONS = {
- 'nn.functional.dropout', # not actually problem, randomness testing artifact
- 'nn.functional.dropout2d', # not actually problem, randomness testing artifact
- 'nn.functional.rrelu', # not actually problem, randomness testing artifact
- 'normal', # not actually problem, randomness testing artifact
- 'bernoulli', # not actually problem, randomness testing artifact
+ "nn.functional.dropout", # not actually problem, randomness testing artifact
+ "nn.functional.dropout2d", # not actually problem, randomness testing artifact
+ "nn.functional.rrelu", # not actually problem, randomness testing artifact
+ "normal", # not actually problem, randomness testing artifact
+ "bernoulli", # not actually problem, randomness testing artifact
}
@@ -651,7 +722,7 @@ class Operator:
return Support.UNKNOWN
for opinfo in self.opinfos:
for decorator in opinfo.decorators:
- if not hasattr(decorator, 'test_name'):
+ if not hasattr(decorator, "test_name"):
continue
if decorator.test_name != test_name:
continue
@@ -674,35 +745,35 @@ class Operator:
return Support.YES
if self.name in VJP_EXEMPTIONS:
return Support.YES
- return self.no_opinfos_skip_test('test_vjp')
+ return self.no_opinfos_skip_test("test_vjp")
def supports_vmap(self):
if self.name in FACTORY_FNS:
return Support.YES
if self.name in VMAP_EXEMPTIONS:
return Support.YES
- return self.no_opinfos_skip_test('test_vmap_exhaustive')
+ return self.no_opinfos_skip_test("test_vmap_exhaustive")
def supports_fast_vmap(self):
if self.name in FACTORY_FNS:
return Support.YES
if self.name in VMAP_EXEMPTIONS:
return Support.YES
- return self.no_opinfos_skip_test('test_op_has_batch_rule')
+ return self.no_opinfos_skip_test("test_op_has_batch_rule")
def supports_vmapvjp(self):
if self.name in FACTORY_FNS:
return Support.YES
if self.name in VMAP_EXEMPTIONS:
return Support.YES
- return self.no_opinfos_skip_test('test_vmapvjp')
+ return self.no_opinfos_skip_test("test_vmapvjp")
def supports_fast_vmapvjp(self):
if self.name in FACTORY_FNS:
return Support.YES
if self.name in VMAP_EXEMPTIONS:
return Support.YES
- return self.no_opinfos_skip_test('test_vmapvjp_has_batch_rule')
+ return self.no_opinfos_skip_test("test_vmapvjp_has_batch_rule")
def supports_jvp(self):
if self.name in FACTORY_FNS:
@@ -711,56 +782,58 @@ class Operator:
return Support.YES
if not self.has_opinfo():
return Support.UNKNOWN
- if self.any_opinfo_attr('supports_autograd') and \
- not self.all_opinfo_attr('supports_forward_ad'):
+ if self.any_opinfo_attr("supports_autograd") and not self.all_opinfo_attr(
+ "supports_forward_ad"
+ ):
return Support.NO
- return self.no_opinfos_skip_test('test_jvp')
+ return self.no_opinfos_skip_test("test_jvp")
def supports_jvpvjp(self):
if self.name in FACTORY_FNS:
return Support.YES
exemptions = {
# we have support (see OpInfo), testing artifact
- 'nn.functional.dropout2d',
- 'nn.functional.dropout',
+ "nn.functional.dropout2d",
+ "nn.functional.dropout",
# exception: we dont even support double backward for this
- 'nn.functional.hardswish',
- 'bernoulli', # this isn't differentiable
- 'normal', # not differentiable
+ "nn.functional.hardswish",
+ "bernoulli", # this isn't differentiable
+ "normal", # not differentiable
}
if self.name in exemptions:
return Support.YES
- return self.no_opinfos_skip_test('test_jvpvjp')
+ return self.no_opinfos_skip_test("test_jvpvjp")
def _supports_vmapjvp_base(self, test):
if self.name in FACTORY_FNS:
return Support.YES
VMAPJVP_EXEMPTIONS = {
- 'prod', # dynamic (backward)
- 'nn.functional.batch_norm', # testing problem
- 'normal', # not actually problem, randomness testing artifact
- 'bernoulli', # not actually problem, randomness testing artifact
- 'nn.functional.dropout2d', # not actually problem, randomness testing artifact
- 'nn.functional.dropout', # not actually problem, randomness testing artifact
+ "prod", # dynamic (backward)
+ "nn.functional.batch_norm", # testing problem
+ "normal", # not actually problem, randomness testing artifact
+ "bernoulli", # not actually problem, randomness testing artifact
+ "nn.functional.dropout2d", # not actually problem, randomness testing artifact
+ "nn.functional.dropout", # not actually problem, randomness testing artifact
# Not a problem.
# It's just that the max_norm testing mutates inputs...
# (we have our own functorch variant of the OpInfo without max_norm)
- 'nn.functional.embedding',
+ "nn.functional.embedding",
}
if self.name in VMAPJVP_EXEMPTIONS:
return Support.YES
if not self.has_opinfo():
return Support.UNKNOWN
- if self.any_opinfo_attr('supports_autograd') and \
- not self.all_opinfo_attr('supports_forward_ad'):
+ if self.any_opinfo_attr("supports_autograd") and not self.all_opinfo_attr(
+ "supports_forward_ad"
+ ):
return Support.NO
return self.no_opinfos_skip_test(test)
def supports_vmapjvp(self):
- return self._supports_vmapjvp_base('test_vmapjvpall')
+ return self._supports_vmapjvp_base("test_vmapjvpall")
def supports_fast_vmapjvp(self):
- return self._supports_vmapjvp_base('test_vmapjvpall_has_batch_rule')
+ return self._supports_vmapjvp_base("test_vmapjvpall_has_batch_rule")
class OperatorSet:
@@ -790,12 +863,12 @@ class OperatorSet:
names = dct.keys()
names_sanitized = []
for n in names:
- torch_tensor = 'torch.Tensor.'
- torch_dot = 'torch.'
+ torch_tensor = "torch.Tensor."
+ torch_dot = "torch."
if n.startswith(torch_tensor):
- names_sanitized.append(n[len(torch_tensor):])
+ names_sanitized.append(n[len(torch_tensor) :])
elif n.startswith(torch_dot):
- names_sanitized.append(n[len(torch_dot):])
+ names_sanitized.append(n[len(torch_dot) :])
else:
raise AssertionError()
return cls.from_names(names_sanitized)
@@ -812,32 +885,32 @@ class OperatorSet:
def summary(self):
checks = [
- 'supports_vjp',
- 'supports_vmap',
- 'supports_fast_vmap',
- 'supports_vmapvjp',
- 'supports_fast_vmapvjp',
- 'supports_jvp',
- 'supports_vmapjvp',
- 'supports_fast_vmapjvp',
- 'supports_jvpvjp',
+ "supports_vjp",
+ "supports_vmap",
+ "supports_fast_vmap",
+ "supports_vmapvjp",
+ "supports_fast_vmapvjp",
+ "supports_jvp",
+ "supports_vmapjvp",
+ "supports_fast_vmapjvp",
+ "supports_jvpvjp",
]
- result = ['test, yes, no, unknown']
+ result = ["test, yes, no, unknown"]
for check in checks:
accessor = getattr(Operator, check)
all_results = self.query(accessor)
yes_amt = len(all_results[Support.YES])
no_amt = len(all_results[Support.NO])
unknown_amt = len(all_results[Support.UNKNOWN])
- result.append(f'{check}, {yes_amt}, {no_amt}, {unknown_amt}')
- return '\n'.join(result)
+ result.append(f"{check}, {yes_amt}, {no_amt}, {unknown_amt}")
+ return "\n".join(result)
opset = OperatorSet.all()
has_no_opinfo = opset.query(Operator.has_opinfo, (False,))
print("=" * 30 + " Summary " + "=" * 30)
-print(f'% of usages on github: {get_ops_percentage(99999, 99999)}')
+print(f"% of usages on github: {get_ops_percentage(99999, 99999)}")
print(opset.summary())
# sanity checks
@@ -845,7 +918,7 @@ result = opset.query(Operator.supports_vjp, (Support.NO, Support.UNKNOWN))
# pprint.pprint(result)
print("=" * 30 + " Top 60 Summary " + "=" * 30)
-print(f'% of usages on github: {get_ops_percentage(35, 25)}')
+print(f"% of usages on github: {get_ops_percentage(35, 25)}")
opset = OperatorSet.from_top_ops_threshold(35, 25)
# result = opset.query(Operator.supports_vmapjvp, (Support.NO, Support.UNKNOWN))
# pprint.pprint(result)
@@ -861,7 +934,7 @@ opset = OperatorSet.from_top_ops_threshold(35, 25)
print(opset.summary())
print("=" * 30 + " Top 125 Summary " + "=" * 30)
-print(f'% of usages on github: {get_ops_percentage(100, 25)}')
+print(f"% of usages on github: {get_ops_percentage(100, 25)}")
opset = OperatorSet.from_top125()
# result = opset.query(Operator.supports_vmap, (Support.NO, Support.UNKNOWN))
# pprint.pprint(result)
diff --git a/test/functorch/functorch_additional_op_db.py b/test/functorch/functorch_additional_op_db.py
index ecd3bcbe4a..1c0ce20c52 100644
--- a/test/functorch/functorch_additional_op_db.py
+++ b/test/functorch/functorch_additional_op_db.py
@@ -1,12 +1,20 @@
-from functools import partial
import itertools
import unittest
+from functools import partial
import torch
-from torch.testing._internal.common_dtype import floating_types, floating_types_and, all_types_and_complex_and
+from torch.testing._internal.common_dtype import (
+ all_types_and_complex_and,
+ floating_types,
+ floating_types_and,
+)
+from torch.testing._internal.common_methods_invocations import (
+ DecorateInfo,
+ OpInfo,
+ SampleInput,
+)
from torch.testing._internal.common_utils import make_tensor
-from torch.testing._internal.common_methods_invocations import OpInfo, SampleInput, DecorateInfo
# List of OpInfos that aren't in PyTorch Core yet.
# They are here because we wanted a fast way of writing OpInfos and may not be
@@ -18,118 +26,176 @@ additional_op_db = []
# https://github.com/pytorch/pytorch/pull/61068
-def sample_inputs_conv2d(has_bias, self, device, dtype, requires_grad, extra_args=(), groups=1):
+def sample_inputs_conv2d(
+ has_bias, self, device, dtype, requires_grad, extra_args=(), groups=1
+):
in_ch, out_ch = 6, 4
- inp = make_tensor((2, in_ch * groups, 7, 5), device=device, dtype=dtype,
- requires_grad=requires_grad, low=-1, high=1)
- weight = make_tensor((out_ch * groups, in_ch, 3, 2), device=device, dtype=dtype,
- requires_grad=requires_grad, low=-1, high=1)
+ inp = make_tensor(
+ (2, in_ch * groups, 7, 5),
+ device=device,
+ dtype=dtype,
+ requires_grad=requires_grad,
+ low=-1,
+ high=1,
+ )
+ weight = make_tensor(
+ (out_ch * groups, in_ch, 3, 2),
+ device=device,
+ dtype=dtype,
+ requires_grad=requires_grad,
+ low=-1,
+ high=1,
+ )
bias = None
if has_bias:
- bias = make_tensor((out_ch * groups,), device=device, dtype=dtype,
- requires_grad=requires_grad, low=-1, high=1)
+ bias = make_tensor(
+ (out_ch * groups,),
+ device=device,
+ dtype=dtype,
+ requires_grad=requires_grad,
+ low=-1,
+ high=1,
+ )
return [SampleInput(inp, args=((weight, bias) + extra_args))]
-additional_op_db.extend([
- OpInfo('nn.functional.conv2d',
- aten_name="conv2d",
- variant_test_name='no_bias',
- supports_autograd=True,
- supports_forward_ad=True,
- sample_inputs_func=partial(sample_inputs_conv2d, False),
- dtypes=floating_types(),
- dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
- supports_out=False),
- OpInfo('nn.functional.conv2d',
- aten_name="conv2d",
- variant_test_name='with_bias',
- supports_autograd=True,
- supports_forward_ad=True,
- sample_inputs_func=partial(sample_inputs_conv2d, True),
- dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
- dtypes=floating_types(),
- supports_out=False),
- OpInfo('nn.functional.conv2d',
- aten_name="conv2d",
- variant_test_name='stride_with_bias',
- supports_autograd=True,
- supports_forward_ad=True,
- sample_inputs_func=partial(sample_inputs_conv2d, True, extra_args=((2, 2))),
- dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
- dtypes=floating_types(),
- supports_out=False),
- OpInfo('nn.functional.conv2d',
- aten_name="conv2d",
- variant_test_name='stride_no_bias',
- supports_autograd=True,
- supports_forward_ad=True,
- sample_inputs_func=partial(sample_inputs_conv2d, False, extra_args=((2, 2))),
- dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
- dtypes=floating_types(),
- supports_out=False),
- OpInfo('nn.functional.conv2d',
- aten_name="conv2d",
- variant_test_name='stride_padding_with_bias',
- supports_autograd=True,
- supports_forward_ad=True,
- sample_inputs_func=partial(sample_inputs_conv2d, True, extra_args=((2, 2), (1, 1))),
- dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
- dtypes=floating_types(),
- supports_out=False),
- OpInfo('nn.functional.conv2d',
- aten_name="conv2d",
- variant_test_name='stride_padding_no_bias',
- supports_autograd=True,
- supports_forward_ad=True,
- sample_inputs_func=partial(sample_inputs_conv2d, False, extra_args=((2, 2), (1, 1))),
- dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
- dtypes=floating_types(),
- supports_out=False),
- OpInfo('nn.functional.conv2d',
- aten_name="conv2d",
- variant_test_name='strided_padding_dilation_with_bias',
- supports_autograd=True,
- supports_forward_ad=True,
- sample_inputs_func=partial(sample_inputs_conv2d, True, extra_args=((2, 2), (1, 1), (2, 2))),
- dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
- dtypes=floating_types(),
- supports_out=False),
- OpInfo('nn.functional.conv2d',
- aten_name="conv2d",
- variant_test_name='strided_padding_dilation_no_bias',
- supports_autograd=True,
- supports_forward_ad=True,
- sample_inputs_func=partial(sample_inputs_conv2d, True, extra_args=((2, 2), (1, 1), (2, 2))),
- dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
- dtypes=floating_types(),
- supports_out=False),
- OpInfo('nn.functional.conv2d',
- aten_name="conv2d",
- variant_test_name='stride_groups_with_bias',
- supports_autograd=True,
- supports_forward_ad=True,
- sample_inputs_func=partial(sample_inputs_conv2d, True, extra_args=((2, 3), 0, 1, 2), groups=2),
- dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
- dtypes=floating_types(),
- supports_out=False),
- OpInfo('nn.functional.conv2d',
- aten_name="conv2d",
- variant_test_name='stride_depthwise_with_bias',
- supports_autograd=True,
- supports_forward_ad=True,
- sample_inputs_func=partial(sample_inputs_conv2d, True, extra_args=((2, 3), 0, 1, 6), groups=6),
- dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
- dtypes=floating_types(),
- supports_out=False),
-])
+additional_op_db.extend(
+ [
+ OpInfo(
+ "nn.functional.conv2d",
+ aten_name="conv2d",
+ variant_test_name="no_bias",
+ supports_autograd=True,
+ supports_forward_ad=True,
+ sample_inputs_func=partial(sample_inputs_conv2d, False),
+ dtypes=floating_types(),
+ dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
+ supports_out=False,
+ ),
+ OpInfo(
+ "nn.functional.conv2d",
+ aten_name="conv2d",
+ variant_test_name="with_bias",
+ supports_autograd=True,
+ supports_forward_ad=True,
+ sample_inputs_func=partial(sample_inputs_conv2d, True),
+ dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
+ dtypes=floating_types(),
+ supports_out=False,
+ ),
+ OpInfo(
+ "nn.functional.conv2d",
+ aten_name="conv2d",
+ variant_test_name="stride_with_bias",
+ supports_autograd=True,
+ supports_forward_ad=True,
+ sample_inputs_func=partial(sample_inputs_conv2d, True, extra_args=((2, 2))),
+ dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
+ dtypes=floating_types(),
+ supports_out=False,
+ ),
+ OpInfo(
+ "nn.functional.conv2d",
+ aten_name="conv2d",
+ variant_test_name="stride_no_bias",
+ supports_autograd=True,
+ supports_forward_ad=True,
+ sample_inputs_func=partial(
+ sample_inputs_conv2d, False, extra_args=((2, 2))
+ ),
+ dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
+ dtypes=floating_types(),
+ supports_out=False,
+ ),
+ OpInfo(
+ "nn.functional.conv2d",
+ aten_name="conv2d",
+ variant_test_name="stride_padding_with_bias",
+ supports_autograd=True,
+ supports_forward_ad=True,
+ sample_inputs_func=partial(
+ sample_inputs_conv2d, True, extra_args=((2, 2), (1, 1))
+ ),
+ dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
+ dtypes=floating_types(),
+ supports_out=False,
+ ),
+ OpInfo(
+ "nn.functional.conv2d",
+ aten_name="conv2d",
+ variant_test_name="stride_padding_no_bias",
+ supports_autograd=True,
+ supports_forward_ad=True,
+ sample_inputs_func=partial(
+ sample_inputs_conv2d, False, extra_args=((2, 2), (1, 1))
+ ),
+ dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
+ dtypes=floating_types(),
+ supports_out=False,
+ ),
+ OpInfo(
+ "nn.functional.conv2d",
+ aten_name="conv2d",
+ variant_test_name="strided_padding_dilation_with_bias",
+ supports_autograd=True,
+ supports_forward_ad=True,
+ sample_inputs_func=partial(
+ sample_inputs_conv2d, True, extra_args=((2, 2), (1, 1), (2, 2))
+ ),
+ dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
+ dtypes=floating_types(),
+ supports_out=False,
+ ),
+ OpInfo(
+ "nn.functional.conv2d",
+ aten_name="conv2d",
+ variant_test_name="strided_padding_dilation_no_bias",
+ supports_autograd=True,
+ supports_forward_ad=True,
+ sample_inputs_func=partial(
+ sample_inputs_conv2d, True, extra_args=((2, 2), (1, 1), (2, 2))
+ ),
+ dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
+ dtypes=floating_types(),
+ supports_out=False,
+ ),
+ OpInfo(
+ "nn.functional.conv2d",
+ aten_name="conv2d",
+ variant_test_name="stride_groups_with_bias",
+ supports_autograd=True,
+ supports_forward_ad=True,
+ sample_inputs_func=partial(
+ sample_inputs_conv2d, True, extra_args=((2, 3), 0, 1, 2), groups=2
+ ),
+ dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
+ dtypes=floating_types(),
+ supports_out=False,
+ ),
+ OpInfo(
+ "nn.functional.conv2d",
+ aten_name="conv2d",
+ variant_test_name="stride_depthwise_with_bias",
+ supports_autograd=True,
+ supports_forward_ad=True,
+ sample_inputs_func=partial(
+ sample_inputs_conv2d, True, extra_args=((2, 3), 0, 1, 6), groups=6
+ ),
+ dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
+ dtypes=floating_types(),
+ supports_out=False,
+ ),
+ ]
+)
# TODO: PyTorch core has a check for if requires_grad=True or not.
# We actually want to test more things for backward here which is why we have our own
def sample_inputs_embedding(op_info, device, dtype, requires_grad, **kwargs):
def make_input(shape):
- return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad)
+ return make_tensor(
+ shape, device=device, dtype=dtype, requires_grad=requires_grad
+ )
def make_long_input(shape, *, low, high):
return make_tensor(shape, device=device, dtype=torch.long, low=low, high=high)
@@ -140,32 +206,53 @@ def sample_inputs_embedding(op_info, device, dtype, requires_grad, **kwargs):
def generator():
# 0-D index tensor
idx = make_long_input((), low=0, high=M)
- yield SampleInput(make_input((M, S)), args=(idx,),)
+ yield SampleInput(
+ make_input((M, S)),
+ args=(idx,),
+ )
# 1-D index tensor
idx = make_long_input((S,), low=0, high=M)
- yield SampleInput(make_input((M, S)), args=(idx,),)
+ yield SampleInput(
+ make_input((M, S)),
+ args=(idx,),
+ )
# 2-D index tensor
idx = make_long_input((S, S), low=0, high=M)
- yield SampleInput(make_input((M, S)), args=(idx,),)
+ yield SampleInput(
+ make_input((M, S)),
+ args=(idx,),
+ )
idx = make_long_input((2, 2), low=0, high=S)
idx[0, 0] = 2
idx[1, 1] = 2
- yield SampleInput(make_input((S, S)), args=(idx,), kwargs={'padding_idx': 2},)
+ yield SampleInput(
+ make_input((S, S)),
+ args=(idx,),
+ kwargs={"padding_idx": 2},
+ )
idx = make_long_input((2, 2), low=0, high=S)
idx[0, 0] = 4
idx[1, 1] = 4
- yield SampleInput(make_input((S, S)), args=(idx,), kwargs={'padding_idx': -1},)
+ yield SampleInput(
+ make_input((S, S)),
+ args=(idx,),
+ kwargs={"padding_idx": -1},
+ )
# Scale the gradient based on the inverse frequency of a particular index.
idx = make_long_input((2, 2), low=0, high=S)
idx[0, 0] = 1
idx[0, 1] = 1
weights = make_input((S, S))
- yield SampleInput(weights, args=(idx,), kwargs={'scale_grad_by_freq': True},)
+ yield SampleInput(
+ weights,
+ args=(idx,),
+ kwargs={"scale_grad_by_freq": True},
+ )
return list(generator())
@@ -177,29 +264,36 @@ additional_op_db.append(
# We use lambda to reshuffle the positional arguments.
# This is because currently only the `input` field of SampleInput
# is tested in gradient tests.
- op=lambda weight, idx, **kwargs: torch.nn.functional.embedding(idx, weight, **kwargs),
+ op=lambda weight, idx, **kwargs: torch.nn.functional.embedding(
+ idx, weight, **kwargs
+ ),
dtypes=floating_types_and(torch.bfloat16, torch.float16),
sample_inputs_func=sample_inputs_embedding,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
- ))
+ )
+)
def sample_inputs_mse_loss(op_info, device, dtype, requires_grad, **kwargs):
def make_input(shape, requires_grad=requires_grad):
- return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad)
+ return make_tensor(
+ shape, device=device, dtype=dtype, requires_grad=requires_grad
+ )
- rhs_requires_grad = kwargs.get('rhs_requires_grad', requires_grad)
+ rhs_requires_grad = kwargs.get("rhs_requires_grad", requires_grad)
S = 5
shapes = ((S, S), (S, S, S), (S, S, S, S))
reductions = ("none", "mean", "sum")
for shape, reduction in itertools.product(shapes, reductions):
- yield SampleInput(make_input(shape),
- args=(make_input(shape, requires_grad=rhs_requires_grad),),
- kwargs={"reduction": reduction})
+ yield SampleInput(
+ make_input(shape),
+ args=(make_input(shape, requires_grad=rhs_requires_grad),),
+ kwargs={"reduction": reduction},
+ )
additional_op_db.append(
@@ -214,7 +308,8 @@ additional_op_db.append(
backward_dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16),
backward_dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16),
- ))
+ )
+)
# TODO: upstream sample inputs to pytorch/pytorch.
@@ -234,7 +329,14 @@ def sample_inputs_getitem(op_info, device, dtype, requires_grad, **kwargs):
(3, ([slice(None), [0, 3], slice(None)],)),
(3, ([[0, 3], slice(None), slice(None)],)),
(3, ([[0, 3], [1, 2], slice(None)],)),
- (3, ([[0, 3], ],)),
+ (
+ 3,
+ (
+ [
+ [0, 3],
+ ],
+ ),
+ ),
(3, ([[0, 3], slice(None)],)),
(3, ([[0, 3], Ellipsis],)),
(3, ([[0, 2, 3], [1, 3, 3], torch.LongTensor([0, 0, 2])],)),
@@ -254,31 +356,46 @@ def sample_inputs_getitem(op_info, device, dtype, requires_grad, **kwargs):
def get_shape(dim):
return tuple(S + i for i in range(dim))
- return tuple(SampleInput(
- make_tensor(get_shape(self_dim), device=device, dtype=dtype, low=None, high=None, requires_grad=requires_grad),
- args=args)
- for self_dim, args in test_args)
+ return tuple(
+ SampleInput(
+ make_tensor(
+ get_shape(self_dim),
+ device=device,
+ dtype=dtype,
+ low=None,
+ high=None,
+ requires_grad=requires_grad,
+ ),
+ args=args,
+ )
+ for self_dim, args in test_args
+ )
# TODO: split PyTorch's __getitem__. The problem is we don't support indexing
# with masks with vmap.
additional_op_db.append(
- OpInfo('__getitem__',
- variant_test_name='functorch',
- dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
- supports_out=False,
- supports_inplace_autograd=False,
- supports_scripting=False,
- op=torch.Tensor.__getitem__,
- assert_jit_shape_analysis=False, # TODO: support index.Tensor()
- supports_forward_ad=True,
- sample_inputs_func=sample_inputs_getitem,))
+ OpInfo(
+ "__getitem__",
+ variant_test_name="functorch",
+ dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
+ supports_out=False,
+ supports_inplace_autograd=False,
+ supports_scripting=False,
+ op=torch.Tensor.__getitem__,
+ assert_jit_shape_analysis=False, # TODO: support index.Tensor()
+ supports_forward_ad=True,
+ sample_inputs_func=sample_inputs_getitem,
+ )
+)
# Turns out at::index_put is different from torch.index_put...
# TODO: figure out how to upstream this
def sample_inputs_aten_index_put(op_info, device, dtype, requires_grad, **kwargs):
- make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
+ make_arg = partial(
+ make_tensor, dtype=dtype, device=device, requires_grad=requires_grad
+ )
inputs = []
adv_idx = torch.LongTensor([[0, 1], [2, 3]])
# self_shape, indices
@@ -306,43 +423,62 @@ def sample_inputs_aten_index_put(op_info, device, dtype, requires_grad, **kwargs
def sample_inputs_index_put(op_info, device, dtype, requires_grad, **kwargs):
- make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
- make_idx = partial(make_tensor, dtype=torch.long, device=device, requires_grad=False)
+ make_arg = partial(
+ make_tensor, dtype=dtype, device=device, requires_grad=requires_grad
+ )
+ make_idx = partial(
+ make_tensor, dtype=torch.long, device=device, requires_grad=False
+ )
S = 5
inputs = []
for accumulate in [False, True]:
# putting vectors at indexed locations
- inputs.append(SampleInput(
- make_arg((S, S)),
- args=((make_idx((2,), low=0, high=4),), make_arg((2, S))),
- kwargs=dict(accumulate=accumulate)))
+ inputs.append(
+ SampleInput(
+ make_arg((S, S)),
+ args=((make_idx((2,), low=0, high=4),), make_arg((2, S))),
+ kwargs=dict(accumulate=accumulate),
+ )
+ )
# putting multi-dim tensors at indexed locations
- inputs.append(SampleInput(
- make_arg((S, S, 2)),
- args=((make_idx((3,), low=0, high=4),), make_arg((3, S, 2))),
- kwargs=dict(accumulate=accumulate)))
+ inputs.append(
+ SampleInput(
+ make_arg((S, S, 2)),
+ args=((make_idx((3,), low=0, high=4),), make_arg((3, S, 2))),
+ kwargs=dict(accumulate=accumulate),
+ )
+ )
# value with size `0` dim
- inputs.append(SampleInput(
- make_arg((S, 0)),
- args=((make_idx((3,), low=0, high=4),), make_arg((3, 0))),
- kwargs=dict(accumulate=accumulate)))
+ inputs.append(
+ SampleInput(
+ make_arg((S, 0)),
+ args=((make_idx((3,), low=0, high=4),), make_arg((3, 0))),
+ kwargs=dict(accumulate=accumulate),
+ )
+ )
# scalar value
- inputs.append(SampleInput(
- make_arg((S,)),
- args=((make_idx((), low=0, high=S),), make_arg(())),
- kwargs=dict(accumulate=accumulate)))
+ inputs.append(
+ SampleInput(
+ make_arg((S,)),
+ args=((make_idx((), low=0, high=S),), make_arg(())),
+ kwargs=dict(accumulate=accumulate),
+ )
+ )
# cuda and accumulate don't work well
# Reference: https://github.com/pytorch/pytorch/issues/72053
- if not accumulate and device == 'cuda':
+ if not accumulate and device == "cuda":
# Broadcast `values`
- inputs.append(SampleInput(
- make_arg((S, S)),
- args=((make_idx((2,), low=0, high=S),), make_arg((S,))),
- kwargs=dict(accumulate=accumulate)))
+ inputs.append(
+ SampleInput(
+ make_arg((S, S)),
+ args=((make_idx((2,), low=0, high=S),), make_arg((S,))),
+ kwargs=dict(accumulate=accumulate),
+ )
+ )
return inputs
@@ -350,48 +486,64 @@ def sample_inputs_index_put(op_info, device, dtype, requires_grad, **kwargs):
additional_op_db.append(
OpInfo(
"index_put",
- variant_test_name='functorch',
+ variant_test_name="functorch",
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_index_put,
supports_forward_ad=True,
- ))
+ )
+)
additional_op_db.append(
OpInfo(
"ops.aten.index_put",
- variant_test_name='functorch',
+ variant_test_name="functorch",
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_aten_index_put,
supports_forward_ad=True,
- ))
+ )
+)
+
def sample_inputs_masked_fill(op_info, device, dtype, requires_grad, **kwargs):
S = 3
- make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
+ make_arg = partial(
+ make_tensor, device=device, dtype=dtype, requires_grad=requires_grad
+ )
yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, 10))
yield SampleInput(make_arg((S, S)), args=(torch.randn(S, device=device) > 0, 10))
yield SampleInput(make_arg(()), args=(torch.randn((), device=device) > 0, 10))
yield SampleInput(make_arg((S, S)), args=(torch.randn((), device=device) > 0, 10))
- yield SampleInput(make_arg((S,)),
- args=(torch.randn(S, S, device=device) > 0, 10),
- broadcasts_input=True)
+ yield SampleInput(
+ make_arg((S,)),
+ args=(torch.randn(S, S, device=device) > 0, 10),
+ broadcasts_input=True,
+ )
+
additional_op_db.append(
- OpInfo('masked_fill',
- variant_test_name='functorch_Scalar_only',
- dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
- sample_inputs_func=sample_inputs_masked_fill,
- supports_forward_ad=True,
- supports_fwgrad_bwgrad=True,
- check_batched_forward_grad=False,
- supports_out=False)
+ OpInfo(
+ "masked_fill",
+ variant_test_name="functorch_Scalar_only",
+ dtypes=all_types_and_complex_and(
+ torch.bool, torch.half, torch.bfloat16, torch.chalf
+ ),
+ sample_inputs_func=sample_inputs_masked_fill,
+ supports_forward_ad=True,
+ supports_fwgrad_bwgrad=True,
+ check_batched_forward_grad=False,
+ supports_out=False,
+ )
)
-def sample_inputs_new_zeros_with_same_feature_meta(op_info, device, dtype, requires_grad, **kwargs):
- make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
+def sample_inputs_new_zeros_with_same_feature_meta(
+ op_info, device, dtype, requires_grad, **kwargs
+):
+ make_arg = partial(
+ make_tensor, dtype=dtype, device=device, requires_grad=requires_grad
+ )
matrix = [
# tangent, base, num_tangent_bdims
([5], [2, 3], 0),
@@ -407,164 +559,255 @@ def sample_inputs_new_zeros_with_same_feature_meta(op_info, device, dtype, requi
for tangent_shape, base_shape, num_tangent_bdims in matrix:
tangent = make_arg(tangent_shape)
base = make_arg(base_shape)
- results.append(SampleInput(
- tangent,
- args=(base,),
- kwargs=dict(self_num_batch_dims=num_tangent_bdims)))
+ results.append(
+ SampleInput(
+ tangent,
+ args=(base,),
+ kwargs=dict(self_num_batch_dims=num_tangent_bdims),
+ )
+ )
return results
additional_op_db.append(
OpInfo(
"ops.aten._new_zeros_with_same_feature_meta",
- variant_test_name='functorchonly',
+ variant_test_name="functorchonly",
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_autograd=False,
supports_forward_ad=False,
sample_inputs_func=sample_inputs_new_zeros_with_same_feature_meta,
- ))
+ )
+)
def sample_inputs_conversion(op_info, device, dtype, requires_grad, **kwargs):
- make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
- shapes = ((),
- (2, 3))
+ make_arg = partial(
+ make_tensor, dtype=dtype, device=device, requires_grad=requires_grad
+ )
+ shapes = ((), (2, 3))
memory_format_options = [None, torch.contiguous_format]
for shape, memory_format in itertools.product(shapes, memory_format_options):
- yield SampleInput(make_arg(shape),
- kwargs={'memory_format': memory_format} if memory_format else {})
-
-
-additional_op_db.extend([
- OpInfo('bfloat16',
- op=lambda x, *args, **kwargs: x.bfloat16(*args, **kwargs),
- dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
- supports_out=False,
- variant_test_name='functorch_no_channels_last',
- sample_inputs_func=sample_inputs_conversion,
- skips=(
- # autograd tests don't handle operators that change dtype
- DecorateInfo(unittest.expectedFailure, 'TestFwdGradients'),
- DecorateInfo(unittest.expectedFailure, 'TestBwdGradients'),
- DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
- # RuntimeError: attribute lookup is not defined on builtin
- DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
- DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'),
- )),
- OpInfo('bool',
- op=lambda x, *args, **kwargs: x.bool(*args, **kwargs),
- dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
- supports_out=False,
- variant_test_name='functorch_no_channels_last',
- sample_inputs_func=sample_inputs_conversion,
- supports_autograd=False,
- skips=(
- DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
- # RuntimeError: attribute lookup is not defined on builtin
- DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
- )),
- OpInfo('byte',
- op=lambda x, *args, **kwargs: x.byte(*args, **kwargs),
- dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
- supports_out=False,
- variant_test_name='functorch_no_channels_last',
- sample_inputs_func=sample_inputs_conversion,
- # The autograd test runner cannot handle functions that change dtype
- supports_autograd=False,
- skips=(
- DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
- # RuntimeError: attribute lookup is not defined on builtin
- DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
- )),
- OpInfo('char',
- op=lambda x, *args, **kwargs: x.char(*args, **kwargs),
- dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
- supports_out=False,
- variant_test_name='functorch_no_channels_last',
- sample_inputs_func=sample_inputs_conversion,
- # The autograd test runner cannot handle functions that change dtype
- supports_autograd=False,
- skips=(
- DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
- # RuntimeError: attribute lookup is not defined on builtin
- DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
- )),
- OpInfo('double',
- op=lambda x, *args, **kwargs: x.double(*args, **kwargs),
- dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
- supports_out=False,
- variant_test_name='functorch_no_channels_last',
- sample_inputs_func=sample_inputs_conversion,
- supports_forward_ad=True,
- supports_fwgrad_bwgrad=True,
- skips=(
- DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
- # RuntimeError: attribute lookup is not defined on builtin
- DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
- )),
- OpInfo('float',
- op=lambda x, *args, **kwargs: x.float(*args, **kwargs),
- dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
- supports_out=False,
- variant_test_name='functorch_no_channels_last',
- sample_inputs_func=sample_inputs_conversion,
- skips=(
- # autograd tests don't handle operators that change dtype
- DecorateInfo(unittest.expectedFailure, 'TestFwdGradients'),
- DecorateInfo(unittest.expectedFailure, 'TestBwdGradients'),
- DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
- # RuntimeError: attribute lookup is not defined on builtin
- DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
- )),
- OpInfo('half',
- op=lambda x, *args, **kwargs: x.half(*args, **kwargs),
- dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
- supports_out=False,
- variant_test_name='functorch_no_channels_last',
- sample_inputs_func=sample_inputs_conversion,
- skips=(
- # autograd tests don't handle operators that change dtype
- DecorateInfo(unittest.expectedFailure, 'TestFwdGradients'),
- DecorateInfo(unittest.expectedFailure, 'TestBwdGradients'),
- DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
- # RuntimeError: attribute lookup is not defined on builtin
- DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
- )),
- OpInfo('int',
- op=lambda x, *args, **kwargs: x.int(*args, **kwargs),
- dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
- supports_out=False,
- variant_test_name='functorch_no_channels_last',
- sample_inputs_func=sample_inputs_conversion,
- supports_autograd=False,
- skips=(
- DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
- # RuntimeError: attribute lookup is not defined on builtin
- DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
- )),
- OpInfo('long',
- op=lambda x, *args, **kwargs: x.long(*args, **kwargs),
- dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
- supports_out=False,
- variant_test_name='functorch_no_channels_last',
- sample_inputs_func=sample_inputs_conversion,
- supports_autograd=False,
- skips=(
- DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
- # RuntimeError: attribute lookup is not defined on builtin
- DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
- )),
- OpInfo('short',
- op=lambda x, *args, **kwargs: x.short(*args, **kwargs),
- dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
- supports_out=False,
- variant_test_name='functorch_no_channels_last',
- sample_inputs_func=sample_inputs_conversion,
- supports_autograd=False,
- skips=(
- DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
- # RuntimeError: attribute lookup is not defined on builtin
- DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
- )),
-])
+ yield SampleInput(
+ make_arg(shape),
+ kwargs={"memory_format": memory_format} if memory_format else {},
+ )
+
+
+additional_op_db.extend(
+ [
+ OpInfo(
+ "bfloat16",
+ op=lambda x, *args, **kwargs: x.bfloat16(*args, **kwargs),
+ dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
+ supports_out=False,
+ variant_test_name="functorch_no_channels_last",
+ sample_inputs_func=sample_inputs_conversion,
+ skips=(
+ # autograd tests don't handle operators that change dtype
+ DecorateInfo(unittest.expectedFailure, "TestFwdGradients"),
+ DecorateInfo(unittest.expectedFailure, "TestBwdGradients"),
+ DecorateInfo(
+ unittest.expectedFailure,
+ "TestNormalizeOperators",
+ "test_normalize_operator_exhaustive",
+ ),
+ # RuntimeError: attribute lookup is not defined on builtin
+ DecorateInfo(
+ unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"
+ ),
+ DecorateInfo(
+ unittest.skip("Skipped!"), "TestNNCOpInfo", "test_nnc_correctness"
+ ),
+ ),
+ ),
+ OpInfo(
+ "bool",
+ op=lambda x, *args, **kwargs: x.bool(*args, **kwargs),
+ dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
+ supports_out=False,
+ variant_test_name="functorch_no_channels_last",
+ sample_inputs_func=sample_inputs_conversion,
+ supports_autograd=False,
+ skips=(
+ DecorateInfo(
+ unittest.expectedFailure,
+ "TestNormalizeOperators",
+ "test_normalize_operator_exhaustive",
+ ),
+ # RuntimeError: attribute lookup is not defined on builtin
+ DecorateInfo(
+ unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"
+ ),
+ ),
+ ),
+ OpInfo(
+ "byte",
+ op=lambda x, *args, **kwargs: x.byte(*args, **kwargs),
+ dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
+ supports_out=False,
+ variant_test_name="functorch_no_channels_last",
+ sample_inputs_func=sample_inputs_conversion,
+ # The autograd test runner cannot handle functions that change dtype
+ supports_autograd=False,
+ skips=(
+ DecorateInfo(
+ unittest.expectedFailure,
+ "TestNormalizeOperators",
+ "test_normalize_operator_exhaustive",
+ ),
+ # RuntimeError: attribute lookup is not defined on builtin
+ DecorateInfo(
+ unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"
+ ),
+ ),
+ ),
+ OpInfo(
+ "char",
+ op=lambda x, *args, **kwargs: x.char(*args, **kwargs),
+ dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
+ supports_out=False,
+ variant_test_name="functorch_no_channels_last",
+ sample_inputs_func=sample_inputs_conversion,
+ # The autograd test runner cannot handle functions that change dtype
+ supports_autograd=False,
+ skips=(
+ DecorateInfo(
+ unittest.expectedFailure,
+ "TestNormalizeOperators",
+ "test_normalize_operator_exhaustive",
+ ),
+ # RuntimeError: attribute lookup is not defined on builtin
+ DecorateInfo(
+ unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"
+ ),
+ ),
+ ),
+ OpInfo(
+ "double",
+ op=lambda x, *args, **kwargs: x.double(*args, **kwargs),
+ dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
+ supports_out=False,
+ variant_test_name="functorch_no_channels_last",
+ sample_inputs_func=sample_inputs_conversion,
+ supports_forward_ad=True,
+ supports_fwgrad_bwgrad=True,
+ skips=(
+ DecorateInfo(
+ unittest.expectedFailure,
+ "TestNormalizeOperators",
+ "test_normalize_operator_exhaustive",
+ ),
+ # RuntimeError: attribute lookup is not defined on builtin
+ DecorateInfo(
+ unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"
+ ),
+ ),
+ ),
+ OpInfo(
+ "float",
+ op=lambda x, *args, **kwargs: x.float(*args, **kwargs),
+ dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
+ supports_out=False,
+ variant_test_name="functorch_no_channels_last",
+ sample_inputs_func=sample_inputs_conversion,
+ skips=(
+ # autograd tests don't handle operators that change dtype
+ DecorateInfo(unittest.expectedFailure, "TestFwdGradients"),
+ DecorateInfo(unittest.expectedFailure, "TestBwdGradients"),
+ DecorateInfo(
+ unittest.expectedFailure,
+ "TestNormalizeOperators",
+ "test_normalize_operator_exhaustive",
+ ),
+ # RuntimeError: attribute lookup is not defined on builtin
+ DecorateInfo(
+ unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"
+ ),
+ ),
+ ),
+ OpInfo(
+ "half",
+ op=lambda x, *args, **kwargs: x.half(*args, **kwargs),
+ dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
+ supports_out=False,
+ variant_test_name="functorch_no_channels_last",
+ sample_inputs_func=sample_inputs_conversion,
+ skips=(
+ # autograd tests don't handle operators that change dtype
+ DecorateInfo(unittest.expectedFailure, "TestFwdGradients"),
+ DecorateInfo(unittest.expectedFailure, "TestBwdGradients"),
+ DecorateInfo(
+ unittest.expectedFailure,
+ "TestNormalizeOperators",
+ "test_normalize_operator_exhaustive",
+ ),
+ # RuntimeError: attribute lookup is not defined on builtin
+ DecorateInfo(
+ unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"
+ ),
+ ),
+ ),
+ OpInfo(
+ "int",
+ op=lambda x, *args, **kwargs: x.int(*args, **kwargs),
+ dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
+ supports_out=False,
+ variant_test_name="functorch_no_channels_last",
+ sample_inputs_func=sample_inputs_conversion,
+ supports_autograd=False,
+ skips=(
+ DecorateInfo(
+ unittest.expectedFailure,
+ "TestNormalizeOperators",
+ "test_normalize_operator_exhaustive",
+ ),
+ # RuntimeError: attribute lookup is not defined on builtin
+ DecorateInfo(
+ unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"
+ ),
+ ),
+ ),
+ OpInfo(
+ "long",
+ op=lambda x, *args, **kwargs: x.long(*args, **kwargs),
+ dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
+ supports_out=False,
+ variant_test_name="functorch_no_channels_last",
+ sample_inputs_func=sample_inputs_conversion,
+ supports_autograd=False,
+ skips=(
+ DecorateInfo(
+ unittest.expectedFailure,
+ "TestNormalizeOperators",
+ "test_normalize_operator_exhaustive",
+ ),
+ # RuntimeError: attribute lookup is not defined on builtin
+ DecorateInfo(
+ unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"
+ ),
+ ),
+ ),
+ OpInfo(
+ "short",
+ op=lambda x, *args, **kwargs: x.short(*args, **kwargs),
+ dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
+ supports_out=False,
+ variant_test_name="functorch_no_channels_last",
+ sample_inputs_func=sample_inputs_conversion,
+ supports_autograd=False,
+ skips=(
+ DecorateInfo(
+ unittest.expectedFailure,
+ "TestNormalizeOperators",
+ "test_normalize_operator_exhaustive",
+ ),
+ # RuntimeError: attribute lookup is not defined on builtin
+ DecorateInfo(
+ unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"
+ ),
+ ),
+ ),
+ ]
+)
diff --git a/test/functorch/test_aotdispatch.py b/test/functorch/test_aotdispatch.py
index 3c9cf40cf4..4f7545ddf2 100644
--- a/test/functorch/test_aotdispatch.py
+++ b/test/functorch/test_aotdispatch.py
@@ -6,92 +6,110 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
-from typing import Union, Callable, List, Any, Optional, Dict
-from unittest.mock import patch
-from torch.testing._internal.common_utils import (
- TestCase,
- run_tests,
- IS_ARM64,
- IS_MACOS,
- IS_WINDOWS,
- IS_X86,
- compare_equal_outs_and_grads,
- outs_and_grads,
- skipIfRocm,
-)
-from torch.testing._internal.two_tensor import TwoTensor, TwoTensorMode
import copy
-import torch
-import torch._dynamo as torchdynamo
-import torch.nn as nn
-import torch.utils._pytree as pytree
+import itertools
import unittest
import warnings
-import itertools
from contextlib import nullcontext
from functools import partial
-from torch.nn.utils.rnn import PackedSequence
-from torch.testing._internal.common_device_type import instantiate_device_type_tests, toleranceOverride, tol
-from torch.testing._internal.common_methods_invocations import op_db
-from torch.testing._internal.common_modules import module_db, modules
-from torch.testing._internal.common_utils import parametrize, instantiate_parametrized_tests
-from torch.testing._internal.optests import _test_aot_autograd_forwards_backwards_helper, aot_autograd_check
-from torch.testing._internal.hop_db import hop_db
-from torch._higher_order_ops.out_dtype import out_dtype
-from functorch import (
- grad, vjp, vmap, jacrev,
- make_fx
-)
-from torch._functorch.aot_autograd import aot_module_simplified, aot_export_module, aot_export_joint_simple
+from typing import Any, Callable, Dict, List, Optional, Union
+from unittest.mock import patch
+
+import torch
+import torch._dynamo as torchdynamo
+import torch.nn as nn
+import torch.utils._pytree as pytree
+from common_utils import decorate, decorateForModules, skip, skipOps, xfail
+from functorch import grad, jacrev, make_fx, vjp, vmap
from functorch.compile import (
- nnc_jit, compiled_function, compiled_module,
- min_cut_rematerialization_partition, aot_function, aot_module,
- nop, default_partition, default_decompositions,
- memory_efficient_fusion, get_aot_compilation_context, make_boxed_compiler
+ aot_function,
+ aot_module,
+ compiled_function,
+ compiled_module,
+ default_decompositions,
+ default_partition,
+ get_aot_compilation_context,
+ make_boxed_compiler,
+ memory_efficient_fusion,
+ min_cut_rematerialization_partition,
+ nnc_jit,
+ nop,
)
from functorch.experimental import control_flow
from torch._decomp import decomposition_table
-
-from torch.testing._internal.common_device_type import ops
-from common_utils import (
- decorate,
- xfail,
- skip,
- skipOps,
- decorateForModules,
+from torch._functorch.aot_autograd import (
+ aot_export_joint_simple,
+ aot_export_module,
+ aot_module_simplified,
)
+from torch._higher_order_ops.out_dtype import out_dtype
from torch._subclasses.fake_tensor import DynamicOutputShapeException, FakeTensorMode
from torch.fx.experimental.proxy_tensor import is_sym_node
-from torch.fx.experimental.symbolic_shapes import ShapeEnv, GuardOnDataDependentSymNode
+from torch.fx.experimental.symbolic_shapes import GuardOnDataDependentSymNode, ShapeEnv
+from torch.nn.utils.rnn import PackedSequence
+
+from torch.testing._internal.common_device_type import (
+ instantiate_device_type_tests,
+ ops,
+ tol,
+ toleranceOverride,
+)
+from torch.testing._internal.common_methods_invocations import op_db
+from torch.testing._internal.common_modules import module_db, modules
+from torch.testing._internal.common_utils import (
+ compare_equal_outs_and_grads,
+ instantiate_parametrized_tests,
+ IS_ARM64,
+ IS_MACOS,
+ IS_WINDOWS,
+ IS_X86,
+ outs_and_grads,
+ parametrize,
+ run_tests,
+ skipIfRocm,
+ TestCase,
+)
+from torch.testing._internal.hop_db import hop_db
+from torch.testing._internal.optests import (
+ _test_aot_autograd_forwards_backwards_helper,
+ aot_autograd_check,
+)
+from torch.testing._internal.two_tensor import TwoTensor, TwoTensorMode
USE_TORCHVISION = False
try:
import torchvision
+
USE_TORCHVISION = True
except ImportError:
- warnings.warn("Couldn't import torchvision. Some of our tests use it, try "
- "to install it with commands from pytorch.org, post-fixed with "
- "`--no-deps` to avoid overwriting the pytorch installation",
- UserWarning)
+ warnings.warn(
+ "Couldn't import torchvision. Some of our tests use it, try "
+ "to install it with commands from pytorch.org, post-fixed with "
+ "`--no-deps` to avoid overwriting the pytorch installation",
+ UserWarning,
+ )
USE_NETWORKX = False
try:
import networkx # noqa: F401
+
USE_NETWORKX = True
except ImportError:
- warnings.warn("Some tests use networkx but it was not installed",
- UserWarning)
+ warnings.warn("Some tests use networkx but it was not installed", UserWarning)
# NB: numpy is a testing dependency!
+
class AOTTestCase(TestCase):
def setUp(self):
super().setUp()
+
class TestPythonKey(AOTTestCase):
def test_make_fx(self, device):
def f(x):
return torch.sin(x)
+
inp = torch.randn(3)
fx_f = make_fx(f)(inp)
@@ -101,6 +119,7 @@ class TestPythonKey(AOTTestCase):
def test_make_fx_grad(self, device):
def f(x):
return torch.sin(x).sum()
+
inp = torch.randn(3)
f = grad(f)
fx_f = make_fx(f)(inp)
@@ -111,6 +130,7 @@ class TestPythonKey(AOTTestCase):
def test_scalar_device(self, device):
def f(a, b):
return a + b
+
inps = [torch.randn(3, device=device), torch.tensor(5)]
fx_f = make_fx(f)(*inps)
self.assertEqual(fx_f(*inps), f(*inps))
@@ -118,6 +138,7 @@ class TestPythonKey(AOTTestCase):
def test_make_fx_vmap(self, device):
def f(x):
return torch.sin(x)
+
inp = torch.randn(5, 3)
f = vmap(f)
fx_f = make_fx(f)(inp)
@@ -127,6 +148,7 @@ class TestPythonKey(AOTTestCase):
def test_make_fx_jacrev(self, device):
def f(x):
return x.sin().sum()
+
inp = torch.randn(3)
f = jacrev(jacrev(f))
fx_f = make_fx(f)(inp)
@@ -211,6 +233,7 @@ class TestPythonKey(AOTTestCase):
def test_external_calls(self, device):
def f(a, b):
return torch.mv(a, b)
+
jit_f = nnc_jit(f)
inp = [torch.randn(3, 3), torch.randn(3)]
self.assertEqual(jit_f(*inp), f(*inp))
@@ -218,14 +241,16 @@ class TestPythonKey(AOTTestCase):
def test_nnc_passthrough(self, device):
def f(x, y):
return x + y, y
+
inp = (torch.randn(3), torch.randn(3))
jit_f = nnc_jit(f)
self.assertEqual(jit_f(*inp), f(*inp))
def f(x):
- x['a'] = x['a'] * 2
+ x["a"] = x["a"] * 2
return x
- inp = ({'a': torch.randn(3), 'b': torch.randn(3)},)
+
+ inp = ({"a": torch.randn(3), "b": torch.randn(3)},)
jit_f = nnc_jit(f)
self.assertEqual(jit_f(*inp), f(*inp))
@@ -246,9 +271,11 @@ class TestPythonKey(AOTTestCase):
grads2 = [a.grad for a in mod.parameters()]
self.assertEqual(grads, grads2)
+
def get_base(t):
return t._base if t._is_view() else t
+
def is_in_base(t, maybe_tensors):
t_base = get_base(t)
for maybe_tensor in maybe_tensors:
@@ -257,6 +284,7 @@ def is_in_base(t, maybe_tensors):
return True
return False
+
class TestAOTAutograd(AOTTestCase):
# test_mutation will:
# - Ensure that inputs are non-leaves, so our graphs can mutate them
@@ -321,20 +349,24 @@ class TestAOTAutograd(AOTTestCase):
if isinstance(f, nn.Module):
compiled_f = aot_module(
f,
- fw_compiler=make_boxed_compiler(partial(extract_graph, graph_cell=fw_graph_cell)),
+ fw_compiler=make_boxed_compiler(
+ partial(extract_graph, graph_cell=fw_graph_cell)
+ ),
bw_compiler=nop,
decompositions=decompositions,
keep_inference_input_mutations=keep_input_mutations,
- dynamic=dynamic
+ dynamic=dynamic,
)
else:
compiled_f = aot_function(
f,
- fw_compiler=make_boxed_compiler(partial(extract_graph, graph_cell=fw_graph_cell)),
+ fw_compiler=make_boxed_compiler(
+ partial(extract_graph, graph_cell=fw_graph_cell)
+ ),
bw_compiler=nop,
decompositions=decompositions,
keep_inference_input_mutations=keep_input_mutations,
- dynamic=dynamic
+ dynamic=dynamic,
)
ref_out, ref_grad = outs_and_grads(f, graph_inps, inp)
test_out, test_grad = outs_and_grads(compiled_f, graph_inps_copy, inp_copy)
@@ -347,9 +379,15 @@ class TestAOTAutograd(AOTTestCase):
if isinstance(ref_o, torch.Tensor):
self.assertEqual(ref_o.requires_grad, test_o.requires_grad)
self.assertEqual(ref_o.is_leaf, test_o.is_leaf)
- ref_is_view_of_non_interm = is_in_base(ref_o, graph_inps) or is_in_base(ref_o, ref_out)
- test_is_view_of_non_interm = is_in_base(test_o, graph_inps_copy) or is_in_base(test_o, test_out)
- self.assertEqual(ref_is_view_of_non_interm, test_is_view_of_non_interm)
+ ref_is_view_of_non_interm = is_in_base(
+ ref_o, graph_inps
+ ) or is_in_base(ref_o, ref_out)
+ test_is_view_of_non_interm = is_in_base(
+ test_o, graph_inps_copy
+ ) or is_in_base(test_o, test_out)
+ self.assertEqual(
+ ref_is_view_of_non_interm, test_is_view_of_non_interm
+ )
self.assertEqual(ref_o, test_o)
if test_mutation:
# This tests that autograd meta is set properly on the output we can
@@ -367,6 +405,7 @@ class TestAOTAutograd(AOTTestCase):
# int, None, Tensor
def f(a, b, c):
return a * c
+
inp = [2, None, torch.ones(3, 3, dtype=torch.float32, requires_grad=True)]
self.verify_aot_autograd(f, inp)
inp = [2, None, torch.ones(3, 3, dtype=torch.float32, requires_grad=False)]
@@ -375,6 +414,7 @@ class TestAOTAutograd(AOTTestCase):
def test_single_output(self):
def f(a, b):
return a + b
+
inp = [torch.randn(3, 3, requires_grad=True), torch.randn(3, 3)]
self.verify_aot_autograd(f, inp)
inp = [torch.randn(3, 3, requires_grad=False), torch.randn(3, 3)]
@@ -383,6 +423,7 @@ class TestAOTAutograd(AOTTestCase):
def test_multi_output(self):
def f(a, b):
return a + b, a - b
+
inp = [torch.randn(3, 3, requires_grad=True), torch.randn(3, 3)]
self.verify_aot_autograd(f, inp)
inp = [torch.randn(3, 3, requires_grad=False), torch.randn(3, 3)]
@@ -391,6 +432,7 @@ class TestAOTAutograd(AOTTestCase):
def test_multi_output_list(self):
def f(a, b):
return [a + b, a - b]
+
inp = [torch.randn(3, 3, requires_grad=True), torch.randn(3, 3)]
self.verify_aot_autograd(f, inp)
inp = [torch.randn(3, 3, requires_grad=False), torch.randn(3, 3)]
@@ -400,7 +442,7 @@ class TestAOTAutograd(AOTTestCase):
def test_squeeze_mutation(self):
def f(a):
b = a.clone().squeeze(-1)
- b.add_(1.)
+ b.add_(1.0)
return a + b
inp = [torch.randn(3, 1, requires_grad=True)]
@@ -439,12 +481,11 @@ class TestAOTAutograd(AOTTestCase):
self.verify_aot_autograd(F(), [x, y], dynamic=False)
self.verify_aot_autograd(F(), [x, y], dynamic=True)
-
-
def test_input_mutation_simple(self):
def f(a):
a.mul_(2)
return a * 3
+
inp = [torch.ones(3, 3, requires_grad=True)]
fw_graph = self.verify_aot_autograd(f, inp, test_mutation=True)
inp = [torch.ones(3, 3, requires_grad=False)]
@@ -454,12 +495,15 @@ class TestAOTAutograd(AOTTestCase):
# but autograd operates above functionalization so we need to manually clone.
# Hopefully backends can optimize this easily.
# - The extra return arg is because the compiled forward returns (mutated inputs + outputs)
- self.assertExpectedInline(fw_graph.code.strip(), """\
+ self.assertExpectedInline(
+ fw_graph.code.strip(),
+ """\
def forward(self, primals_1):
clone = torch.ops.aten.clone.default(primals_1); primals_1 = None
mul = torch.ops.aten.mul.Tensor(clone, 2); clone = None
mul_1 = torch.ops.aten.mul.Tensor(mul, 3)
- return [mul, mul_1]""")
+ return [mul, mul_1]""",
+ )
def test_input_mutation_set__input_mutation(self):
def f(a):
@@ -467,6 +511,7 @@ def forward(self, primals_1):
with torch.no_grad():
a.set_(b)
return a * b
+
inp = [torch.ones(3, 3, requires_grad=True)]
self.verify_aot_autograd(f, inp, test_mutation=True, keep_inp_mutations=True)
inp = [torch.ones(3, 3, requires_grad=False)]
@@ -482,7 +527,11 @@ def forward(self, primals_1):
# Also mutates b_,
a_.view(-1).mul_(2)
return a_ * b_slice
- inp = [torch.ones(3, 3, requires_grad=False), torch.zeros(3, 9, requires_grad=False)]
+
+ inp = [
+ torch.ones(3, 3, requires_grad=False),
+ torch.zeros(3, 9, requires_grad=False),
+ ]
self.verify_aot_autograd(f, inp, keep_inp_mutations=True)
def test_set__and_data_mutation_good(self):
@@ -492,9 +541,18 @@ def forward(self, primals_1):
a.set_(b)
b.mul_(2)
return a + b
- inp = [torch.ones(3, 3, requires_grad=True), torch.ones(3, 3, requires_grad=True)]
- fw_graph = self.verify_aot_autograd(f, inp, test_mutation=True, keep_inp_mutations=True)
- inp = [torch.ones(3, 3, requires_grad=False), torch.zeros(3, 3, requires_grad=False)]
+
+ inp = [
+ torch.ones(3, 3, requires_grad=True),
+ torch.ones(3, 3, requires_grad=True),
+ ]
+ fw_graph = self.verify_aot_autograd(
+ f, inp, test_mutation=True, keep_inp_mutations=True
+ )
+ inp = [
+ torch.ones(3, 3, requires_grad=False),
+ torch.zeros(3, 3, requires_grad=False),
+ ]
self.verify_aot_autograd(f, inp, test_mutation=True, keep_inp_mutations=True)
# Important things to note:
# - "return a.set_(b)" desugars into "return b"
@@ -503,13 +561,16 @@ def forward(self, primals_1):
# a is recorded as both a data mutation and a metadata mutation (due to set_ swapping its storage).
# - the runtime epilogue for a is "a.set_(mul)"
# - the runtime epilogue for b is "b.copy_(mul)"
- self.assertExpectedInline(fw_graph.code.strip(), """\
+ self.assertExpectedInline(
+ fw_graph.code.strip(),
+ """\
def forward(self, primals_1, primals_2):
mul = torch.ops.aten.mul.Tensor(primals_2, 2)
add = torch.ops.aten.add.Tensor(mul, mul)
set_ = torch.ops.aten.set_.source_Tensor(primals_1, mul); primals_1 = None
copy_ = torch.ops.aten.copy_.default(primals_2, mul); primals_2 = mul = None
- return [add]""")
+ return [add]""",
+ )
# This is a (hopefully) extremely rare case that is difficult to handle,
# so we ban it.
@@ -525,9 +586,14 @@ def forward(self, primals_1, primals_2):
# so we won't recognize that this caused an input mutation!
a_view.mul_(2)
return a + tmp
+
inp = [torch.ones(3, 3, requires_grad=True)]
- with self.assertRaisesRegex(RuntimeError, "cannot mutate tensors with frozen storage"):
- self.verify_aot_autograd(f, inp, test_mutation=True, keep_inp_mutations=True)
+ with self.assertRaisesRegex(
+ RuntimeError, "cannot mutate tensors with frozen storage"
+ ):
+ self.verify_aot_autograd(
+ f, inp, test_mutation=True, keep_inp_mutations=True
+ )
def test_set__not_allowed(self):
def f(a, b):
@@ -537,9 +603,17 @@ def forward(self, primals_1, primals_2):
# We currently ban this today, when the input also received a set_() input mutation.
a.mul_(2)
return a + b
- inp = [torch.ones(3, 3, requires_grad=True), torch.ones(3, 3, requires_grad=True)]
- with self.assertRaisesRegex(AssertionError, "prevented us from including it in the graph"):
- fw_graph = self.verify_aot_autograd(f, inp, test_mutation=True, keep_inp_mutations=True)
+
+ inp = [
+ torch.ones(3, 3, requires_grad=True),
+ torch.ones(3, 3, requires_grad=True),
+ ]
+ with self.assertRaisesRegex(
+ AssertionError, "prevented us from including it in the graph"
+ ):
+ fw_graph = self.verify_aot_autograd(
+ f, inp, test_mutation=True, keep_inp_mutations=True
+ )
def test_input_mutation_set__nop(self):
def f(a):
@@ -549,26 +623,33 @@ def forward(self, primals_1, primals_2):
a.set_(b)
a.set_(a_old)
return a + b.reshape(3, 3)
+
inp = [torch.ones(3, 3, requires_grad=True)]
- fw_graph = self.verify_aot_autograd(f, inp, test_mutation=True, keep_inp_mutations=True)
+ fw_graph = self.verify_aot_autograd(
+ f, inp, test_mutation=True, keep_inp_mutations=True
+ )
inp = [torch.ones(3, 3, requires_grad=False)]
self.verify_aot_autograd(f, inp, test_mutation=True, keep_inp_mutations=True)
# Things to note:
# - There are no set_() calls in the graph (we functionalize a.set_(b) into "b")
# - There is only **1** graph output. We properly realized that the two set_() calls
# undo each other, and so effectively no inputs are mutated.
- self.assertExpectedInline(fw_graph.code.strip(), """\
+ self.assertExpectedInline(
+ fw_graph.code.strip(),
+ """\
def forward(self, primals_1):
arange = torch.ops.aten.arange.default(9, dtype = torch.float32, device = device(type='cpu'), pin_memory = False)
alias = torch.ops.aten.alias.default(primals_1); primals_1 = None
view = torch.ops.aten.view.default(arange, [3, 3]); arange = None
add = torch.ops.aten.add.Tensor(alias, view); alias = view = None
- return [add]""")
+ return [add]""",
+ )
def test_input_mutation_simple_with_none_and_nontensor(self):
# Tensor, None, int
def f(a, b, c):
return a * c
+
f_compiled = aot_function(f, nop)
for req_grad in [True, False]:
inp = [torch.ones(3, 3, requires_grad=req_grad), None, 3]
@@ -621,13 +702,13 @@ def forward(self, primals_1):
self.assertEqual(x_ref.grad, x_test.grad)
self.assertEqual(x_ref_view.grad, x_test_view.grad)
-
def test_outputs_are_aliased(self):
# Tensor, None, int
def f(a):
b = a.mul(2)
c = b.view(-1)
return b, c
+
f_compiled = aot_function(f, nop)
for req_grad in [True, False]:
inp = torch.ones(3, requires_grad=req_grad)
@@ -646,15 +727,19 @@ def forward(self, primals_1):
def f(a):
a.mul_(2)
return a
+
inp = [torch.ones(3, 3, requires_grad=True)]
fw_graph = self.verify_aot_autograd(f, inp, test_mutation=True)
inp = [torch.ones(3, 3, requires_grad=False)]
self.verify_aot_autograd(f, inp, test_mutation=True)
- self.assertExpectedInline(fw_graph.code.strip(), """\
+ self.assertExpectedInline(
+ fw_graph.code.strip(),
+ """\
def forward(self, primals_1):
clone = torch.ops.aten.clone.default(primals_1); primals_1 = None
mul = torch.ops.aten.mul.Tensor(clone, 2); clone = None
- return [mul, mul]""")
+ return [mul, mul]""",
+ )
def test_input_mutation_multiple(self):
def f(a, b, c):
@@ -672,7 +757,9 @@ def forward(self, primals_1):
self.verify_aot_autograd(f, create_inp(False), test_mutation=True)
fw_graph = self.verify_aot_autograd(f, create_inp(True), test_mutation=True)
- self.assertExpectedInline(fw_graph.code.strip(), """\
+ self.assertExpectedInline(
+ fw_graph.code.strip(),
+ """\
def forward(self, primals_1, primals_2, primals_3):
clone = torch.ops.aten.clone.default(primals_1); primals_1 = None
clone_1 = torch.ops.aten.clone.default(primals_3); primals_3 = None
@@ -680,25 +767,26 @@ def forward(self, primals_1, primals_2, primals_3):
mul_1 = torch.ops.aten.mul.Tensor(clone_1, 2); clone_1 = None
add = torch.ops.aten.add.Tensor(mul, primals_2); primals_2 = None
add_1 = torch.ops.aten.add.Tensor(add, mul_1); add = None
- return [mul, mul_1, add_1]""")
+ return [mul, mul_1, add_1]""",
+ )
def test_input_mutation_return(self):
def f(a, b):
return torch.sin(a, out=b)
- inp = [
- torch.randn(3, 3),
- torch.ones(3, 3)
- ]
+ inp = [torch.randn(3, 3), torch.ones(3, 3)]
fw_graph = self.verify_aot_autograd(
f, inp, test_mutation=True, keep_inp_mutations=True
)
- self.assertExpectedInline(fw_graph.code.strip(), """\
+ self.assertExpectedInline(
+ fw_graph.code.strip(),
+ """\
def forward(self, arg0_1, arg1_1):
sin = torch.ops.aten.sin.default(arg0_1); arg0_1 = None
copy_ = torch.ops.aten.copy_.default(arg1_1, sin); arg1_1 = sin = None
- return (copy_,)""")
+ return (copy_,)""",
+ )
def test_input_mutation_metadata(self):
def f(a, b):
@@ -715,7 +803,6 @@ def forward(self, arg0_1, arg1_1):
self.verify_aot_autograd(f, create_inp(False), test_mutation=True)
def test_input_output_aliase_custom_autograd_function(self):
-
class Foo(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
@@ -738,6 +825,7 @@ def forward(self, arg0_1, arg1_1):
def f(a):
a.detach().mul_(2)
return a + 3
+
inp = [torch.ones(4, requires_grad=True)]
self.verify_aot_autograd(f, inp, test_mutation=False)
inp = [torch.ones(4, requires_grad=True)]
@@ -751,51 +839,69 @@ def forward(self, arg0_1, arg1_1):
with torch.no_grad():
a_alias.mul_(2)
return a + 1
+
inp = [torch.ones(4, requires_grad=True)]
# The important bit: we detected that the input mutation is safe
# to include **inside** the graph, since it was under no_grad
# (so all we need to do is use mark_dirty() on the input to bump the VC)
- fw_graph = self.verify_aot_autograd(f, inp, test_mutation=True, keep_inp_mutations=True)
- self.assertExpectedInline(fw_graph.code.strip(), """\
+ fw_graph = self.verify_aot_autograd(
+ f, inp, test_mutation=True, keep_inp_mutations=True
+ )
+ self.assertExpectedInline(
+ fw_graph.code.strip(),
+ """\
def forward(self, primals_1):
view = torch.ops.aten.view.default(primals_1, [-1])
mul = torch.ops.aten.mul.Tensor(view, 2); view = None
view_1 = torch.ops.aten.view.default(mul, [4]); mul = None
add = torch.ops.aten.add.Tensor(view_1, 1)
copy_ = torch.ops.aten.copy_.default(primals_1, view_1); primals_1 = view_1 = None
- return [add]""")
+ return [add]""",
+ )
def test_input_mutation_requires_grad_no_grad(self):
def f(a):
with torch.no_grad():
a.mul_(2)
return a + 3
+
inp = [torch.ones(4, requires_grad=True)]
- fw_graph = self.verify_aot_autograd(f, inp, test_mutation=True, keep_inp_mutations=True)
+ fw_graph = self.verify_aot_autograd(
+ f, inp, test_mutation=True, keep_inp_mutations=True
+ )
        # Even though the input requires_grad, we expect to keep the input mutation in the graph
# (Even though this is a training graph!)
- self.assertExpectedInline(fw_graph.code.strip(), """\
+ self.assertExpectedInline(
+ fw_graph.code.strip(),
+ """\
def forward(self, primals_1):
mul = torch.ops.aten.mul.Tensor(primals_1, 2)
add = torch.ops.aten.add.Tensor(mul, 3)
copy_ = torch.ops.aten.copy_.default(primals_1, mul); primals_1 = mul = None
- return [add]""")
+ return [add]""",
+ )
def test_input_mutation_requires_grad_no_grad_inference_graph(self):
def f(a):
with torch.no_grad():
a.mul_(2)
return a + 3
+
inp = [torch.ones(4, requires_grad=True)]
        # Even though the input requires_grad, we expect to keep the input mutation in the graph
- fw_graph = self.verify_aot_autograd(f, inp, test_mutation=True, keep_inp_mutations=True)
+ fw_graph = self.verify_aot_autograd(
+ f, inp, test_mutation=True, keep_inp_mutations=True
+ )
- self.assertExpectedInline(fw_graph.code.strip(), """\
+ self.assertExpectedInline(
+ fw_graph.code.strip(),
+ """\
def forward(self, arg0_1):
mul = torch.ops.aten.mul.Tensor(arg0_1, 2)
add = torch.ops.aten.add.Tensor(mul, 3)
copy_ = torch.ops.aten.copy_.default(arg0_1, mul); arg0_1 = mul = None
- return (add,)""")
+ return (add,)""",
+ )
def test_input_mutation_requires_grad_no_grad_detach_mixed(self):
# Perform a mix of mutations on a:
@@ -807,6 +913,7 @@ def forward(self, arg0_1):
with torch.no_grad():
a.mul_(4)
return a + 5
+
inp = [torch.ones(4, requires_grad=True)]
fw_graph = self.verify_aot_autograd(f, inp, test_mutation=True)
@@ -815,6 +922,7 @@ def forward(self, arg0_1):
a.transpose_(1, 0)
a.mul_(2)
return a + 1
+
inp = [torch.ones(3, 3, requires_grad=True)]
self.verify_aot_autograd(f, inp, test_mutation=True)
inp = [torch.ones(3, 3, requires_grad=False)]
@@ -826,7 +934,9 @@ def forward(self, arg0_1):
# are *also* saved for backwards.
# This tests that what we save for the backward is actually cloned inputs,
# and not the original inputs that got mutated.
- return torch._native_batch_norm_legit(inpt, weight, bias, running_mean, running_var, True, 0.5, 1e-5)
+ return torch._native_batch_norm_legit(
+ inpt, weight, bias, running_mean, running_var, True, 0.5, 1e-5
+ )
def create_inp(req_grad):
return [
@@ -838,13 +948,20 @@ def forward(self, arg0_1):
]
from torch._decomp import get_decompositions
+
# This simulates what inductor does (running the fw + bw decompositions)
- decompositions = get_decompositions([
- torch.ops.aten._native_batch_norm_legit_functional,
- torch.ops.aten.native_batch_norm_backward,
- ])
- self.verify_aot_autograd(f, create_inp(True), test_mutation=True, decompositions=decompositions)
- self.verify_aot_autograd(f, create_inp(False), test_mutation=True, decompositions=decompositions)
+ decompositions = get_decompositions(
+ [
+ torch.ops.aten._native_batch_norm_legit_functional,
+ torch.ops.aten.native_batch_norm_backward,
+ ]
+ )
+ self.verify_aot_autograd(
+ f, create_inp(True), test_mutation=True, decompositions=decompositions
+ )
+ self.verify_aot_autograd(
+ f, create_inp(False), test_mutation=True, decompositions=decompositions
+ )
def test_batchnorm_inference(self):
inp = [
@@ -876,15 +993,19 @@ def forward(self, arg0_1):
def test_input_output_view_simple(self):
def f(a):
return a.view(-1)
+
inp = [torch.ones(2, 2, requires_grad=False).add(1)]
self.verify_aot_autograd(f, inp, test_mutation=True)
inp = [torch.ones(2, 2, requires_grad=True).add(1)]
fw_graph = self.verify_aot_autograd(f, inp, test_mutation=True)
# Outputs that alias inputs are pulled out of the graph entirely, so we don't compile anything here
- self.assertExpectedInline(fw_graph.code.strip(), """\
+ self.assertExpectedInline(
+ fw_graph.code.strip(),
+ """\
def forward(self, primals_1):
view = torch.ops.aten.view.default(primals_1, [-1]); primals_1 = None
- return [view]""")
+ return [view]""",
+ )
def test_input_output_view_mutate_multiple(self):
def f(a, b, c):
@@ -905,7 +1026,9 @@ def forward(self, primals_1):
# We expect two outputs in the functional graph, a_updated and c_updated.
# The actual aliased outputs themselves aren't in the compiled forward graph;
# Instead, they're generated outside of the graph.
- self.assertExpectedInline(fw_graph.code.strip(), """\
+ self.assertExpectedInline(
+ fw_graph.code.strip(),
+ """\
def forward(self, primals_1, primals_2, primals_3):
clone = torch.ops.aten.clone.default(primals_1); primals_1 = None
clone_1 = torch.ops.aten.clone.default(primals_3); primals_3 = None
@@ -913,7 +1036,8 @@ def forward(self, primals_1, primals_2, primals_3):
mul_1 = torch.ops.aten.mul.Tensor(clone_1, 3); clone_1 = None
view = torch.ops.aten.view.default(primals_2, [2, 2]); primals_2 = None
view_2 = torch.ops.aten.view.default(mul_1, [2, 2])
- return [mul, mul_1, view, view_2]""")
+ return [mul, mul_1, view, view_2]""",
+ )
def test_input_output_view_metadata_mutate_multiple(self):
def f(a, b, c):
@@ -935,7 +1059,9 @@ def forward(self, primals_1, primals_2, primals_3):
# Everything else that does not show up in the graph includes:
# - The metadata mutation on c (we do it outside the graph)
# - All 3 original fw outputs, which are aliases of inputs (we regenerate them outside of the graph)
- self.assertExpectedInline(fw_graph.code.strip(), """\
+ self.assertExpectedInline(
+ fw_graph.code.strip(),
+ """\
def forward(self, primals_1, primals_2, primals_3):
clone = torch.ops.aten.clone.default(primals_2); primals_2 = None
view = torch.ops.aten.view.default(primals_3, [2, 2]); primals_3 = None
@@ -944,12 +1070,14 @@ def forward(self, primals_1, primals_2, primals_3):
view_1 = torch.ops.aten.view.default(primals_1, [2, 2]); primals_1 = None
view_3 = torch.ops.aten.view.default(t, [2, 2])
view_4 = torch.ops.aten.view.default(mul, [2, 2])
- return [mul, t, view_1, view_4, view_3]""")
+ return [mul, t, view_1, view_4, view_3]""",
+ )
def test_input_mutation_and_output_view(self):
def f(a):
a.add_(1)
return a.view(-1)
+
inp = [torch.ones(2, 2, requires_grad=False).add(1)]
self.verify_aot_autograd(f, inp, test_mutation=True)
inp = [torch.ones(2, 2, requires_grad=True).add(1)]
@@ -957,13 +1085,15 @@ def forward(self, primals_1, primals_2, primals_3):
# Here, total # of outputs is 1 because:
# - num_mutated_inps = 1 (a_updated)
# - num_fw_outputs = 0 (the output is an alias of the input, so we move it outside the compiled fw)
- self.assertExpectedInline(fw_graph.code.strip(), """\
+ self.assertExpectedInline(
+ fw_graph.code.strip(),
+ """\
def forward(self, primals_1):
clone = torch.ops.aten.clone.default(primals_1); primals_1 = None
add = torch.ops.aten.add.Tensor(clone, 1); clone = None
view_1 = torch.ops.aten.view.default(add, [-1])
- return [add, view_1]""")
-
+ return [add, view_1]""",
+ )
def test_input_mutation_output_view_multiple(self):
def f(a, b, c, d):
@@ -973,15 +1103,21 @@ def forward(self, primals_1):
def create_inp(req_grad):
return [
- torch.arange(4, requires_grad=req_grad, dtype=torch.float32).view(2, 2).add(1),
- torch.arange(4, requires_grad=req_grad, dtype=torch.float32).view(2, 2).add(1),
+ torch.arange(4, requires_grad=req_grad, dtype=torch.float32)
+ .view(2, 2)
+ .add(1),
+ torch.arange(4, requires_grad=req_grad, dtype=torch.float32)
+ .view(2, 2)
+ .add(1),
torch.ones(2, 2, requires_grad=req_grad).add(1),
torch.ones(2, 2, requires_grad=req_grad).add(1),
]
self.verify_aot_autograd(f, create_inp(False), test_mutation=True)
fw_graph = self.verify_aot_autograd(f, create_inp(True), test_mutation=True)
- self.assertExpectedInline(fw_graph.code.strip(), """\
+ self.assertExpectedInline(
+ fw_graph.code.strip(),
+ """\
def forward(self, primals_1, primals_2, primals_3, primals_4):
view = torch.ops.aten.view.default(primals_2, [2, 2]); primals_2 = None
clone = torch.ops.aten.clone.default(primals_3); primals_3 = None
@@ -990,23 +1126,28 @@ def forward(self, primals_1, primals_2, primals_3, primals_4):
add_1 = torch.ops.aten.add.Tensor(primals_4, 1); primals_4 = None
diagonal = torch.ops.aten.diagonal.default(transpose)
add_2 = torch.ops.aten.add.Tensor(primals_1, add); primals_1 = None
- return [transpose, add, add_1, diagonal, add_2]""")
+ return [transpose, add, add_1, diagonal, add_2]""",
+ )
def test_output_aliases_intermediate_single(self):
def f(a):
out = torch.mul(a, 3)
return out.view(-1)
+
inp = [torch.ones(3, 3, requires_grad=False)]
self.verify_aot_autograd(f, inp, test_mutation=True)
inp = [torch.ones(3, 3, requires_grad=True)]
fw_graph = self.verify_aot_autograd(f, inp, test_mutation=True)
# In AOTAutograd, we are obligated to make the compiled forward directly return `out`,
# and reconstruct `out.view(-1)` as a fresh output.
- self.assertExpectedInline(fw_graph.code.strip(), """\
+ self.assertExpectedInline(
+ fw_graph.code.strip(),
+ """\
def forward(self, primals_1):
mul = torch.ops.aten.mul.Tensor(primals_1, 3); primals_1 = None
view = torch.ops.aten.view.default(mul, [-1]); mul = None
- return [view]""")
+ return [view]""",
+ )
def test_output_aliases_input_multi_output_view_should_raise_autograd_error(self):
def f1(a):
@@ -1018,26 +1159,31 @@ def forward(self, primals_1):
inp2 = torch.ones(3, 3, requires_grad=True).clone()
inp3 = torch.ones(3, 3, requires_grad=True).clone()
- with self.assertRaisesRegex(RuntimeError, "Such functions do not allow the output views"):
+ with self.assertRaisesRegex(
+ RuntimeError, "Such functions do not allow the output views"
+ ):
out_test1 = f1_compiled(inp1)
# This raises a runtime error from autograd in eager mode
out_test1[0].mul_(2)
- with self.assertRaisesRegex(RuntimeError, "Such functions do not allow the output views"):
+ with self.assertRaisesRegex(
+ RuntimeError, "Such functions do not allow the output views"
+ ):
out_test2 = f1_compiled(inp2)
inp2.mul_(2)
# In eager mode, if we mutate a tensor, any multi-output-view aliases
# get their grad_fn replaced with error nodes, so accessing grad_fn should error
grad_fn = out_test2[0].grad_fn
- with self.assertRaisesRegex(RuntimeError, "Such functions do not allow the output views"):
+ with self.assertRaisesRegex(
+ RuntimeError, "Such functions do not allow the output views"
+ ):
out_test3 = f1_compiled(inp3)
out_test1[0].detach().mul_(2)
# The above case also applies to detached aliases (they turn the multi-output-view
# alias's grad_fns into error nodes)
grad_fn = out_test2[0].grad_fn
-
def test_output_aliases_input_multi_output_view(self):
# All aliased outs are from multi-output views, so AOTAutograd will hide the aliasing from autograd.
def f1(a):
@@ -1052,7 +1198,9 @@ def forward(self, primals_1):
# Assert that we get CompiledFunctionBackward in the backward graph,
        # and not AsStridedBackward. No view-regeneration necessary for this multi-output view case.
# See Note: [AOTAutograd: differentiable outputs that alias each other from a multi-output view call]
- self.assertTrue(all('CompiledFunctionBackward' in str(o.grad_fn) for o in out_test))
+ self.assertTrue(
+ all("CompiledFunctionBackward" in str(o.grad_fn) for o in out_test)
+ )
sum(out_ref).sum().backward()
sum(out_test).sum().backward()
@@ -1074,7 +1222,7 @@ def forward(self, primals_1):
inp_clone = inp.clone()
out_ref = f3(inp_ref_clone)
out_test = f3_compiled(inp_clone)
- self.assertTrue(all('UnbindBackward' in str(o.grad_fn) for o in out_test[:3]))
+ self.assertTrue(all("UnbindBackward" in str(o.grad_fn) for o in out_test[:3]))
# The last output is not from a multi-output view, so autograd will let us mutate it.
out_ref[-1].mul_(2)
@@ -1087,7 +1235,6 @@ def forward(self, primals_1):
(inp + out_test[-1]).sum().backward()
self.assertEqual(inp_ref.grad, inp.grad)
-
def test_output_aliases_intermediate_multi_output_view(self):
# All aliased outs are from multi-output views, so AOTAutograd will hide the aliasing from autograd.
def f1(a):
@@ -1103,7 +1250,9 @@ def forward(self, primals_1):
# Assert that we get CompiledFunctionBackward in the backward graph,
        # and not AsStridedBackward. No view-regeneration necessary for this multi-output view case.
# See Note: [AOTAutograd: differentiable outputs that alias each other from a multi-output view call]
- self.assertTrue(all('CompiledFunctionBackward' in str(o.grad_fn) for o in out_test))
+ self.assertTrue(
+ all("CompiledFunctionBackward" in str(o.grad_fn) for o in out_test)
+ )
sum(out_ref).sum().backward()
sum(out_test).sum().backward()
@@ -1123,7 +1272,9 @@ def forward(self, primals_1):
# Assert that we get CompiledFunctionBackward in the backward graph,
# and not AsStridedBackward. No view-regeneration necessary for this mult-output view case.
# See Note: [AOTAutograd: differentiable outputs that alias each other from a multi-output view call]
- self.assertTrue(all('CompiledFunctionBackward' in str(o.grad_fn) for o in out_test))
+ self.assertTrue(
+ all("CompiledFunctionBackward" in str(o.grad_fn) for o in out_test)
+ )
# The last output is not from a multi-output view, so autograd will let us mutate it.
out_ref[-1].mul_(2)
@@ -1146,7 +1297,9 @@ def forward(self, primals_1):
# Assert that we get CompiledFunctionBackward in the backward graph,
# and not AsStridedBackward. No view-regeneration necessary for this mult-output view case.
# See Note: [AOTAutograd: differentiable outputs that alias each other from a multi-output view call]
- self.assertTrue(all('CompiledFunctionBackward' in str(o.grad_fn) for o in out_test))
+ self.assertTrue(
+ all("CompiledFunctionBackward" in str(o.grad_fn) for o in out_test)
+ )
# The last output is not from a multi-output view, so autograd will let us mutate it.
out_ref[-1].mul_(2)
@@ -1191,7 +1344,6 @@ def forward(self, primals_1):
out_test_sum.sum().backward()
self.assertEqual(inp_ref.grad, inp.grad)
-
def test_output_aliases_intermediate_mutation_linear(self):
def f(x):
return (x + 1).view(-1)
@@ -1199,6 +1351,7 @@ def forward(self, primals_1):
inp = [torch.ones(3, 3, requires_grad=True)]
# use inductor's decomps (which will e.g. turn _unsafe_view() into view())
from torch._inductor.decomposition import decompositions
+
f_compiled = aot_function(f, nop, decompositions=decompositions)
out_ref = f(*inp)
@@ -1213,6 +1366,7 @@ def forward(self, primals_1):
out = torch.mul(a, 3)
# First output is an alias of an intermediate that doesn't require grad
return out.view(-1), b.add(1)
+
inp = [torch.ones(3, 3), torch.ones(3, 3, requires_grad=False)]
self.verify_aot_autograd(f, inp, test_mutation=True)
inp = [torch.ones(3, 3), torch.ones(3, 3, requires_grad=True)]
@@ -1220,18 +1374,22 @@ def forward(self, primals_1):
# important bit: we don't bother generating an intermediate base as an output in the graph,
# because the intermediate base itself didn't require gradients.
        # (the only problematic case is when both the base and the aliased output require gradients).
- self.assertExpectedInline(fw_graph.code.strip(), """\
+ self.assertExpectedInline(
+ fw_graph.code.strip(),
+ """\
def forward(self, primals_1, primals_2):
mul = torch.ops.aten.mul.Tensor(primals_1, 3); primals_1 = None
view = torch.ops.aten.view.default(mul, [-1]); mul = None
add = torch.ops.aten.add.Tensor(primals_2, 1); primals_2 = None
- return [view, add]""")
+ return [view, add]""",
+ )
def test_output_aliases_intermediate_returned_multiple_times(self):
def f(a):
out = torch.mul(a, 3)
out_view = out.view(-1)
return out, out_view, out
+
inp = [torch.ones(3, 3, requires_grad=False)]
self.verify_aot_autograd(f, inp, test_mutation=True)
inp = [torch.ones(3, 3, requires_grad=True)]
@@ -1242,16 +1400,20 @@ def forward(self, primals_1, primals_2):
out = torch.mul(a, 3)
# AOTAutograd should manually generate these two output views in the epilogue.
return out.view(-1), out.view(-1)
+
inp = [torch.ones(3, 3, requires_grad=False)]
self.verify_aot_autograd(f, inp, test_mutation=True)
inp = [torch.ones(3, 3, requires_grad=True)]
fw_graph = self.verify_aot_autograd(f, inp, test_mutation=True)
- self.assertExpectedInline(fw_graph.code.strip(), """\
+ self.assertExpectedInline(
+ fw_graph.code.strip(),
+ """\
def forward(self, primals_1):
mul = torch.ops.aten.mul.Tensor(primals_1, 3); primals_1 = None
view = torch.ops.aten.view.default(mul, [-1])
view_1 = torch.ops.aten.view.default(mul, [-1])
- return [view, view_1, mul]""")
+ return [view, view_1, mul]""",
+ )
def test_output_aliases_intermediate_and_returned(self):
def f(a):
@@ -1259,15 +1421,19 @@ def forward(self, primals_1):
# AOTAutograd should manually generate the first output (a view of an intermediate)
# but not the second (which is itself the intermediate for the first)
return out.view(-1), out
+
inp = [torch.ones(3, 3, requires_grad=False)]
self.verify_aot_autograd(f, inp, test_mutation=True)
inp = [torch.ones(3, 3, requires_grad=True)]
fw_graph = self.verify_aot_autograd(f, inp, test_mutation=True)
- self.assertExpectedInline(fw_graph.code.strip(), """\
+ self.assertExpectedInline(
+ fw_graph.code.strip(),
+ """\
def forward(self, primals_1):
mul = torch.ops.aten.mul.Tensor(primals_1, 3); primals_1 = None
view = torch.ops.aten.view.default(mul, [-1])
- return [view, mul]""")
+ return [view, mul]""",
+ )
def test_output_aliases_intermediate_and_returned_flipped(self):
def f(a):
@@ -1275,15 +1441,19 @@ def forward(self, primals_1):
# AOTAutograd should manually generate the first output (a view of an intermediate)
# but not the second (which is itself the intermediate for the first)
return out, out.view(-1)
+
inp = [torch.ones(3, 3, requires_grad=False)]
self.verify_aot_autograd(f, inp, test_mutation=True)
inp = [torch.ones(3, 3, requires_grad=True)]
fw_graph = self.verify_aot_autograd(f, inp, test_mutation=True)
- self.assertExpectedInline(fw_graph.code.strip(), """\
+ self.assertExpectedInline(
+ fw_graph.code.strip(),
+ """\
def forward(self, primals_1):
mul = torch.ops.aten.mul.Tensor(primals_1, 3); primals_1 = None
view = torch.ops.aten.view.default(mul, [-1])
- return [mul, view]""")
+ return [mul, view]""",
+ )
def test_output_aliases_intermediate_and_returned_different_grad(self):
def f(a):
@@ -1291,11 +1461,14 @@ def forward(self, primals_1):
# AOTAutograd should manually generate the first output (a view of an intermediate)
# but not the second (which is itself the intermediate for the first)
return out.view(-1), out, out[0].detach()
+
inp = [torch.ones(3, 3, requires_grad=False)]
self.verify_aot_autograd(f, inp, test_mutation=True)
inp = [torch.ones(3, 3, requires_grad=True)]
fw_graph = self.verify_aot_autograd(f, inp, test_mutation=True)
- self.assertExpectedInline(fw_graph.code.strip(), """\
+ self.assertExpectedInline(
+ fw_graph.code.strip(),
+ """\
def forward(self, primals_1):
mul = torch.ops.aten.mul.Tensor(primals_1, 3); primals_1 = None
view = torch.ops.aten.view.default(mul, [-1])
@@ -1303,13 +1476,15 @@ def forward(self, primals_1):
detach = torch.ops.aten.detach.default(select); select = None
detach_1 = torch.ops.aten.detach.default(detach); detach = None
detach_2 = torch.ops.aten.detach.default(detach_1); detach_1 = None
- return [view, mul, detach_2]""")
+ return [view, mul, detach_2]""",
+ )
def test_output_aliases_intermediate_inplace_view(self):
def f(a):
out = torch.mul(a, 3)
out.t_()
return out
+
inp = [torch.ones(2, 4, requires_grad=True)]
# TODO: fix this test.
@@ -1325,17 +1500,20 @@ def forward(self, primals_1):
# `out` will show up as having OutputType.non_alias,
# and ._is_view() == False
return out, a + 1
+
inp = [torch.ones(2, 4, requires_grad=False)]
self.verify_aot_autograd(f, inp, test_mutation=True)
inp = [torch.ones(2, 4, requires_grad=True)]
fw_graph = self.verify_aot_autograd(f, inp, test_mutation=True)
- self.assertExpectedInline(fw_graph.code.strip(), """\
+ self.assertExpectedInline(
+ fw_graph.code.strip(),
+ """\
def forward(self, primals_1):
mul = torch.ops.aten.mul.Tensor(primals_1, 3)
t = torch.ops.aten.t.default(mul); mul = None
add = torch.ops.aten.add.Tensor(primals_1, 1); primals_1 = None
- return [t, add]""")
-
+ return [t, add]""",
+ )
def test_output_aliases_intermediate_inplace_view_and_view(self):
def f(a):
@@ -1344,6 +1522,7 @@ def forward(self, primals_1):
out.t_()
out_view2 = out.unsqueeze(0)
return out_view, out, out_view2
+
inp = [torch.ones(2, 4, requires_grad=True)]
# TODO: fix this test.
@@ -1356,18 +1535,22 @@ def forward(self, primals_1):
out2 = torch.mul(a, 4)
# AOTAutograd should manually generate these two output views in the epilogue.
return out1.view(-1), out2.transpose(1, 0), out1.transpose(1, 0)
+
inp = [torch.ones(3, 3, requires_grad=False)]
self.verify_aot_autograd(f, inp, test_mutation=True)
inp = [torch.ones(3, 3, requires_grad=True)]
fw_graph = self.verify_aot_autograd(f, inp, test_mutation=True)
- self.assertExpectedInline(fw_graph.code.strip(), """\
+ self.assertExpectedInline(
+ fw_graph.code.strip(),
+ """\
def forward(self, primals_1):
mul = torch.ops.aten.mul.Tensor(primals_1, 3)
mul_1 = torch.ops.aten.mul.Tensor(primals_1, 4); primals_1 = None
view = torch.ops.aten.view.default(mul, [-1])
transpose = torch.ops.aten.transpose.int(mul_1, 1, 0); mul_1 = None
transpose_1 = torch.ops.aten.transpose.int(mul, 1, 0)
- return [view, transpose, transpose_1, mul]""")
+ return [view, transpose, transpose_1, mul]""",
+ )
def test_output_all_alias_types(self):
# There are 3 types of aliasing that require us to return metadata in the compiled fw:
@@ -1384,11 +1567,17 @@ def forward(self, primals_1):
x = torch.ones(1, 2, 4, requires_grad=req_grad).clone()
return [(x,), (x,)]
- self.verify_aot_autograd(f, partial(inp_callable, req_grad=False), test_mutation=True)
- fw_graph = self.verify_aot_autograd(f, partial(inp_callable, req_grad=True), test_mutation=True)
+ self.verify_aot_autograd(
+ f, partial(inp_callable, req_grad=False), test_mutation=True
+ )
+ fw_graph = self.verify_aot_autograd(
+ f, partial(inp_callable, req_grad=True), test_mutation=True
+ )
# TODO: make this test run with dynamic shapes so it is more meaningful
# metadata output order: (a_updated_meta, out1_meta, out2_meta, out3_meta)
- self.assertExpectedInline(fw_graph.code.strip(), """\
+ self.assertExpectedInline(
+ fw_graph.code.strip(),
+ """\
def forward(self, primals_1):
view = torch.ops.aten.view.default(primals_1, [1, 2, 4]); primals_1 = None
transpose = torch.ops.aten.transpose.int(view, 1, 0); view = None
@@ -1396,7 +1585,8 @@ def forward(self, primals_1):
squeeze = torch.ops.aten.squeeze.default(mul)
transpose_1 = torch.ops.aten.transpose.int(mul, 1, 0)
unsqueeze = torch.ops.aten.unsqueeze.default(transpose, 0)
- return [transpose, squeeze, transpose_1, unsqueeze, mul]""")
+ return [transpose, squeeze, transpose_1, unsqueeze, mul]""",
+ )
@parametrize("req_grad", [False, True])
def test_subclass_metadata_mutation(self, req_grad):
@@ -1410,19 +1600,30 @@ def forward(self, primals_1):
return [(x,), (x,)]
# See https://github.com/pytorch/pytorch/issues/114975
- with self.assertRaisesRegex(RuntimeError, "Metadata mutations are currently not allowed on tensor subclasses"):
- self.verify_aot_autograd(f, partial(inp_callable, req_grad=req_grad), test_mutation=True, make_inputs_subclasses=True)
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "Metadata mutations are currently not allowed on tensor subclasses",
+ ):
+ self.verify_aot_autograd(
+ f,
+ partial(inp_callable, req_grad=req_grad),
+ test_mutation=True,
+ make_inputs_subclasses=True,
+ )
def test_input_data_and_metadata_mutation(self):
def f(a):
a.t_()
a[0].mul_(2)
return a.view(a.shape)
+
inp = [torch.ones(3, 3, requires_grad=False)]
self.verify_aot_autograd(f, inp, test_mutation=True)
inp = [torch.ones(3, 3, requires_grad=True)]
fw_graph = self.verify_aot_autograd(f, inp, test_mutation=True)
- self.assertExpectedInline(fw_graph.code.strip(), """\
+ self.assertExpectedInline(
+ fw_graph.code.strip(),
+ """\
def forward(self, primals_1):
clone = torch.ops.aten.clone.default(primals_1); primals_1 = None
t = torch.ops.aten.t.default(clone)
@@ -1434,7 +1635,8 @@ def forward(self, primals_1):
t_4 = torch.ops.aten.t.default(t_2)
t_6 = torch.ops.aten.t.default(t_2); t_2 = None
view_1 = torch.ops.aten.view.default(t_6, [3, 3]); t_6 = None
- return [t_4, view_1]""")
+ return [t_4, view_1]""",
+ )
def test_view_and_inplace_view(self):
def f(a, b):
@@ -1444,24 +1646,28 @@ def forward(self, primals_1):
def create_inp(req_grad):
return [
torch.ones(3, 3, requires_grad=req_grad),
- torch.ones(3, 3, requires_grad=req_grad)
+ torch.ones(3, 3, requires_grad=req_grad),
]
self.verify_aot_autograd(f, create_inp(False), test_mutation=True)
fw_graph = self.verify_aot_autograd(f, create_inp(True), test_mutation=True)
- self.assertExpectedInline(fw_graph.code.strip(), """\
+ self.assertExpectedInline(
+ fw_graph.code.strip(),
+ """\
def forward(self, primals_1, primals_2):
view = torch.ops.aten.view.default(primals_1, [3, 3]); primals_1 = None
t = torch.ops.aten.t.default(view); view = None
view_1 = torch.ops.aten.view.default(primals_2, [3, 3]); primals_2 = None
view_2 = torch.ops.aten.view.default(t, [3, 3])
- return [t, view_1, view_2]""")
+ return [t, view_1, view_2]""",
+ )
def test_view_detach(self):
def f(a):
tmp = a.detach()
a.mul_(2)
return a, tmp
+
inp = [torch.ones(3, 3, requires_grad=True)]
self.verify_aot_autograd(f, inp, test_mutation=True)
inp = [torch.ones(3, 3, requires_grad=False)]
@@ -1471,6 +1677,7 @@ def forward(self, primals_1, primals_2):
def f(a, b):
a.requires_grad_(True)
return a.mul(3), b.mul(4)
+
inp = [
            # First inp doesn't require grad, but we switch it on
torch.ones(3, 3, requires_grad=False),
@@ -1478,11 +1685,14 @@ def forward(self, primals_1, primals_2):
]
fw_graph = self.verify_aot_autograd(f, inp, test_mutation=True)
- self.assertExpectedInline(fw_graph.code.strip(), """\
+ self.assertExpectedInline(
+ fw_graph.code.strip(),
+ """\
def forward(self, primals_1, primals_2):
mul = torch.ops.aten.mul.Tensor(primals_1, 3); primals_1 = None
mul_1 = torch.ops.aten.mul.Tensor(primals_2, 4); primals_2 = None
- return [mul, mul_1]""")
+ return [mul, mul_1]""",
+ )
# This is a torture test:
# a and b get turned into a synthetic base in the compiled graph
@@ -1504,12 +1714,32 @@ def forward(self, primals_1, primals_2):
inp2 = x[0]
return [base], [inp1, inp2]
- self.verify_aot_autograd(f, partial(inp_callable, req_grad=False), test_mutation=True)
- self.verify_aot_autograd(f, partial(inp_callable, req_grad=True), test_mutation=True)
- with self.assertRaisesRegex(RuntimeError, "Encountered aliased inputs that are mutated in the graph, but"):
- self.verify_aot_autograd(f, partial(inp_callable, req_grad=False), test_mutation=True, make_inputs_subclasses=True)
- with self.assertRaisesRegex(RuntimeError, "Encountered aliased inputs that are mutated in the graph, but"):
- self.verify_aot_autograd(f, partial(inp_callable, req_grad=True), test_mutation=True, make_inputs_subclasses=True)
+ self.verify_aot_autograd(
+ f, partial(inp_callable, req_grad=False), test_mutation=True
+ )
+ self.verify_aot_autograd(
+ f, partial(inp_callable, req_grad=True), test_mutation=True
+ )
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "Encountered aliased inputs that are mutated in the graph, but",
+ ):
+ self.verify_aot_autograd(
+ f,
+ partial(inp_callable, req_grad=False),
+ test_mutation=True,
+ make_inputs_subclasses=True,
+ )
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "Encountered aliased inputs that are mutated in the graph, but",
+ ):
+ self.verify_aot_autograd(
+ f,
+ partial(inp_callable, req_grad=True),
+ test_mutation=True,
+ make_inputs_subclasses=True,
+ )
# https://github.com/pytorch/pytorch/issues/106456
def test_input_mutation_noncontiguous(self):
@@ -1524,18 +1754,32 @@ def forward(self, primals_1, primals_2):
inp = x[:, 0]
return [base], [inp]
- self.verify_aot_autograd(f, partial(inp_callable, req_grad=False), test_mutation=True)
- self.verify_aot_autograd(f, partial(inp_callable, req_grad=True), test_mutation=True)
+ self.verify_aot_autograd(
+ f, partial(inp_callable, req_grad=False), test_mutation=True
+ )
+ self.verify_aot_autograd(
+ f, partial(inp_callable, req_grad=True), test_mutation=True
+ )
with self.assertRaisesRegex(
RuntimeError,
- "Mutations on non-contiguous inputs are currently not allowed on tensor subclasses"
+ "Mutations on non-contiguous inputs are currently not allowed on tensor subclasses",
):
- self.verify_aot_autograd(f, partial(inp_callable, req_grad=False), test_mutation=True, make_inputs_subclasses=True)
+ self.verify_aot_autograd(
+ f,
+ partial(inp_callable, req_grad=False),
+ test_mutation=True,
+ make_inputs_subclasses=True,
+ )
with self.assertRaisesRegex(
RuntimeError,
- "Mutations on non-contiguous inputs are currently not allowed on tensor subclasses"
+ "Mutations on non-contiguous inputs are currently not allowed on tensor subclasses",
):
- self.verify_aot_autograd(f, partial(inp_callable, req_grad=True), test_mutation=True, make_inputs_subclasses=True)
+ self.verify_aot_autograd(
+ f,
+ partial(inp_callable, req_grad=True),
+ test_mutation=True,
+ make_inputs_subclasses=True,
+ )
# Mutations in the backward are allowed as long as the mutated object does not require grad
def test_backward_mutation_data(self):
@@ -1547,7 +1791,7 @@ def forward(self, primals_1, primals_2):
@staticmethod
def backward(ctx, grad_output):
- x, = ctx.saved_tensors
+ (x,) = ctx.saved_tensors
# bw mutation
x.mul_(2)
return grad_output.clone()
@@ -1568,7 +1812,9 @@ def forward(self, primals_1, primals_2):
torch.ones(3, 3, requires_grad=True),
torch.ones(3, 3, requires_grad=True),
]
- with self.assertRaisesRegex(AssertionError, "input that requires_grad and was mutated in the backward"):
+ with self.assertRaisesRegex(
+ AssertionError, "input that requires_grad and was mutated in the backward"
+ ):
self.verify_aot_autograd(f, inp_grad, test_mutation=True)
def test_backward_mutation_metadata(self):
@@ -1580,7 +1826,7 @@ def forward(self, primals_1, primals_2):
@staticmethod
def backward(ctx, grad_a, grad_b):
- b, = ctx.saved_tensors
+ (b,) = ctx.saved_tensors
# bw metadata mutation
b.transpose_(1, 0)
return grad_a.clone(), grad_b.clone()
@@ -1595,7 +1841,9 @@ def forward(self, primals_1, primals_2):
torch.ones(3, 3, requires_grad=False),
]
- with self.assertRaisesRegex(AssertionError, "input that had its metadata mutated in the backward"):
+ with self.assertRaisesRegex(
+ AssertionError, "input that had its metadata mutated in the backward"
+ ):
self.verify_aot_autograd(f, inp_no_grad, test_mutation=True)
def test_backward_mutation_on_grad_out(self):
@@ -1619,7 +1867,9 @@ def forward(self, primals_1, primals_2):
torch.ones(3, 3, requires_grad=True),
]
f_compiled = aot_function(f, nop)
- with self.assertRaisesRegex(AssertionError, "input to the backward that was mutated during the backward"):
+ with self.assertRaisesRegex(
+ AssertionError, "input to the backward that was mutated during the backward"
+ ):
out = f_compiled(*inp_grad)
# Partially addresses https://github.com/pytorch/pytorch/issues/106457
@@ -1638,17 +1888,36 @@ def forward(self, primals_1, primals_2):
b = x[2:4]
return [base], [a, b]
- fw_graph = self.verify_aot_autograd(f, partial(inp_callable1, req_grad=False), test_mutation=True)
- self.verify_aot_autograd(f, partial(inp_callable1, req_grad=True), test_mutation=True)
- self.verify_aot_autograd(f, partial(inp_callable1, req_grad=False), test_mutation=True, make_inputs_subclasses=True)
+ fw_graph = self.verify_aot_autograd(
+ f, partial(inp_callable1, req_grad=False), test_mutation=True
+ )
+ self.verify_aot_autograd(
+ f, partial(inp_callable1, req_grad=True), test_mutation=True
+ )
+ self.verify_aot_autograd(
+ f,
+ partial(inp_callable1, req_grad=False),
+ test_mutation=True,
+ make_inputs_subclasses=True,
+ )
# Input mutations on subclasses with training graphs fail backward guards today.
- with self.assertRaisesRegex(AssertionError, "attempted to compile the backward with incorrect subclass metadata"):
- self.verify_aot_autograd(f, partial(inp_callable1, req_grad=True), test_mutation=True, make_inputs_subclasses=True)
+ with self.assertRaisesRegex(
+ AssertionError,
+ "attempted to compile the backward with incorrect subclass metadata",
+ ):
+ self.verify_aot_autograd(
+ f,
+ partial(inp_callable1, req_grad=True),
+ test_mutation=True,
+ make_inputs_subclasses=True,
+ )
# Important characteristic: the graph takes in 2 inputs!
# That shows that we didn't try to run our complicated synthetic base logic,
# because we successfully detected false aliasing across the two inputs.
- self.assertExpectedInline(fw_graph.code.strip(), """\
+ self.assertExpectedInline(
+ fw_graph.code.strip(),
+ """\
def forward(self, arg0_1, arg1_1):
mul = torch.ops.aten.mul.Tensor(arg0_1, 3); arg0_1 = None
mul_1 = torch.ops.aten.mul.Tensor(arg1_1, 2); arg1_1 = None
@@ -1657,7 +1926,8 @@ def forward(self, arg0_1, arg1_1):
clone_1 = torch.ops.aten.clone.default(mul_1)
view_1 = torch.ops.aten.view.default(clone_1, [-1]); clone_1 = None
add = torch.ops.aten.add.Tensor(view, view_1); view = view_1 = None
- return (mul, mul_1, add)""")
+ return (mul, mul_1, add)""",
+ )
# No overlap, non-contiguous: first tensor ends before second tensor start
def inp_callable2(req_grad):
@@ -1698,7 +1968,7 @@ def forward(self, arg0_1, arg1_1):
# a's last element is at offset 195 (24 total elements)
a = x.as_strided((2, 4, 3), (110, 24, 4), storage_offset=5)
# b's first element is at offset 196: no overlap
- b = x[196:196 + a.numel()]
+ b = x[196 : 196 + a.numel()]
return [base], [a, b]
# overlap! non-contiguous
@@ -1724,17 +1994,31 @@ def forward(self, arg0_1, arg1_1):
# a's last element is at offset 195 (24 total elements)
a = x.as_strided((2, 4, 3), (110, 24, 4), storage_offset=5)
# b's first element is at offset 195: overlap!
- b = x[195:195 + a.numel()]
+ b = x[195 : 195 + a.numel()]
return [base], [a, b]
- fw_graph2 = self.verify_aot_autograd(f, partial(inp_callable2, req_grad=False), test_mutation=True)
- fw_graph3 = self.verify_aot_autograd(f, partial(inp_callable3, req_grad=False), test_mutation=True)
- fw_graph4 = self.verify_aot_autograd(f, partial(inp_callable4, req_grad=False), test_mutation=True)
- fw_graph5 = self.verify_aot_autograd(f, partial(inp_callable5, req_grad=False), test_mutation=True)
- fw_graph6 = self.verify_aot_autograd(f, partial(inp_callable6, req_grad=False), test_mutation=True)
+ fw_graph2 = self.verify_aot_autograd(
+ f, partial(inp_callable2, req_grad=False), test_mutation=True
+ )
+ fw_graph3 = self.verify_aot_autograd(
+ f, partial(inp_callable3, req_grad=False), test_mutation=True
+ )
+ fw_graph4 = self.verify_aot_autograd(
+ f, partial(inp_callable4, req_grad=False), test_mutation=True
+ )
+ fw_graph5 = self.verify_aot_autograd(
+ f, partial(inp_callable5, req_grad=False), test_mutation=True
+ )
+ fw_graph6 = self.verify_aot_autograd(
+ f, partial(inp_callable6, req_grad=False), test_mutation=True
+ )
- fw_graph_overlap1 = self.verify_aot_autograd(f, partial(inp_callable_overlap2, req_grad=False), test_mutation=True)
- fw_graph_overlap2 = self.verify_aot_autograd(f, partial(inp_callable_overlap1, req_grad=False), test_mutation=True)
+ fw_graph_overlap1 = self.verify_aot_autograd(
+ f, partial(inp_callable_overlap2, req_grad=False), test_mutation=True
+ )
+ fw_graph_overlap2 = self.verify_aot_autograd(
+ f, partial(inp_callable_overlap1, req_grad=False), test_mutation=True
+ )
# All non-overlap graphs should be the same since we detected false aliasing
self.assertEqual(str(fw_graph.code), str(fw_graph2.code))
@@ -1746,9 +2030,8 @@ def forward(self, arg0_1, arg1_1):
# All overlap graphs should be the same since we detected real aliasing
self.assertNotEqual(str(fw_graph.code), str(fw_graph_overlap1.code))
self.assertNotEqual(str(fw_graph.code), str(fw_graph_overlap2.code))
- self.assertTrue('as_strided_scatter' in str(fw_graph_overlap1.code))
- self.assertTrue('as_strided_scatter' in str(fw_graph_overlap2.code))
-
+ self.assertTrue("as_strided_scatter" in str(fw_graph_overlap1.code))
+ self.assertTrue("as_strided_scatter" in str(fw_graph_overlap2.code))
@unittest.skipIf(not torch.cuda.is_available(), "CUDA is unavailable")
def test_mem_leak_from_save_for_bw(self):
@@ -1770,8 +2053,8 @@ def forward(self, arg0_1, arg1_1):
f_compiled = aot_function(f, nop)
inps = [
- torch.ones(8, 8, device='cuda', requires_grad=True),
- torch.ones(1, 4, 1, device='cuda', requires_grad=True),
+ torch.ones(8, 8, device="cuda", requires_grad=True),
+ torch.ones(1, 4, 1, device="cuda", requires_grad=True),
]
mem_before = torch.cuda.memory_allocated()
f_compiled(*inps)
@@ -1792,10 +2075,24 @@ def forward(self, arg0_1, arg1_1):
inp2 = x[0]
return [base], [inp1, inp2]
- self.verify_aot_autograd(f, partial(inp_callable, req_grad=False), test_mutation=True)
- self.verify_aot_autograd(f, partial(inp_callable, req_grad=True), test_mutation=True)
- self.verify_aot_autograd(f, partial(inp_callable, req_grad=False), test_mutation=True, make_inputs_subclasses=True)
- self.verify_aot_autograd(f, partial(inp_callable, req_grad=True), test_mutation=True, make_inputs_subclasses=True)
+ self.verify_aot_autograd(
+ f, partial(inp_callable, req_grad=False), test_mutation=True
+ )
+ self.verify_aot_autograd(
+ f, partial(inp_callable, req_grad=True), test_mutation=True
+ )
+ self.verify_aot_autograd(
+ f,
+ partial(inp_callable, req_grad=False),
+ test_mutation=True,
+ make_inputs_subclasses=True,
+ )
+ self.verify_aot_autograd(
+ f,
+ partial(inp_callable, req_grad=True),
+ test_mutation=True,
+ make_inputs_subclasses=True,
+ )
def test_input_mutation_aliases_other_input(self):
def f(a, b):
@@ -1810,13 +2107,19 @@ def forward(self, arg0_1, arg1_1):
inp2 = x[0]
return [base], [inp1, inp2]
- self.verify_aot_autograd(f, partial(inp_callable, req_grad=False), test_mutation=True)
- fw_graph = self.verify_aot_autograd(f, partial(inp_callable, req_grad=True), test_mutation=True)
+ self.verify_aot_autograd(
+ f, partial(inp_callable, req_grad=False), test_mutation=True
+ )
+ fw_graph = self.verify_aot_autograd(
+ f, partial(inp_callable, req_grad=True), test_mutation=True
+ )
# Important parts of the graph:
# - the compiled graph takes in a base, and we generate a and b (the views) off of the base
# - clone() is still in the graph, because we need to call grad() on the original (non-mutated) inputs
# - We re-generate the views *after* the clone, to preserve view relationships.
- self.assertExpectedInline(fw_graph.code.strip(), """\
+ self.assertExpectedInline(
+ fw_graph.code.strip(),
+ """\
def forward(self, primals_1):
clone = torch.ops.aten.clone.default(primals_1); primals_1 = None
as_strided = torch.ops.aten.as_strided.default(clone, [2], [1], 0)
@@ -1825,7 +2128,8 @@ def forward(self, primals_1):
as_strided_2 = torch.ops.aten.as_strided.default(as_strided_scatter, [2], [1], 0)
as_strided_5 = torch.ops.aten.as_strided.default(as_strided_scatter, [2], [1], 0)
add_1 = torch.ops.aten.add.Tensor(as_strided_2, as_strided_5); as_strided_2 = as_strided_5 = None
- return [as_strided_scatter, add_1]""") # noqa: B950
+ return [as_strided_scatter, add_1]""",
+ ) # noqa: B950
def test_input_mutation_aliases_other_input2(self):
def f(a, b):
@@ -1840,9 +2144,15 @@ def forward(self, primals_1):
inp2 = x
return [base], [inp1, inp2]
- self.verify_aot_autograd(f, partial(inp_callable, req_grad=False), test_mutation=True)
- fw_graph = self.verify_aot_autograd(f, partial(inp_callable, req_grad=True), test_mutation=True)
- self.assertExpectedInline(fw_graph.code.strip(), """\
+ self.verify_aot_autograd(
+ f, partial(inp_callable, req_grad=False), test_mutation=True
+ )
+ fw_graph = self.verify_aot_autograd(
+ f, partial(inp_callable, req_grad=True), test_mutation=True
+ )
+ self.assertExpectedInline(
+ fw_graph.code.strip(),
+ """\
def forward(self, primals_1):
clone = torch.ops.aten.clone.default(primals_1); primals_1 = None
as_strided = torch.ops.aten.as_strided.default(clone, [2], [1], 0)
@@ -1851,7 +2161,8 @@ def forward(self, primals_1):
as_strided_2 = torch.ops.aten.as_strided.default(as_strided_scatter, [2], [1], 0)
as_strided_5 = torch.ops.aten.as_strided.default(as_strided_scatter, [2, 2], [2, 1], 0)
add_1 = torch.ops.aten.add.Tensor(as_strided_2, as_strided_5); as_strided_2 = as_strided_5 = None
- return [as_strided_scatter, add_1]""") # noqa: B950
+ return [as_strided_scatter, add_1]""",
+ ) # noqa: B950
def test_input_mutation_aliases_and_output_alias(self):
def f(a, b):
@@ -1865,9 +2176,15 @@ def forward(self, primals_1):
x = base.add(1)
return [base], [x.view(-1), x.view(-1)]
- self.verify_aot_autograd(f, partial(inp_callable, req_grad=False), test_mutation=True)
- fw_graph = self.verify_aot_autograd(f, partial(inp_callable, req_grad=True), test_mutation=True)
- self.assertExpectedInline(fw_graph.code.strip(), """\
+ self.verify_aot_autograd(
+ f, partial(inp_callable, req_grad=False), test_mutation=True
+ )
+ fw_graph = self.verify_aot_autograd(
+ f, partial(inp_callable, req_grad=True), test_mutation=True
+ )
+ self.assertExpectedInline(
+ fw_graph.code.strip(),
+ """\
def forward(self, primals_1):
clone = torch.ops.aten.clone.default(primals_1); primals_1 = None
as_strided = torch.ops.aten.as_strided.default(clone, [4], [1], 0)
@@ -1875,7 +2192,8 @@ def forward(self, primals_1):
as_strided_scatter = torch.ops.aten.as_strided_scatter.default(clone, add, [4], [1], 0); clone = add = None
as_strided_8 = torch.ops.aten.as_strided.default(as_strided_scatter, [4], [1], 0)
view_1 = torch.ops.aten.view.default(as_strided_8, [4]); as_strided_8 = None
- return [as_strided_scatter, view_1]""") # noqa: B950
+ return [as_strided_scatter, view_1]""",
+ ) # noqa: B950
def test_input_aliased_with_mutation_output_alias(self):
def f(a, b, c):
@@ -1894,9 +2212,15 @@ def forward(self, primals_1):
y = base2.add(1)
return [base1, base2], [x.view(-1), y, x.view(-1)]
- self.verify_aot_autograd(f, partial(inp_callable, req_grad=False), test_mutation=True)
- fw_graph = self.verify_aot_autograd(f, partial(inp_callable, req_grad=True), test_mutation=True)
- self.assertExpectedInline(fw_graph.code.strip(), """\
+ self.verify_aot_autograd(
+ f, partial(inp_callable, req_grad=False), test_mutation=True
+ )
+ fw_graph = self.verify_aot_autograd(
+ f, partial(inp_callable, req_grad=True), test_mutation=True
+ )
+ self.assertExpectedInline(
+ fw_graph.code.strip(),
+ """\
def forward(self, primals_1, primals_2):
clone = torch.ops.aten.clone.default(primals_1); primals_1 = None
as_strided_1 = torch.ops.aten.as_strided.default(clone, [4], [1], 0)
@@ -1905,7 +2229,8 @@ def forward(self, primals_1, primals_2):
add = torch.ops.aten.add.Tensor(primals_2, 1); primals_2 = None
as_strided_7 = torch.ops.aten.as_strided.default(as_strided_scatter, [4], [1], 0)
view_1 = torch.ops.aten.view.default(as_strided_7, [-1]); as_strided_7 = None
- return [as_strided_scatter, add, view_1]""") # noqa: B950
+ return [as_strided_scatter, add, view_1]""",
+ ) # noqa: B950
def test_input_metadata_mutation_aliases(self):
def f(a, b):
@@ -1920,14 +2245,21 @@ def forward(self, primals_1, primals_2):
x = base.add(1)
return [base], [x.view(-1), x.view(-1)]
- self.verify_aot_autograd(f, partial(inp_callable, req_grad=False), test_mutation=True)
- fw_graph = self.verify_aot_autograd(f, partial(inp_callable, req_grad=True), test_mutation=True)
+ self.verify_aot_autograd(
+ f, partial(inp_callable, req_grad=False), test_mutation=True
+ )
+ fw_graph = self.verify_aot_autograd(
+ f, partial(inp_callable, req_grad=True), test_mutation=True
+ )
# Expectation: fwd() takes in 2 args, and we don't construct a synthetic base.
- self.assertExpectedInline(fw_graph.code.strip(), """\
+ self.assertExpectedInline(
+ fw_graph.code.strip(),
+ """\
def forward(self, primals_1, primals_2):
t = torch.ops.aten.t.default(primals_1); primals_1 = None
add = torch.ops.aten.add.Tensor(t, primals_2); t = primals_2 = None
- return [add]""")
+ return [add]""",
+ )
def test_input_mutation_aliases_and_none_require_gradients(self):
def f(a, b, c):
@@ -1942,13 +2274,26 @@ def forward(self, primals_1, primals_2):
x = base.add(1)
return [base, c_arg], [x.view(-1), x.view(-1), c_arg]
- self.verify_aot_autograd(f, partial(inp_callable, req_grad=False), test_mutation=True)
+ self.verify_aot_autograd(
+ f, partial(inp_callable, req_grad=False), test_mutation=True
+ )
- with self.assertRaisesRegex(RuntimeError, "is a tensor subclass. This is not supported today"):
- self.verify_aot_autograd(f, partial(inp_callable, req_grad=False), test_mutation=True, make_inputs_subclasses=True)
+ with self.assertRaisesRegex(
+ RuntimeError, "is a tensor subclass. This is not supported today"
+ ):
+ self.verify_aot_autograd(
+ f,
+ partial(inp_callable, req_grad=False),
+ test_mutation=True,
+ make_inputs_subclasses=True,
+ )
- fw_graph = self.verify_aot_autograd(f, partial(inp_callable, req_grad=True), test_mutation=True)
- self.assertExpectedInline(fw_graph.code.strip(), """\
+ fw_graph = self.verify_aot_autograd(
+ f, partial(inp_callable, req_grad=True), test_mutation=True
+ )
+ self.assertExpectedInline(
+ fw_graph.code.strip(),
+ """\
def forward(self, primals_1, primals_2):
as_strided = torch.ops.aten.as_strided.default(primals_1, [4], [1], 0)
mul = torch.ops.aten.mul.Tensor(as_strided, 2); as_strided = None
@@ -1956,7 +2301,8 @@ def forward(self, primals_1, primals_2):
as_strided_3 = torch.ops.aten.as_strided.default(as_strided_scatter, [4], [1], 0)
add = torch.ops.aten.add.Tensor(as_strided_3, 1); as_strided_3 = None
add_1 = torch.ops.aten.add.Tensor(primals_2, 1); primals_2 = None
- return [as_strided_scatter, add, add_1]""") # noqa: B950
+ return [as_strided_scatter, add, add_1]""",
+ ) # noqa: B950
def test_input_mutation_aliases_bases_out_of_order(self):
# This tests our calling convention: if b and d are aliased, then the outer calling convention
@@ -1977,17 +2323,31 @@ def forward(self, primals_1, primals_2):
# a and c alias, b and d alias
return [base1, base2], [x1.view(-1), x2.view(-1), x1.view(-1), x2.view(-1)]
- self.verify_aot_autograd(f, partial(inp_callable, req_grad=False), test_mutation=True)
+ self.verify_aot_autograd(
+ f, partial(inp_callable, req_grad=False), test_mutation=True
+ )
- with self.assertRaisesRegex(RuntimeError, "Metadata mutations are currently not allowed on tensor subclasses"):
- self.verify_aot_autograd(f, partial(inp_callable, req_grad=False), test_mutation=True, make_inputs_subclasses=True)
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "Metadata mutations are currently not allowed on tensor subclasses",
+ ):
+ self.verify_aot_autograd(
+ f,
+ partial(inp_callable, req_grad=False),
+ test_mutation=True,
+ make_inputs_subclasses=True,
+ )
- fw_graph = self.verify_aot_autograd(f, partial(inp_callable, req_grad=True), test_mutation=True)
+ fw_graph = self.verify_aot_autograd(
+ f, partial(inp_callable, req_grad=True), test_mutation=True
+ )
# 3 graph inputs: (b_d_base, a, c)
# 2 returns: (b_updated, a+c+d)
# (there are 2 original fw outs, but one is a view of b so it's not part of the graph)
# (there are also 2 input mutations, but one is a metadata-only mutation so the compiled forward doesn't return it)
- self.assertExpectedInline(fw_graph.code.strip(), """\
+ self.assertExpectedInline(
+ fw_graph.code.strip(),
+ """\
def forward(self, primals_1, primals_2, primals_3):
clone = torch.ops.aten.clone.default(primals_1); primals_1 = None
as_strided = torch.ops.aten.as_strided.default(clone, [4], [1], 0)
@@ -1999,7 +2359,8 @@ def forward(self, primals_1, primals_2, primals_3):
add_2 = torch.ops.aten.add.Tensor(add_1, unsqueeze_1); add_1 = None
as_strided_14 = torch.ops.aten.as_strided.default(as_strided_scatter, [4], [1], 0)
view_2 = torch.ops.aten.view.default(as_strided_14, [-1]); as_strided_14 = None
- return [as_strided_scatter, add_2, view_2, unsqueeze_1]""") # noqa: B950
+ return [as_strided_scatter, add_2, view_2, unsqueeze_1]""",
+ ) # noqa: B950
@unittest.skipIf(not torch.cuda.is_available(), "CUDA is unavailable")
def test_synthetic_base_base_attribute_is_none(self):
@@ -2008,7 +2369,7 @@ def forward(self, primals_1, primals_2, primals_3):
return a + b
def inp_callable():
- base = torch.ones(4, 4, device='cuda')
+ base = torch.ones(4, 4, device="cuda")
# detach() so that none of the inputs have a ._base attribute.
a = base[0].detach()
b = base[1].detach()
@@ -2017,7 +2378,6 @@ def forward(self, primals_1, primals_2, primals_3):
self.verify_aot_autograd(f, inp_callable, test_mutation=True)
-
def test_input_mutation_alias_everything(self):
# Mondo test that tests a combination of:
# input is mutated, that aliases another input (so we make a synthetic base)
@@ -2046,14 +2406,20 @@ def forward(self, primals_1, primals_2, primals_3):
c = base1_.view(-1)
return [base1, base2], [a, b, c]
- self.verify_aot_autograd(f, partial(inp_callable, req_grad=False), test_mutation=True)
- fw_graph = self.verify_aot_autograd(f, partial(inp_callable, req_grad=True), test_mutation=True)
+ self.verify_aot_autograd(
+ f, partial(inp_callable, req_grad=False), test_mutation=True
+ )
+ fw_graph = self.verify_aot_autograd(
+ f, partial(inp_callable, req_grad=True), test_mutation=True
+ )
# Expected:
# - 2 inputs in the forward: synthetic_base_a_c, b
# - 1 output in the forward: "tmp"
# out2 is an alias of an input, and will be generated off of b outside of the compiled fn
# out1 and out3 are aliases of tmp, that we generate outside of the compiled function
- self.assertExpectedInline(fw_graph.code.strip(), """\
+ self.assertExpectedInline(
+ fw_graph.code.strip(),
+ """\
def forward(self, primals_1, primals_2):
clone = torch.ops.aten.clone.default(primals_1); primals_1 = None
view = torch.ops.aten.view.default(primals_2, [2, 2]); primals_2 = None
@@ -2067,11 +2433,13 @@ def forward(self, primals_1, primals_2):
view_1 = torch.ops.aten.view.default(add, [-1])
t_1 = torch.ops.aten.t.default(t)
unsqueeze = torch.ops.aten.unsqueeze.default(view_1, 0)
- return [as_strided_scatter, t, view_1, t_1, unsqueeze, add]""") # noqa: B950
+ return [as_strided_scatter, t, view_1, t_1, unsqueeze, add]""",
+ ) # noqa: B950
def test_dynamic_shape_output_not_in_bw_graph(self):
def f(x):
return [x + 1, x.shape[0]]
+
inp = torch.ones(5, requires_grad=True)
bw_graph_cell = [None]
compiled_f = aot_function(
@@ -2089,15 +2457,21 @@ def forward(self, primals_1, primals_2):
# 1 grad_output as an input to the backward graph.
# (Otherwise, autograd will plumb a None as the value of the grad_output,
# which causes inductor to complain).
- self.assertExpectedInline(bw_graph_cell[0].code.strip(), """\
+ self.assertExpectedInline(
+ bw_graph_cell[0].code.strip(),
+ """\
def forward(self, tangents_1):
- return [tangents_1]""")
+ return [tangents_1]""",
+ )
def test_no_grad_input_output(self):
def f(a, b):
return a.cos(), b.cos(), a * b
- inp_thunks = [lambda: torch.randn(5, requires_grad=True), lambda: torch.randn(5, requires_grad=False)]
+ inp_thunks = [
+ lambda: torch.randn(5, requires_grad=True),
+ lambda: torch.randn(5, requires_grad=False),
+ ]
for inps in itertools.product(inp_thunks, repeat=2):
inps = [i() for i in inps]
self.verify_aot_autograd(f, inps)
@@ -2107,19 +2481,28 @@ def forward(self, tangents_1):
a_view = a.view(-1)
a_view.requires_grad_(True)
return a_view
+
inp = [torch.randn(3, 3), torch.randn(3, 3, requires_grad=True)]
self.verify_aot_autograd(f, inp)
def test_some_outputs_dont_require_grad_view(self):
def f(a, b):
return a.detach(), b
- inp = [torch.randn(3, 3, requires_grad=True), torch.randn(3, 3, requires_grad=True)]
+
+ inp = [
+ torch.randn(3, 3, requires_grad=True),
+ torch.randn(3, 3, requires_grad=True),
+ ]
self.verify_aot_autograd(f, inp)
def test_some_outputs_dont_require_grad_non_view(self):
def f(a, b):
return a.add(1).detach(), b
- inp = [torch.randn(3, 3, requires_grad=True), torch.randn(3, 3, requires_grad=True)]
+
+ inp = [
+ torch.randn(3, 3, requires_grad=True),
+ torch.randn(3, 3, requires_grad=True),
+ ]
self.verify_aot_autograd(f, inp)
def test_inner_grad(self):
@@ -2127,12 +2510,14 @@ def forward(self, tangents_1):
y = torch.exp(x)
z = torch.autograd.grad(y, x)
return z
+
inps = [torch.randn((), requires_grad=True)]
self.verify_aot_autograd(foo, inps)
def test_grad_context(self):
def foo(x):
return x * 2
+
inps = [torch.randn((), requires_grad=True)]
graph_size = None
@@ -2155,12 +2540,14 @@ def forward(self, tangents_1):
def test_output_dict(self):
def f(x):
- return {'a': x, 'b': x}
+ return {"a": x, "b": x}
+
inp = [torch.randn(3, 3, requires_grad=True)]
self.verify_aot_autograd(f, inp)
def f(x, y):
- return {'a': x, 'b': y + x}
+ return {"a": x, "b": y + x}
+
inp = [torch.randn(3, requires_grad=True), torch.randn(3)]
self.verify_aot_autograd(f, inp)
@@ -2174,7 +2561,7 @@ def forward(self, tangents_1):
b = torch.randn(3, requires_grad=True)
def inp_callable():
- inps = [{'a': a, 'b': b}]
+ inps = [{"a": a, "b": b}]
return inps, inps
self.verify_aot_autograd(f, inp_callable)
@@ -2200,19 +2587,22 @@ def forward(self, tangents_1):
def list_nop(f, _):
def g(inps):
return f(*inps)
+
g._boxed_call = True
return g
def f(a, b, c):
return a.sin() * b.cos() * c.sin()
+
f = aot_function(f, list_nop)
inp = [torch.randn(5, requires_grad=True) for _ in range(3)]
f(*inp).sum().backward()
- @patch('torch._functorch.aot_autograd.AOT_COUNTER', new_callable=itertools.count)
+ @patch("torch._functorch.aot_autograd.AOT_COUNTER", new_callable=itertools.count)
def test_compilation_context(self, counter):
def f(x):
return x.sin().sin()
+
count = []
def compiler(fx_g, _):
@@ -2225,7 +2615,10 @@ def forward(self, tangents_1):
f = aot_function(f, compiler)
f(torch.randn(5))
out.sum().backward()
- self.assertExpectedInline(str(count), """[(['0_forward'], 4), (['1_inference'], 4), (['0_backward'], 8)]""")
+ self.assertExpectedInline(
+ str(count),
+ """[(['0_forward'], 4), (['1_inference'], 4), (['0_backward'], 8)]""",
+ )
def test_dupe_arg(self):
def f(x, y):
@@ -2248,6 +2641,7 @@ def forward(self, tangents_1):
def f(a, b, a_):
a[0].add_(1)
return a_
+
f_compiled = aot_function(f, nop)
a = torch.ones(2)
b = torch.ones(2)
@@ -2260,7 +2654,7 @@ def forward(self, tangents_1):
self.assertEqual(out_ref, out_test)
self.assertEqual(a, a2)
- @patch('torch._functorch.aot_autograd.AOT_COUNTER', new_callable=itertools.count)
+ @patch("torch._functorch.aot_autograd.AOT_COUNTER", new_callable=itertools.count)
@patch("torch._functorch.config.debug_assert", True)
def test_invalid_dupe_left_bias(self, counter):
# This test checks that, just because only the first
@@ -2278,23 +2672,22 @@ def forward(self, tangents_1):
fxx = aot_module_simplified(F(), (x, x), nop)
self.assertExpectedRaisesInline(
- AssertionError, lambda: fxx(x, y),
- """At compilation time, graph 2 was compiled under the assumption that input 1 would be a duplicate of input 0, but at runtime this was not the case. This indicates a guard bug in AOTAutograd or Dynamo, please file a bug to PyTorch.""" # noqa: B950
+ AssertionError,
+ lambda: fxx(x, y),
+ """At compilation time, graph 2 was compiled under the assumption that input 1 would be a duplicate of input 0, but at runtime this was not the case. This indicates a guard bug in AOTAutograd or Dynamo, please file a bug to PyTorch.""", # noqa: B950
)
-
- @patch('torch._functorch.aot_autograd.AOT_COUNTER', new_callable=itertools.count)
+ @patch("torch._functorch.aot_autograd.AOT_COUNTER", new_callable=itertools.count)
@patch("torch._functorch.config.debug_assert", True)
def test_invalid_dupe(self, counter):
self._test_invalid_dupe(counter, fake=False)
# See Note: Dynamo recompilation guarding invalid grad for why this test exists
- @patch('torch._functorch.aot_autograd.AOT_COUNTER', new_callable=itertools.count)
+ @patch("torch._functorch.aot_autograd.AOT_COUNTER", new_callable=itertools.count)
@patch("torch._functorch.config.debug_assert", True)
def test_invalid_dupe_fake(self, counter):
self._test_invalid_dupe(counter, fake=True)
-
def _test_invalid_dupe(self, counter, fake):
class F(torch.nn.Module):
def forward(self, x, y):
@@ -2335,18 +2728,18 @@ def forward(self, tangents_1):
x = torch.randn(3, 3, requires_grad=True).clone()
y = torch.randn(3, 3, requires_grad=True).clone()
self.assertExpectedRaisesInline(
- AssertionError, lambda: fxx(x, y),
- """At compilation time, graph 1 was compiled under the assumption that input 1 would be a duplicate of input 0, but at runtime this was not the case. This indicates a guard bug in AOTAutograd or Dynamo, please file a bug to PyTorch.""" # noqa: B950
+ AssertionError,
+ lambda: fxx(x, y),
+ """At compilation time, graph 1 was compiled under the assumption that input 1 would be a duplicate of input 0, but at runtime this was not the case. This indicates a guard bug in AOTAutograd or Dynamo, please file a bug to PyTorch.""", # noqa: B950
)
-
- @patch('torch._functorch.aot_autograd.AOT_COUNTER', new_callable=itertools.count)
+ @patch("torch._functorch.aot_autograd.AOT_COUNTER", new_callable=itertools.count)
@patch("torch._functorch.config.debug_assert", True)
def test_invalid_requires_grad(self, counter):
self._test_invalid_requires_grad(counter, fake=False)
# See Note: Dynamo recompilation guarding invalid grad for why this test exists
- @patch('torch._functorch.aot_autograd.AOT_COUNTER', new_callable=itertools.count)
+ @patch("torch._functorch.aot_autograd.AOT_COUNTER", new_callable=itertools.count)
@patch("torch._functorch.config.debug_assert", True)
def test_invalid_requires_grad_fake(self, counter):
self._test_invalid_requires_grad(counter, fake=True)
@@ -2384,8 +2777,9 @@ def forward(self, tangents_1):
compare_equal_outs_and_grads(self, F(), fxz, (x, z))
self.assertExpectedRaisesInline(
- AssertionError, lambda: fxz(x, y),
- """At compilation time, graph 1 was compiled under the assumption that input 1 would not require grad, but at runtime this was not the case. This indicates a guard bug in AOTAutograd or Dynamo, please file a bug to PyTorch.""" # noqa: B950
+ AssertionError,
+ lambda: fxz(x, y),
+ """At compilation time, graph 1 was compiled under the assumption that input 1 would not require grad, but at runtime this was not the case. This indicates a guard bug in AOTAutograd or Dynamo, please file a bug to PyTorch.""", # noqa: B950
)
def test_custom_autograd(self):
@@ -2413,11 +2807,14 @@ def forward(self, tangents_1):
@unittest.skipIf(not torch.cuda.is_available(), "CUDA is unavailable")
def test_nonidempotent_amp(self):
def f(self_s_emb, add_3):
- einsum_2 = torch.functional.einsum('ah,th->t', self_s_emb, add_3)
+ einsum_2 = torch.functional.einsum("ah,th->t", self_s_emb, add_3)
log_softmax_2 = einsum_2.log_softmax(-1)
return (log_softmax_2,)
- args = [torch.rand((1, 256), dtype=torch.float32, device='cuda'), torch.rand((30, 256), dtype=torch.float16, device='cuda')]
+ args = [
+ torch.rand((1, 256), dtype=torch.float32, device="cuda"),
+ torch.rand((30, 256), dtype=torch.float16, device="cuda"),
+ ]
with torch.cuda.amp.autocast(enabled=True):
self.verify_aot_autograd(f, args)
@@ -2432,8 +2829,13 @@ def forward(self, tangents_1):
device = "cuda"
input_dtype = torch.float16
param_dtype = torch.float32
- weight, bias = (torch.ones(64, device=device, dtype=param_dtype, requires_grad=True) for _ in range(2))
- running_mean, running_var = (torch.ones(64, device=device, dtype=param_dtype) for _ in range(2))
+ weight, bias = (
+ torch.ones(64, device=device, dtype=param_dtype, requires_grad=True)
+ for _ in range(2)
+ )
+ running_mean, running_var = (
+ torch.ones(64, device=device, dtype=param_dtype) for _ in range(2)
+ )
def bn(x):
return torch.ops.aten.cudnn_batch_norm(
@@ -2446,10 +2848,15 @@ def forward(self, tangents_1):
0.1,
1e-05,
)
- inp = torch.ones(torch.Size([16, 64, 112, 112]), dtype=input_dtype, device=device)
+
+ inp = torch.ones(
+ torch.Size([16, 64, 112, 112]), dtype=input_dtype, device=device
+ )
ref = bn(inp)
- cudnn_batch_norm_decomp = torch._decomp.get_decompositions({torch.ops.aten.cudnn_batch_norm})
+ cudnn_batch_norm_decomp = torch._decomp.get_decompositions(
+ {torch.ops.aten.cudnn_batch_norm}
+ )
aot_fn = make_fx(bn, decomposition_table=cudnn_batch_norm_decomp)(inp)
res = aot_fn(inp)
for a, b in zip(ref, res):
@@ -2477,7 +2884,14 @@ def forward(self, tangents_1):
# TODO: assert outputs of fwd graph trace to correct symint
# e2e test that fails without symint clone fix
- af = aot_function(f, nop, partition_fn=partial(min_cut_rematerialization_partition, compiler="inductor"), dynamic=True)
+ af = aot_function(
+ f,
+ nop,
+ partition_fn=partial(
+ min_cut_rematerialization_partition, compiler="inductor"
+ ),
+ dynamic=True,
+ )
out = af(inp)
self.assertEqual(out, f(inp))
@@ -2544,7 +2958,9 @@ def forward(self, tangents_1):
fw_graph_cell = [None]
compiled_f = aot_module(
model_for_compile,
- fw_compiler=make_boxed_compiler(partial(extract_graph, graph_cell=fw_graph_cell)),
+ fw_compiler=make_boxed_compiler(
+ partial(extract_graph, graph_cell=fw_graph_cell)
+ ),
bw_compiler=nop,
keep_inference_input_mutations=True,
)
@@ -2554,7 +2970,9 @@ def forward(self, tangents_1):
out_ref = model_for_eager(inp_ref.clone())
out_test = compiled_f(inp_test.clone())
- self.assertExpectedInline(fw_graph_cell[0].code.strip(), """\
+ self.assertExpectedInline(
+ fw_graph_cell[0].code.strip(),
+ """\
def forward(self, primals_1, primals_2, primals_3, primals_4):
add = torch.ops.aten.add.Tensor(primals_3, 1)
mul = torch.ops.aten.mul.Tensor(primals_1, primals_4)
@@ -2563,7 +2981,8 @@ def forward(self, primals_1, primals_2, primals_3, primals_4):
sum_2 = torch.ops.aten.sum.default(add)
add_1 = torch.ops.aten.add.Tensor(sum_1, sum_2); sum_1 = sum_2 = None
copy_ = torch.ops.aten.copy_.default(primals_3, add); primals_3 = add = None
- return [add_1, primals_1, primals_2, primals_4, mul]""")
+ return [add_1, primals_1, primals_2, primals_4, mul]""",
+ )
self.assertEqual(out_ref, out_test)
@@ -2581,7 +3000,9 @@ def forward(self, primals_1, primals_2, primals_3, primals_4):
def __init__(self):
super().__init__()
self.register_buffer("buf", torch.ones(4, 4))
- self.w = torch.nn.Parameter(torch.Tensor([[4, 5], [1, 2], [6, 7], [8, 9]]))
+ self.w = torch.nn.Parameter(
+ torch.Tensor([[4, 5], [1, 2], [6, 7], [8, 9]])
+ )
def forward(self, x):
self.buf.add_(1)
@@ -2593,7 +3014,9 @@ def forward(self, primals_1, primals_2, primals_3, primals_4):
fw_graph_cell = [None]
compiled_f = aot_module(
model_for_compile,
- fw_compiler=make_boxed_compiler(partial(extract_graph, graph_cell=fw_graph_cell)),
+ fw_compiler=make_boxed_compiler(
+ partial(extract_graph, graph_cell=fw_graph_cell)
+ ),
bw_compiler=nop,
keep_inference_input_mutations=True,
)
@@ -2603,7 +3026,9 @@ def forward(self, primals_1, primals_2, primals_3, primals_4):
out_ref = model_for_eager(inp_ref.clone())
out_test = compiled_f(inp_test.clone())
- self.assertExpectedInline(fw_graph_cell[0].code.strip(), """\
+ self.assertExpectedInline(
+ fw_graph_cell[0].code.strip(),
+ """\
def forward(self, primals_1, primals_2, primals_3):
add = torch.ops.aten.add.Tensor(primals_2, 1)
mm = torch.ops.aten.mm.default(primals_1, primals_3)
@@ -2611,7 +3036,8 @@ def forward(self, primals_1, primals_2, primals_3):
sum_2 = torch.ops.aten.sum.default(add)
add_1 = torch.ops.aten.add.Tensor(sum_1, sum_2); sum_1 = sum_2 = None
copy_ = torch.ops.aten.copy_.default(primals_2, add); primals_2 = add = None
- return [add_1, primals_1, primals_3]""")
+ return [add_1, primals_1, primals_3]""",
+ )
self.assertEqual(out_ref, out_test)
out_ref.sum().backward()
@@ -2640,8 +3066,12 @@ def forward(self, primals_1, primals_2, primals_3):
bw_graph_cell = [None]
compiled_f = aot_module(
model_for_compile,
- fw_compiler=make_boxed_compiler(partial(extract_graph, graph_cell=fw_graph_cell)),
- bw_compiler=make_boxed_compiler(partial(extract_graph, graph_cell=bw_graph_cell)),
+ fw_compiler=make_boxed_compiler(
+ partial(extract_graph, graph_cell=fw_graph_cell)
+ ),
+ bw_compiler=make_boxed_compiler(
+ partial(extract_graph, graph_cell=bw_graph_cell)
+ ),
keep_inference_input_mutations=True,
)
inp_ref = torch.ones(20, 100, requires_grad=True)
@@ -2650,7 +3080,9 @@ def forward(self, primals_1, primals_2, primals_3):
out_ref = model_for_eager(inp_ref.clone())
out_test = compiled_f(inp_test.clone())
- self.assertExpectedInline(fw_graph_cell[0].code.strip(), """\
+ self.assertExpectedInline(
+ fw_graph_cell[0].code.strip(),
+ """\
def forward(self, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6):
add = torch.ops.aten.add.Tensor(primals_5, 1)
_native_batch_norm_legit_functional = torch.ops.aten._native_batch_norm_legit_functional.default(primals_6, primals_1, primals_2, primals_3, primals_4, True, 0.1, 1e-05); primals_2 = None
@@ -2662,7 +3094,8 @@ def forward(self, primals_1, primals_2, primals_3, primals_4, primals_5, primals
copy_ = torch.ops.aten.copy_.default(primals_3, getitem_3); primals_3 = None
copy__1 = torch.ops.aten.copy_.default(primals_4, getitem_4); primals_4 = None
copy__2 = torch.ops.aten.copy_.default(primals_5, add); primals_5 = add = None
- return [getitem, primals_1, primals_6, getitem_1, getitem_2, getitem_3, getitem_4]""") # noqa: B950
+ return [getitem, primals_1, primals_6, getitem_1, getitem_2, getitem_3, getitem_4]""", # noqa: B950
+ )
self.assertEqual(out_ref, out_test)
@@ -2673,13 +3106,16 @@ def forward(self, primals_1, primals_2, primals_3, primals_4, primals_5, primals
compile_grads = [p.grad for _, p in model_for_compile.named_parameters()]
self.assertEqual(eager_grads, compile_grads)
- self.assertExpectedInline(bw_graph_cell[0].code.strip(), """\
+ self.assertExpectedInline(
+ bw_graph_cell[0].code.strip(),
+ """\
def forward(self, primals_1, primals_6, getitem_1, getitem_2, getitem_3, getitem_4, tangents_1):
native_batch_norm_backward = torch.ops.aten.native_batch_norm_backward.default(tangents_1, primals_6, primals_1, getitem_3, getitem_4, getitem_1, getitem_2, True, 1e-05, [True, True, True]); tangents_1 = primals_6 = primals_1 = getitem_3 = getitem_4 = getitem_1 = getitem_2 = None
getitem_5 = native_batch_norm_backward[0]
getitem_6 = native_batch_norm_backward[1]
getitem_7 = native_batch_norm_backward[2]; native_batch_norm_backward = None
- return [getitem_6, getitem_7, None, None, None, getitem_5]""") # noqa: B950
+ return [getitem_6, getitem_7, None, None, None, getitem_5]""", # noqa: B950
+ )
self.assertEqual(inp_ref.grad, inp_test.grad)
@@ -2691,32 +3127,48 @@ def forward(self, primals_1, primals_6, getitem_1, getitem_2, getitem_3, getitem
bw_graph_cell = [None]
compiled_f = aot_function(
f,
- fw_compiler=make_boxed_compiler(partial(extract_graph, graph_cell=fw_graph_cell)),
- bw_compiler=make_boxed_compiler(partial(extract_graph, graph_cell=bw_graph_cell)),
+ fw_compiler=make_boxed_compiler(
+ partial(extract_graph, graph_cell=fw_graph_cell)
+ ),
+ bw_compiler=make_boxed_compiler(
+ partial(extract_graph, graph_cell=bw_graph_cell)
+ ),
keep_inference_input_mutations=True,
)
- inp_ref = (torch.ones(20, 100, requires_grad=False), torch.ones(20, 100, requires_grad=True))
- inp_test = (torch.ones(20, 100, requires_grad=False), torch.ones(20, 100, requires_grad=True))
+ inp_ref = (
+ torch.ones(20, 100, requires_grad=False),
+ torch.ones(20, 100, requires_grad=True),
+ )
+ inp_test = (
+ torch.ones(20, 100, requires_grad=False),
+ torch.ones(20, 100, requires_grad=True),
+ )
out_ref = f(*inp_ref)
out_test = compiled_f(*inp_test)
# There is no copy_ method
- self.assertExpectedInline(fw_graph_cell[0].code.strip(), """\
+ self.assertExpectedInline(
+ fw_graph_cell[0].code.strip(),
+ """\
def forward(self, primals_1, primals_2):
clone = torch.ops.aten.clone.default(primals_1); primals_1 = None
add = torch.ops.aten.add.Tensor(clone, primals_2); clone = primals_2 = None
- return [add, add]""") # noqa: B950
+ return [add, add]""",
+ ) # noqa: B950
self.assertEqual(out_ref, out_test)
out_ref.sum().backward()
out_test.sum().backward()
- self.assertExpectedInline(bw_graph_cell[0].code.strip(), """\
+ self.assertExpectedInline(
+ bw_graph_cell[0].code.strip(),
+ """\
def forward(self, tangents_1):
- return [None, tangents_1]""") # noqa: B950
+ return [None, tangents_1]""",
+ ) # noqa: B950
def test_real_weights_in_symbolic_mode(self):
from functorch.experimental import functionalize
@@ -2737,7 +3189,13 @@ def forward(self, tangents_1):
gm = make_fx(m, tracing_mode="symbolic", _allow_non_fake_inputs=True)(inp)
self.assertEqual(gm(torch.ones(2, 5)), m(torch.ones(2, 5)))
- gm_functionalized = make_fx(functionalize(gm,), tracing_mode="symbolic", _allow_non_fake_inputs=True)(inp)
+ gm_functionalized = make_fx(
+ functionalize(
+ gm,
+ ),
+ tracing_mode="symbolic",
+ _allow_non_fake_inputs=True,
+ )(inp)
self.assertEqual(gm_functionalized(torch.ones(2, 5)), m(torch.ones(2, 5)))
inp_count = 0
@@ -2756,11 +3214,14 @@ def forward(self, tangents_1):
# No more param lifting
self.assertEqual(inp_count, 1)
- with self.assertRaisesRegex(Exception, "Please convert all Tensors to FakeTensors"):
- make_fx(m, tracing_mode="symbolic", _allow_non_fake_inputs=False)(torch.randn(2, 5))
+ with self.assertRaisesRegex(
+ Exception, "Please convert all Tensors to FakeTensors"
+ ):
+ make_fx(m, tracing_mode="symbolic", _allow_non_fake_inputs=False)(
+ torch.randn(2, 5)
+ )
def test_real_weights_in_symbolic_mode_with_inplace_ops(self):
-
class M(torch.nn.Module):
def __init__(self):
super().__init__()
@@ -2809,7 +3270,9 @@ def forward(self, tangents_1):
out = f(inp)
self.assertIsNotNone(out.grad_fn)
- self.assertExpectedInline(str(out.grad_fn.__class__), """<class 'ViewBackward0'>""")
+ self.assertExpectedInline(
+ str(out.grad_fn.__class__), """<class 'ViewBackward0'>"""
+ )
def test_output_aliases_intermediate_view_meta_replay(self):
@self._compile_and_erase_bases(0, 1)
@@ -2821,10 +3284,14 @@ def forward(self, tangents_1):
out1, out2 = f(inp)
self.assertIsNotNone(out1.grad_fn)
- self.assertExpectedInline(str(out1.grad_fn.__class__), """<class 'ViewBackward0'>""")
+ self.assertExpectedInline(
+ str(out1.grad_fn.__class__), """<class 'ViewBackward0'>"""
+ )
self.assertIsNotNone(out2.grad_fn)
- self.assertExpectedInline(str(out2.grad_fn.__class__), """<class 'ViewBackward0'>""")
+ self.assertExpectedInline(
+ str(out2.grad_fn.__class__), """<class 'ViewBackward0'>"""
+ )
def test_output_aliases_output_view_meta_replay(self):
@self._compile_and_erase_bases(1)
@@ -2837,7 +3304,9 @@ def forward(self, tangents_1):
self.assertEqual(out1.untyped_storage(), out2.untyped_storage())
self.assertIsNotNone(out2.grad_fn)
- self.assertExpectedInline(str(out2.grad_fn.__class__), """<class 'ViewBackward0'>""")
+ self.assertExpectedInline(
+ str(out2.grad_fn.__class__), """<class 'ViewBackward0'>"""
+ )
def extract_graph(fx_g, _, graph_cell):
@@ -2849,9 +3318,9 @@ def get_ins_outs(fx_g):
ins = []
outs = []
for n in fx_g.graph.nodes:
- if n.op == 'placeholder':
+ if n.op == "placeholder":
ins.append(n)
- elif n.op == 'output':
+ elif n.op == "output":
outs = tuple(n.args[0])
return ins, outs
@@ -2860,17 +3329,22 @@ def get_num_ins_outs(fx_g):
return tuple(len(i) for i in get_ins_outs(fx_g))
-def get_fw_bw_graph(f, inps, partitioner=min_cut_rematerialization_partition, dynamic=False):
+def get_fw_bw_graph(
+ f, inps, partitioner=min_cut_rematerialization_partition, dynamic=False
+):
fw_graph_cell = [None]
bw_graph_cell = [None]
- aot_function(f,
- fw_compiler=partial(extract_graph, graph_cell=fw_graph_cell),
- bw_compiler=partial(extract_graph, graph_cell=bw_graph_cell),
- partition_fn=partitioner,
- decompositions=default_decompositions,
- dynamic=dynamic)(*inps).sum().backward()
+ aot_function(
+ f,
+ fw_compiler=partial(extract_graph, graph_cell=fw_graph_cell),
+ bw_compiler=partial(extract_graph, graph_cell=bw_graph_cell),
+ partition_fn=partitioner,
+ decompositions=default_decompositions,
+ dynamic=dynamic,
+ )(*inps).sum().backward()
return (fw_graph_cell[0], bw_graph_cell[0])
+
class TestMod(torch.nn.Module):
def __init__(self, fn):
super().__init__()
@@ -2880,8 +3354,8 @@ class TestMod(torch.nn.Module):
def forward(self, *args):
return self.fn(self.p, *args)
-class TestAOTExport(AOTTestCase):
+class TestAOTExport(AOTTestCase):
def test_aot_export_ban_dropout_mut_pre_dispatch(self):
def fn(p, x):
y = torch.ops.aten.dropout.default(x, 0.1, train=False)
@@ -2891,15 +3365,20 @@ class TestAOTExport(AOTTestCase):
mod = TestMod(fn)
inp = torch.randn(2, 2)
- with self.assertRaisesRegex(RuntimeError, "cannot mutate tensors with frozen storage"):
+ with self.assertRaisesRegex(
+ RuntimeError, "cannot mutate tensors with frozen storage"
+ ):
aot_export_module(mod, [inp], trace_joint=False, pre_dispatch=True)
gm, _ = aot_export_module(mod, [inp], trace_joint=False, pre_dispatch=False)
- self.assertExpectedInline(str(gm.code).strip(), """\
+ self.assertExpectedInline(
+ str(gm.code).strip(),
+ """\
def forward(self, arg0_1, arg1_1):
clone = torch.ops.aten.clone.default(arg1_1); arg1_1 = None
add = torch.ops.aten.add.Tensor(clone, 1); clone = None
- return (add,)""")
+ return (add,)""",
+ )
fw_graph_cell = [None]
bw_graph_cell = [None]
@@ -2910,16 +3389,19 @@ def forward(self, arg0_1, arg1_1):
bw_compiler=partial(extract_graph, graph_cell=bw_graph_cell),
partition_fn=default_partition,
decompositions=default_decompositions,
- dynamic=True)(*inp)
+ dynamic=True,
+ )(*inp)
fw_graph = fw_graph_cell[0]
bw_graph = bw_graph_cell[0]
- self.assertExpectedInline(str(fw_graph.code).strip(), """\
+ self.assertExpectedInline(
+ str(fw_graph.code).strip(),
+ """\
def forward(self, arg0_1, arg1_1):
clone = torch.ops.aten.clone.default(arg1_1); arg1_1 = None
add = torch.ops.aten.add.Tensor(clone, 1); clone = None
- return (add,)""")
-
+ return (add,)""",
+ )
def test_aot_export_predispatch_func_simple(self):
def fn(p, x):
@@ -2933,7 +3415,9 @@ def forward(self, arg0_1, arg1_1):
with torch.no_grad():
gm, _ = aot_export_module(mod, [inp], trace_joint=False, pre_dispatch=True)
- self.assertExpectedInline(str(gm.code).strip(), """\
+ self.assertExpectedInline(
+ str(gm.code).strip(),
+ """\
def forward(self, arg0_1, arg1_1):
add = torch.ops.aten.add.Tensor(arg1_1, 2)
_set_grad_enabled = torch._C._set_grad_enabled(False)
@@ -2941,7 +3425,8 @@ def forward(self, arg0_1, arg1_1):
_set_grad_enabled_1 = torch._C._set_grad_enabled(False)
mul = torch.ops.aten.mul.Tensor(arg1_1, 2); arg1_1 = None
add_2 = torch.ops.aten.add.Tensor(mul, add_1); mul = add_1 = None
- return (add_2,)""")
+ return (add_2,)""",
+ )
def test_aot_export_predispatch_func_composite_implicit(self):
def fn(p, x):
@@ -2955,7 +3440,9 @@ def forward(self, arg0_1, arg1_1):
with torch.no_grad():
gm, _ = aot_export_module(mod, [inp], trace_joint=False, pre_dispatch=True)
- self.assertExpectedInline(str(gm.code).strip(), """\
+ self.assertExpectedInline(
+ str(gm.code).strip(),
+ """\
def forward(self, arg0_1, arg1_1):
_set_grad_enabled = torch._C._set_grad_enabled(True)
matmul = torch.ops.aten.matmul.default(arg1_1, arg1_1)
@@ -2964,7 +3451,8 @@ def forward(self, arg0_1, arg1_1):
sum_1 = torch.ops.aten.sum.default(arg1_1); arg1_1 = None
sum_2 = torch.ops.aten.sum.default(add); add = None
add_1 = torch.ops.aten.add.Tensor(sum_1, sum_2); sum_1 = sum_2 = None
- return (add_1,)""")
+ return (add_1,)""",
+ )
def test_aot_export_predispatch_composite_implicit_inplace(self):
def fn(x, p):
@@ -2974,11 +3462,14 @@ def forward(self, arg0_1, arg1_1):
inp = torch.randn(2, 2)
gm, _ = aot_export_module(mod, [inp], trace_joint=False, pre_dispatch=True)
- self.assertExpectedInline(str(gm.code).strip(), """\
+ self.assertExpectedInline(
+ str(gm.code).strip(),
+ """\
def forward(self, arg0_1, arg1_1):
clone = torch.ops.aten.clone.default(arg0_1); arg0_1 = None
abs_1 = torch.ops.aten.abs.default(clone); clone = None
- return (abs_1,)""")
+ return (abs_1,)""",
+ )
def test_aot_export_predispatch_composite_implicit_linear(self):
class MM(torch.nn.Module):
@@ -2993,10 +3484,13 @@ def forward(self, arg0_1, arg1_1):
inp = torch.randn(2, 2)
gm, _ = aot_export_module(mod, [inp], trace_joint=False, pre_dispatch=True)
- self.assertExpectedInline(str(gm.code).strip(), """\
+ self.assertExpectedInline(
+ str(gm.code).strip(),
+ """\
def forward(self, arg0_1, arg1_1, arg2_1):
linear = torch.ops.aten.linear.default(arg2_1, arg0_1, arg1_1); arg2_1 = arg0_1 = arg1_1 = None
- return (linear,)""")
+ return (linear,)""",
+ )
@unittest.expectedFailure
def test_aot_export_predispatch_outdtype(self):
@@ -3008,16 +3502,18 @@ def forward(self, arg0_1, arg1_1, arg2_1):
def forward(self, x):
y = x + 2
y.add_(5)
- return (out_dtype(
- torch.ops.aten.mm.default, torch.int32, y, self.weight
- ),)
+ return (
+ out_dtype(torch.ops.aten.mm.default, torch.int32, y, self.weight),
+ )
weight = torch.randint(-128, 127, (5, 5), dtype=torch.int8)
mod = M(weight)
inp = torch.randint(-128, 127, (5, 5), dtype=torch.int8)
gm, _ = aot_export_module(mod, [inp], trace_joint=False, pre_dispatch=True)
- self.assertExpectedInline(str(gm.code).strip(), """\
+ self.assertExpectedInline(
+ str(gm.code).strip(),
+ """\
def forward(self, arg0_1, arg1_1):
_set_grad_enabled = torch._C._set_grad_enabled(True)
mm = torch.ops.aten.mm.default(arg1_1, arg1_1)
@@ -3026,7 +3522,8 @@ def forward(self, arg0_1, arg1_1):
sum_1 = torch.ops.aten.sum.default(arg1_1); arg1_1 = None
sum_2 = torch.ops.aten.sum.default(add); add = None
add_1 = torch.ops.aten.add.Tensor(sum_1, sum_2); sum_1 = sum_2 = None
- return (add_1,)""")
+ return (add_1,)""",
+ )
def test_aot_export_predispatch_func_view(self):
def fn(p, x):
@@ -3038,7 +3535,9 @@ def forward(self, arg0_1, arg1_1):
inp = torch.randn(2, 2)
gm, _ = aot_export_module(mod, [inp], trace_joint=False, pre_dispatch=True)
- self.assertExpectedInline(str(gm.code).strip(), """\
+ self.assertExpectedInline(
+ str(gm.code).strip(),
+ """\
def forward(self, arg0_1, arg1_1):
matmul = torch.ops.aten.matmul.default(arg1_1, arg1_1)
add = torch.ops.aten.add.Tensor(matmul, 2); matmul = None
@@ -3046,13 +3545,14 @@ def forward(self, arg0_1, arg1_1):
view_1 = torch.ops.aten.view.default(add, [1, 4]); add = None
sum_2 = torch.ops.aten.sum.default(view_1); view_1 = None
add_1 = torch.ops.aten.add.Tensor(sum_1, sum_2); sum_1 = sum_2 = None
- return (add_1,)""")
+ return (add_1,)""",
+ )
def test_aot_export_predispatch_buffer_mutation_metadata(self):
class Foo(torch.nn.Module):
def __init__(self):
super().__init__()
- self.register_buffer('foo', torch.zeros(2, 2))
+ self.register_buffer("foo", torch.zeros(2, 2))
def forward(self, x):
self.foo.add_(4)
@@ -3060,14 +3560,19 @@ def forward(self, arg0_1, arg1_1):
inp = torch.randn(2, 2)
- gm, graph_sig = aot_export_module(Foo(), [inp], trace_joint=False, pre_dispatch=True)
- self.assertExpectedInline(str(gm.code).strip(), """\
+ gm, graph_sig = aot_export_module(
+ Foo(), [inp], trace_joint=False, pre_dispatch=True
+ )
+ self.assertExpectedInline(
+ str(gm.code).strip(),
+ """\
def forward(self, arg0_1, arg1_1):
add = torch.ops.aten.add.Tensor(arg0_1, 4); arg0_1 = None
sum_1 = torch.ops.aten.sum.default(arg1_1); arg1_1 = None
sum_2 = torch.ops.aten.sum.default(add)
add_1 = torch.ops.aten.add.Tensor(sum_1, sum_2); sum_1 = sum_2 = None
- return (add, add_1)""")
+ return (add, add_1)""",
+ )
eager_mod = Foo()
output_1, output_2 = gm(torch.zeros(2, 2), inp)
eager_output = eager_mod(inp)
@@ -3092,7 +3597,9 @@ def forward(self, arg0_1, arg1_1):
with torch.no_grad():
gm, _ = aot_export_module(mod, [inp], trace_joint=False, pre_dispatch=True)
- self.assertExpectedInline(str(gm.code).strip(), """\
+ self.assertExpectedInline(
+ str(gm.code).strip(),
+ """\
def forward(self, arg0_1, arg1_1):
_set_grad_enabled = torch._C._set_grad_enabled(True)
add = torch.ops.aten.add.Tensor(arg1_1, 5)
@@ -3102,10 +3609,13 @@ def forward(self, arg0_1, arg1_1):
sin = torch.ops.aten.sin.default(add_2); add_2 = None
add_3 = torch.ops.aten.add.Tensor(cos, sin); cos = sin = None
_set_grad_enabled_1 = torch._C._set_grad_enabled(False)
- return (add_3,)""")
+ return (add_3,)""",
+ )
@unittest.skipIf(IS_WINDOWS, "Windows isn't supported for this case")
- @unittest.skipIf(not torchdynamo.is_dynamo_supported(), "TorchDynamo is not supported")
+ @unittest.skipIf(
+ not torchdynamo.is_dynamo_supported(), "TorchDynamo is not supported"
+ )
def test_aot_export_predispatch_with_cond_nested(self):
class M(torch.nn.Module):
def __init__(self):
@@ -3124,7 +3634,9 @@ def forward(self, arg0_1, arg1_1):
def true_false_fn(x):
return x.cos()
- return torch.cond(y.cos().shape[0] > 5, true_true_fn, true_false_fn, [y.cos()])
+ return torch.cond(
+ y.cos().shape[0] > 5, true_true_fn, true_false_fn, [y.cos()]
+ )
def false_fn(x):
z = x.cos()
@@ -3136,7 +3648,9 @@ def forward(self, arg0_1, arg1_1):
inp = torch.randn(2, 2)
gm, _ = aot_export_module(M(), [inp], trace_joint=False, pre_dispatch=True)
- self.assertExpectedInline(str(gm.code).strip(), """\
+ self.assertExpectedInline(
+ str(gm.code).strip(),
+ """\
def forward(self, arg0_1):
true_graph_0 = self.true_graph_0
false_graph_0 = self.false_graph_0
@@ -3144,9 +3658,12 @@ def forward(self, arg0_1):
getitem = conditional[0]; conditional = None
add = torch.ops.aten.add.Tensor(getitem, 3)
add_1 = torch.ops.aten.add.Tensor(getitem, 4); getitem = None
- return (add, add_1)""") # noqa: B950
+ return (add, add_1)""", # noqa: B950
+ )
- self.assertExpectedInline(str(gm.true_graph_0.code).strip(), """\
+ self.assertExpectedInline(
+ str(gm.true_graph_0.code).strip(),
+ """\
def forward(self, arg0_1):
sin = torch.ops.aten.sin.default(arg0_1); arg0_1 = None
add = torch.ops.aten.add.Tensor(sin, 5); sin = None
@@ -3156,17 +3673,23 @@ def forward(self, arg0_1):
false_graph_0 = self.false_graph_0
conditional = torch.ops.higher_order.cond(False, true_graph_0, false_graph_0, [cos_1]); true_graph_0 = false_graph_0 = cos_1 = None
getitem = conditional[0]; conditional = None
- return (getitem,)""") # noqa: B950
+ return (getitem,)""", # noqa: B950
+ )
- self.assertExpectedInline(str(gm.true_graph_0.true_graph_0.code).strip(), """\
+ self.assertExpectedInline(
+ str(gm.true_graph_0.true_graph_0.code).strip(),
+ """\
def forward(self, arg0_1):
sin = torch.ops.aten.sin.default(arg0_1); arg0_1 = None
add = torch.ops.aten.add.Tensor(sin, 7); sin = None
sin_1 = torch.ops.aten.sin.default(add); add = None
- return (sin_1,)""")
+ return (sin_1,)""",
+ )
@unittest.skipIf(IS_WINDOWS, "Windows isn't supported for this case")
- @unittest.skipIf(not torchdynamo.is_dynamo_supported(), "TorchDynamo is not supported")
+ @unittest.skipIf(
+ not torchdynamo.is_dynamo_supported(), "TorchDynamo is not supported"
+ )
def test_aot_export_predispatch_map_1(self):
class M(torch.nn.Module):
def __init__(self):
@@ -3186,13 +3709,20 @@ def forward(self, arg0_1):
a.add_(5)
return a + y
- return z + control_flow.map(f, z, r).sum() + control_flow.map(f, z, r).sum()
+ return (
+ z
+ + control_flow.map(f, z, r).sum()
+ + control_flow.map(f, z, r).sum()
+ )
a = torch.cond(x.shape[0] > 4, true_fn, false_fn, [x, y])
return (a + 3, a + 4)
+
inps = [torch.randn(2, 2), torch.ones(2)]
gm, _ = aot_export_module(M(), inps, trace_joint=False, pre_dispatch=True)
- self.assertExpectedInline(str(gm.code).strip(), """\
+ self.assertExpectedInline(
+ str(gm.code).strip(),
+ """\
def forward(self, arg0_1, arg1_1):
true_graph_0 = self.true_graph_0
false_graph_0 = self.false_graph_0
@@ -3200,16 +3730,22 @@ def forward(self, arg0_1, arg1_1):
getitem = conditional[0]; conditional = None
add = torch.ops.aten.add.Tensor(getitem, 3)
add_1 = torch.ops.aten.add.Tensor(getitem, 4); getitem = None
- return (add, add_1)""") # noqa: B950
- self.assertExpectedInline(str(gm.true_graph_0.code).strip(), """\
+ return (add, add_1)""", # noqa: B950
+ )
+ self.assertExpectedInline(
+ str(gm.true_graph_0.code).strip(),
+ """\
def forward(self, arg0_1, arg1_1):
sin = torch.ops.aten.sin.default(arg0_1); arg0_1 = None
add = torch.ops.aten.add.Tensor(sin, 5); sin = None
cos = torch.ops.aten.cos.default(add); add = None
sum_1 = torch.ops.aten.sum.default(arg1_1); arg1_1 = None
add_1 = torch.ops.aten.add.Tensor(cos, sum_1); cos = sum_1 = None
- return (add_1,)""")
- self.assertExpectedInline(str(gm.false_graph_0.code).strip(), """\
+ return (add_1,)""",
+ )
+ self.assertExpectedInline(
+ str(gm.false_graph_0.code).strip(),
+ """\
def forward(self, arg0_1, arg1_1):
cos = torch.ops.aten.cos.default(arg0_1); arg0_1 = None
select = torch.ops.aten.select.int(cos, 0, 0)
@@ -3224,13 +3760,17 @@ def forward(self, arg0_1, arg1_1):
getitem_1 = map_impl_1[0]; map_impl_1 = None
sum_2 = torch.ops.aten.sum.default(getitem_1); getitem_1 = None
add_1 = torch.ops.aten.add.Tensor(add, sum_2); add = sum_2 = None
- return (add_1,)""")
- self.assertExpectedInline(str(gm.false_graph_0.body_graph_0.code).strip(), """\
+ return (add_1,)""",
+ )
+ self.assertExpectedInline(
+ str(gm.false_graph_0.body_graph_0.code).strip(),
+ """\
def forward(self, arg0_1, arg1_1):
cos = torch.ops.aten.cos.default(arg0_1); arg0_1 = None
add = torch.ops.aten.add.Tensor(cos, 5); cos = None
add_1 = torch.ops.aten.add.Tensor(add, arg1_1); add = arg1_1 = None
- return (add_1,)""")
+ return (add_1,)""",
+ )
def test_aot_export_predispatch_map_2(self):
class M(torch.nn.Module):
@@ -3249,7 +3789,9 @@ def forward(self, arg0_1, arg1_1):
inps = [torch.randn(2, 2), torch.ones(2)]
gm, _ = aot_export_module(M(), inps, trace_joint=False, pre_dispatch=True)
- self.assertExpectedInline(str(gm.code).strip(), """\
+ self.assertExpectedInline(
+ str(gm.code).strip(),
+ """\
def forward(self, arg0_1, arg1_1):
cos = torch.ops.aten.cos.default(arg0_1); arg0_1 = None
body_graph_0 = self.body_graph_0
@@ -3257,16 +3799,22 @@ def forward(self, arg0_1, arg1_1):
getitem = map_impl[0]; map_impl = None
sum_1 = torch.ops.aten.sum.default(getitem); getitem = None
add = torch.ops.aten.add.Tensor(cos, sum_1); cos = sum_1 = None
- return (add,)""") # noqa: B950
- self.assertExpectedInline(str(gm.body_graph_0.code).strip(), """\
+ return (add,)""",
+ ) # noqa: B950
+ self.assertExpectedInline(
+ str(gm.body_graph_0.code).strip(),
+ """\
def forward(self, arg0_1, arg1_1):
cos = torch.ops.aten.cos.default(arg0_1); arg0_1 = None
add = torch.ops.aten.add.Tensor(cos, 5); cos = None
add_1 = torch.ops.aten.add.Tensor(add, arg1_1); add = arg1_1 = None
- return [add_1]""")
+ return [add_1]""",
+ )
@unittest.skipIf(IS_WINDOWS, "Windows isn't supported for this case")
- @unittest.skipIf(not torchdynamo.is_dynamo_supported(), "TorchDynamo is not supported")
+ @unittest.skipIf(
+ not torchdynamo.is_dynamo_supported(), "TorchDynamo is not supported"
+ )
def test_aot_export_predispatch_with_cond(self):
class M(torch.nn.Module):
def __init__(self):
@@ -3289,7 +3837,9 @@ def forward(self, arg0_1, arg1_1):
inp = torch.randn(2, 2)
gm, _ = aot_export_module(M(), [inp], trace_joint=False, pre_dispatch=True)
- self.assertExpectedInline(str(gm.code).strip(), """\
+ self.assertExpectedInline(
+ str(gm.code).strip(),
+ """\
def forward(self, arg0_1):
true_graph_0 = self.true_graph_0
false_graph_0 = self.false_graph_0
@@ -3297,15 +3847,19 @@ def forward(self, arg0_1):
getitem = conditional[0]; conditional = None
add = torch.ops.aten.add.Tensor(getitem, 3)
add_1 = torch.ops.aten.add.Tensor(getitem, 4); getitem = None
- return (add, add_1)""") # noqa: B950
- self.assertExpectedInline(str(gm.true_graph_0.code).strip(), """\
+ return (add, add_1)""", # noqa: B950
+ )
+ self.assertExpectedInline(
+ str(gm.true_graph_0.code).strip(),
+ """\
def forward(self, arg0_1):
sin = torch.ops.aten.sin.default(arg0_1); arg0_1 = None
randn = torch.ops.aten.randn.default([2, 2], device = device(type='cpu'), pin_memory = False)
linear = torch.ops.aten.linear.default(sin, randn); sin = randn = None
add = torch.ops.aten.add.Tensor(linear, 5); linear = None
cos = torch.ops.aten.cos.default(add); add = None
- return (cos,)""")
+ return (cos,)""",
+ )
def test_aot_export_predispatch_conv_and_bn(self):
class ConvBatchnorm(torch.nn.Module):
@@ -3324,7 +3878,9 @@ def forward(self, arg0_1):
inp = torch.randn(1, 1, 3, 3)
gm, _ = aot_export_module(mod, [inp], trace_joint=False, pre_dispatch=True)
- self.assertExpectedInline(str(gm.code).strip(), """\
+ self.assertExpectedInline(
+ str(gm.code).strip(),
+ """\
def forward(self, arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1, arg7_1):
conv2d = torch.ops.aten.conv2d.default(arg7_1, arg0_1, arg1_1); arg7_1 = arg0_1 = arg1_1 = None
add = torch.ops.aten.add.Tensor(arg6_1, 1); arg6_1 = None
@@ -3332,7 +3888,8 @@ def forward(self, arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1, arg7_1
getitem = _native_batch_norm_legit_functional[0]
getitem_3 = _native_batch_norm_legit_functional[3]
getitem_4 = _native_batch_norm_legit_functional[4]; _native_batch_norm_legit_functional = None
- return (getitem_3, getitem_4, add, getitem)""") # noqa: B950
+ return (getitem_3, getitem_4, add, getitem)""", # noqa: B950
+ )
def test_aot_export_predispatch_reshape(self):
class Reshape(torch.nn.Module):
@@ -3344,11 +3901,14 @@ def forward(self, arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1, arg7_1
inp = torch.randn(2, 8)
gm, _ = aot_export_module(mod, [inp], trace_joint=False, pre_dispatch=True)
- self.assertExpectedInline(str(gm.code).strip(), """\
+ self.assertExpectedInline(
+ str(gm.code).strip(),
+ """\
def forward(self, arg0_1):
view = torch.ops.aten.view.default(arg0_1, [4, 4]); arg0_1 = None
sum_1 = torch.ops.aten.sum.default(view); view = None
- return (sum_1,)""") # noqa: B950
+ return (sum_1,)""",
+ ) # noqa: B950
def test_aot_export_predispatch_contiguous(self):
class Cont(torch.nn.Module):
@@ -3360,10 +3920,13 @@ def forward(self, arg0_1):
inp = torch.randn(2, 8)
gm, _ = aot_export_module(mod, [inp], trace_joint=False, pre_dispatch=True)
- self.assertExpectedInline(str(gm.code).strip(), """\
+ self.assertExpectedInline(
+ str(gm.code).strip(),
+ """\
def forward(self, arg0_1):
sum_1 = torch.ops.aten.sum.default(arg0_1); arg0_1 = None
- return (sum_1,)""") # noqa: B950
+ return (sum_1,)""",
+ ) # noqa: B950
def test_aot_export_module_joint(self):
class ConvBatchnormRelu(torch.nn.Module):
@@ -3383,13 +3946,17 @@ def forward(self, arg0_1):
mod.train()
inp = torch.randn(1, 1, 3, 3)
o_ref = mod(inp)
- fx_g, signature = aot_export_module(mod, [inp], trace_joint=True, output_loss_index=0)
+ fx_g, signature = aot_export_module(
+ mod, [inp], trace_joint=True, output_loss_index=0
+ )
# Some important characteristics of the exported graph below:
# 8 arguments: 2 params from conv, 2 params from batchnorm, 2 buffers from 1 batchnorm, 1 user input
# 9 outputs: 3 mutated buffers (from batchnorm), 2 user outputs and 4 gradients (since there were 4 parameters)
for node in fx_g.graph.nodes:
node.meta.pop("stack_trace", None)
- self.assertExpectedInline(fx_g.print_readable(print_output=False), """\
+ self.assertExpectedInline(
+ fx_g.print_readable(print_output=False),
+ """\
class <lambda>(torch.nn.Module):
def forward(self, arg0_1: "f32[3, 1, 1, 1]", arg1_1: "f32[3]", arg2_1: "f32[3]", arg3_1: "f32[3]", arg4_1: "f32[3]", arg5_1: "f32[3]", arg6_1: "i64[]", arg7_1: "f32[1, 1, 3, 3]"):
# No stacktrace found for following nodes
@@ -3430,25 +3997,51 @@ class <lambda>(torch.nn.Module):
getitem_9: "f32[3, 1, 1, 1]" = convolution_backward[1]
getitem_10: "f32[3]" = convolution_backward[2]; convolution_backward = None
return (getitem_3, getitem_4, add, sum_1, detach_10, getitem_9, getitem_10, getitem_6, getitem_7)
- """) # noqa: B950
-
+ """, # noqa: B950
+ )
- self.assertExpectedInline(str(signature.parameters), """['conv.weight', 'conv.bias', 'bn.weight', 'bn.bias']""")
- self.assertExpectedInline(str(signature.buffers), """['bn.running_mean', 'bn.running_var', 'bn.num_batches_tracked']""")
+ self.assertExpectedInline(
+ str(signature.parameters),
+ """['conv.weight', 'conv.bias', 'bn.weight', 'bn.bias']""",
+ )
+ self.assertExpectedInline(
+ str(signature.buffers),
+ """['bn.running_mean', 'bn.running_var', 'bn.num_batches_tracked']""",
+ )
self.assertExpectedInline(str(signature.user_inputs), """['arg7_1']""")
- self.assertExpectedInline(str(signature.inputs_to_parameters), """{'arg0_1': 'conv.weight', 'arg1_1': 'conv.bias', 'arg2_1': 'bn.weight', 'arg3_1': 'bn.bias'}""") # noqa: B950
- self.assertExpectedInline(str(signature.inputs_to_buffers), """{'arg4_1': 'bn.running_mean', 'arg5_1': 'bn.running_var', 'arg6_1': 'bn.num_batches_tracked'}""") # noqa: B950
- self.assertExpectedInline(str(signature.buffers_to_mutate), """{'getitem_3': 'bn.running_mean', 'getitem_4': 'bn.running_var', 'add': 'bn.num_batches_tracked'}""") # noqa: B950
- self.assertExpectedInline(str(signature.backward_signature.gradients_to_parameters), """{'getitem_9': 'conv.weight', 'getitem_10': 'conv.bias', 'getitem_6': 'bn.weight', 'getitem_7': 'bn.bias'}""") # noqa: B950
- self.assertExpectedInline(str(signature.backward_signature.gradients_to_user_inputs), """{}""")
- self.assertExpectedInline(str(signature.backward_signature.loss_output), """getitem_3""")
+ self.assertExpectedInline(
+ str(signature.inputs_to_parameters),
+ """{'arg0_1': 'conv.weight', 'arg1_1': 'conv.bias', 'arg2_1': 'bn.weight', 'arg3_1': 'bn.bias'}""",
+ ) # noqa: B950
+ self.assertExpectedInline(
+ str(signature.inputs_to_buffers),
+ """{'arg4_1': 'bn.running_mean', 'arg5_1': 'bn.running_var', 'arg6_1': 'bn.num_batches_tracked'}""",
+ ) # noqa: B950
+ self.assertExpectedInline(
+ str(signature.buffers_to_mutate),
+ """{'getitem_3': 'bn.running_mean', 'getitem_4': 'bn.running_var', 'add': 'bn.num_batches_tracked'}""",
+ ) # noqa: B950
+ self.assertExpectedInline(
+ str(signature.backward_signature.gradients_to_parameters),
+ """{'getitem_9': 'conv.weight', 'getitem_10': 'conv.bias', 'getitem_6': 'bn.weight', 'getitem_7': 'bn.bias'}""",
+ ) # noqa: B950
+ self.assertExpectedInline(
+ str(signature.backward_signature.gradients_to_user_inputs), """{}"""
+ )
+ self.assertExpectedInline(
+ str(signature.backward_signature.loss_output), """getitem_3"""
+ )
# Also check the inference graph
# Main important thing here is that there are 5 total outputs: 3 total mutated buffers (from batchnorm), 2 user outputs.
- fx_g_inference, signature_inference = aot_export_module(mod, [inp], trace_joint=False)
+ fx_g_inference, signature_inference = aot_export_module(
+ mod, [inp], trace_joint=False
+ )
for node in fx_g_inference.graph.nodes:
node.meta.pop("stack_trace", None)
- self.assertExpectedInline(fx_g_inference.print_readable(print_output=False), """\
+ self.assertExpectedInline(
+ fx_g_inference.print_readable(print_output=False),
+ """\
class <lambda>(torch.nn.Module):
def forward(self, arg0_1: "f32[3, 1, 1, 1]", arg1_1: "f32[3]", arg2_1: "f32[3]", arg3_1: "f32[3]", arg4_1: "f32[3]", arg5_1: "f32[3]", arg6_1: "i64[]", arg7_1: "f32[1, 1, 3, 3]"):
# No stacktrace found for following nodes
@@ -3464,7 +4057,8 @@ class <lambda>(torch.nn.Module):
detach_1: "f32[1, 3, 3, 3]" = torch.ops.aten.detach.default(detach); detach = None
detach_2: "f32[1, 3, 3, 3]" = torch.ops.aten.detach.default(detach_1); detach_1 = None
return (getitem_3, getitem_4, add, sum_1, detach_2)
- """) # noqa: B950
+ """, # noqa: B950
+ )
# Some important characteristics of the exported graph below:
# 8 arguments: 2 params from conv, 2 params from batchnorm, 2 buffers from 1 batchnorm, 1 user input
# 9 outputs: 2 mutated buffers (from batchnorm), 2 user outputs and 4 gradients (since there were 4 parameters)
@@ -3491,7 +4085,9 @@ class <lambda>(torch.nn.Module):
y3 = y.clone().detach().requires_grad_(True)
f_graph_joint = aot_export_joint_simple(f, [x, y], trace_joint=True)
num_fw_outputs = 2
- fw_g, bw_g = default_partition(f_graph_joint, [x, y], num_fwd_outputs=num_fw_outputs)
+ fw_g, bw_g = default_partition(
+ f_graph_joint, [x, y], num_fwd_outputs=num_fw_outputs
+ )
out_ref2 = f(x2, y2)
fw_outs = fw_g(x3, y3)
out_test2, activations = fw_outs[:num_fw_outputs], fw_outs[num_fw_outputs:]
@@ -3508,6 +4104,7 @@ class <lambda>(torch.nn.Module):
def fn(p, x):
x.t_()
return (x * 2,)
+
mod = TestMod(fn)
inp = torch.randn(2, 4)
with self.assertRaisesRegex(
@@ -3530,14 +4127,17 @@ class <lambda>(torch.nn.Module):
mod = M()
inp = torch.ones(6, 4)
gm, sig = aot_export_module(mod, [inp], trace_joint=False)
- self.assertExpectedInline(str(gm.code).strip(), """\
+ self.assertExpectedInline(
+ str(gm.code).strip(),
+ """\
def forward(self, arg0_1, arg1_1):
add = torch.ops.aten.add.Tensor(arg1_1, 4); arg1_1 = None
cos = torch.ops.aten.cos.default(add)
sum_1 = torch.ops.aten.sum.default(cos); cos = None
sum_2 = torch.ops.aten.sum.default(arg0_1); arg0_1 = None
add_1 = torch.ops.aten.add.Tensor(sum_1, sum_2); sum_1 = sum_2 = None
- return (add, add_1)""") # noqa: B950
+ return (add, add_1)""",
+ ) # noqa: B950
self.assertEqual(sig.user_inputs_to_mutate, {"add": "arg1_1"})
def test_aot_export_forward_mutation_multiple_mut(self):
@@ -3549,12 +4149,17 @@ def forward(self, arg0_1, arg1_1):
def forward(self, x, y):
y.add_(4)
self.buffer1.add_(5)
- return (x.cos().sum() + y.sin().sum(), self.buffer1.sum(),)
+ return (
+ x.cos().sum() + y.sin().sum(),
+ self.buffer1.sum(),
+ )
mod = M()
inp = [torch.ones(6, 4), torch.zeros(6, 4)]
gm, sig = aot_export_module(mod, inp, trace_joint=False)
- self.assertExpectedInline(str(gm.code).strip(), """\
+ self.assertExpectedInline(
+ str(gm.code).strip(),
+ """\
def forward(self, arg0_1, arg1_1, arg2_1):
add = torch.ops.aten.add.Tensor(arg2_1, 4); arg2_1 = None
add_1 = torch.ops.aten.add.Tensor(arg0_1, 5); arg0_1 = None
@@ -3564,7 +4169,8 @@ def forward(self, arg0_1, arg1_1, arg2_1):
sum_2 = torch.ops.aten.sum.default(sin); sin = None
add_2 = torch.ops.aten.add.Tensor(sum_1, sum_2); sum_1 = sum_2 = None
sum_3 = torch.ops.aten.sum.default(add_1)
- return (add_1, add, add_2, sum_3)""") # noqa: B950
+ return (add_1, add, add_2, sum_3)""",
+ ) # noqa: B950
self.assertEqual(sig.user_inputs_to_mutate, {"add": "arg2_1"})
self.assertEqual(sig.buffers_to_mutate, {"add_1": "buffer1"})
@@ -3577,7 +4183,8 @@ def forward(self, arg0_1, arg1_1, arg2_1):
mod = M()
inp = torch.randn(2, requires_grad=True)
with self.assertRaisesRegex(
- RuntimeError, "Found a graph input that requires gradients, and received a mutation"
+ RuntimeError,
+ "Found a graph input that requires gradients, and received a mutation",
):
aot_export_module(mod, [inp], trace_joint=False)
@@ -3585,10 +4192,12 @@ def forward(self, arg0_1, arg1_1, arg2_1):
def fn(p, x):
p.mul_(2)
return (p + x,)
+
mod = TestMod(fn)
inp = torch.randn(2)
with self.assertRaisesRegex(
- RuntimeError, "Found a graph input that requires gradients, and received a mutation"
+ RuntimeError,
+ "Found a graph input that requires gradients, and received a mutation",
):
aot_export_joint_simple(fn, [mod.p, inp], trace_joint=False)
aot_export_joint_simple(fn, [mod.p, inp], trace_joint=True)
@@ -3598,6 +4207,7 @@ def forward(self, arg0_1, arg1_1, arg2_1):
def fn(p, x, y):
x.mul_(2)
return (x + y,)
+
mod = TestMod(fn)
inp = torch.randn(2)
inp2 = inp.view(-1)
@@ -3612,6 +4222,7 @@ def forward(self, arg0_1, arg1_1, arg2_1):
def fn(p, x, y):
x.mul_(2)
return (x + y,)
+
mod = TestMod(fn)
inp = torch.randn(2)
with self.assertRaisesRegex(
@@ -3625,15 +4236,19 @@ def forward(self, arg0_1, arg1_1, arg2_1):
def fn(p, x):
out = p * x
return out, out.sum()
+
mod = TestMod(fn)
inp = torch.randn(2)
with self.assertRaisesRegex(
- RuntimeError, "Found an output of the forward that requires gradients, that was not"
+ RuntimeError,
+ "Found an output of the forward that requires gradients, that was not",
):
aot_export_module(mod, [inp], trace_joint=True, output_loss_index=1)
@unittest.skipIf(IS_WINDOWS, "Windows isn't supported for this case")
- @unittest.skipIf(not torch._dynamo.is_dynamo_supported(), "Cond needs dynamo to run")
+ @unittest.skipIf(
+ not torch._dynamo.is_dynamo_supported(), "Cond needs dynamo to run"
+ )
def test_aot_export_with_torch_cond(self):
class M(torch.nn.Module):
def __init__(self):
@@ -3655,7 +4270,9 @@ def forward(self, arg0_1, arg1_1, arg2_1):
inp = torch.randn(3, 4)
gm, _ = aot_export_module(M(), (inp,), trace_joint=False)
- self.assertExpectedInline(gm.code.strip(), """\
+ self.assertExpectedInline(
+ gm.code.strip(),
+ """\
def forward(self, arg0_1):
true_graph_0 = self.true_graph_0
false_graph_0 = self.false_graph_0
@@ -3663,30 +4280,39 @@ def forward(self, arg0_1):
getitem = conditional[0]; conditional = None
add = torch.ops.aten.add.Tensor(getitem, 3)
add_1 = torch.ops.aten.add.Tensor(getitem, 4); getitem = None
- return (add, add_1)""") # noqa: B950
+ return (add, add_1)""", # noqa: B950
+ )
- self.assertExpectedInline(gm.true_graph_0.code.strip(), """\
+ self.assertExpectedInline(
+ gm.true_graph_0.code.strip(),
+ """\
def forward(self, arg0_1):
add = torch.ops.aten.add.Tensor(arg0_1, 4)
add_1 = torch.ops.aten.add.Tensor(add, 5); add = None
cos = torch.ops.aten.cos.default(arg0_1); arg0_1 = None
- return (cos,)""")
+ return (cos,)""",
+ )
- self.assertExpectedInline(gm.false_graph_0.code.strip(), """\
+ self.assertExpectedInline(
+ gm.false_graph_0.code.strip(),
+ """\
def forward(self, arg0_1):
add = torch.ops.aten.add.Tensor(arg0_1, 5)
add_1 = torch.ops.aten.add.Tensor(add, 6); add = None
sin = torch.ops.aten.sin.default(arg0_1); arg0_1 = None
- return (sin,)""")
+ return (sin,)""",
+ )
def test_aot_export_simplified_pytrees_banned(self):
def fn(inps):
return (inps[0] + inps[1],)
+
inp1 = torch.randn(2)
inp2 = torch.randn(2)
inps = [inp1, inp2]
with self.assertRaisesRegex(
- RuntimeError, "aot_export_joint_simple requires individual inputs not to be pytrees"
+ RuntimeError,
+ "aot_export_joint_simple requires individual inputs not to be pytrees",
):
aot_export_joint_simple(fn, [inps], trace_joint=False)
aot_export_joint_simple(fn, [inps], trace_joint=True)
@@ -3694,10 +4320,14 @@ def forward(self, arg0_1):
def test_aot_export_functionalized_rng_banned(self):
def fn(p, x):
return (p + x,)
+
mod = TestMod(fn)
inp = torch.randn(2)
- with patch("functorch.compile.config.functionalize_rng_ops", True), self.assertRaisesRegex(
- RuntimeError, "Functionalized RNG is not currently supported in the aot_export"
+ with patch(
+ "functorch.compile.config.functionalize_rng_ops", True
+ ), self.assertRaisesRegex(
+ RuntimeError,
+ "Functionalized RNG is not currently supported in the aot_export",
):
aot_export_joint_simple(fn, [mod.p, inp], trace_joint=False)
aot_export_joint_simple(fn, [mod.p, inp], trace_joint=True)
@@ -3723,7 +4353,9 @@ class TestPartitioning(AOTTestCase):
def compile_fn(x, _):
return x
- compiled_fn = compiled_function(fn, compile_fn, compile_fn, min_cut_rematerialization_partition)
+ compiled_fn = compiled_function(
+ fn, compile_fn, compile_fn, min_cut_rematerialization_partition
+ )
res = compiled_fn(res_a, res_b)
res.sum().backward()
assert torch.allclose(ref, res, atol=1e-3, rtol=1e-3)
@@ -3736,22 +4368,28 @@ class TestPartitioning(AOTTestCase):
class MockModule(torch.nn.Module):
def __init__(self):
super().__init__()
- self.weight = torch.nn.Parameter(torch.randn(3072, 768, requires_grad=True))
+ self.weight = torch.nn.Parameter(
+ torch.randn(3072, 768, requires_grad=True)
+ )
self.bias = torch.nn.Parameter(torch.randn(3072, requires_grad=True))
def forward(self, add_4):
- linear_4 = torch.nn.functional.linear(add_4, self.weight, bias=self.bias)
+ linear_4 = torch.nn.functional.linear(
+ add_4, self.weight, bias=self.bias
+ )
gelu = torch.nn.functional.gelu(linear_4)
return gelu
def check_meta_tensor(fx_g, _):
for node in fx_g.graph.nodes:
- if node.op != 'output':
- assert 'tensor_meta' in node.meta
+ if node.op != "output":
+ assert "tensor_meta" in node.meta
return fx_g
inp0 = torch.randn(16, 128, 768, requires_grad=True)
- inputs = [inp0, ]
+ inputs = [
+ inp0,
+ ]
mod = MockModule().to(device="cpu")
aot_mod = aot_module(mod, fw_compiler=check_meta_tensor)
aot_mod(*inputs)
@@ -3760,16 +4398,20 @@ class TestPartitioning(AOTTestCase):
mod = nn.LayerNorm([10])
def f(x, mod_weight, mod_bias):
- return torch.nn.functional.layer_norm(x, [10], mod_weight, mod_bias, eps=1e-6)
+ return torch.nn.functional.layer_norm(
+ x, [10], mod_weight, mod_bias, eps=1e-6
+ )
- fw_graph, bw_graph = get_fw_bw_graph(f, [torch.randn(3, 10, requires_grad=True), mod.weight, mod.bias],
- partitioner=default_partition)
+ fw_graph, bw_graph = get_fw_bw_graph(
+ f,
+ [torch.randn(3, 10, requires_grad=True), mod.weight, mod.bias],
+ partitioner=default_partition,
+ )
self.assertEqual(get_num_ins_outs(fw_graph), (3, 6))
self.assertEqual(get_num_ins_outs(bw_graph), (6, 3))
@unittest.skipIf(not USE_NETWORKX, "networkx not available")
def test_min_cut_partitioner_save_shape(self):
-
def f(x):
s = x.sum(dim=1)
return s
@@ -3800,6 +4442,7 @@ class TestPartitioning(AOTTestCase):
x = sb[0] + sc[0]
a_sz = (x, a.size(0))
return torch.cat([a.expand(a_sz), b, c])
+
fw_graph, bw_graph = get_fw_bw_graph(f, inp, dynamic=True)
self.assertEqual(get_num_ins_outs(fw_graph), (3, 4))
self.assertEqual(get_num_ins_outs(bw_graph), (4, 3))
@@ -3807,7 +4450,6 @@ class TestPartitioning(AOTTestCase):
self.assertTrue(all(is_sym_node(n) for n in outs[1:]))
def test_default_partitioner_output_tensor_shape_tensor(self):
-
inp = [
torch.randn(10, requires_grad=True),
torch.randn((3, 10), requires_grad=True),
@@ -3823,7 +4465,9 @@ class TestPartitioning(AOTTestCase):
a_sz = (x, a.size(0))
cat = torch.cat([a.expand(a_sz), b, c])
mm = torch.mm(cat, d)
- mm2 = torch.mm(mm, a.view(mm.size(1), a.size(0))) # this saves 4 new ints for backward. why?
+ mm2 = torch.mm(
+ mm, a.view(mm.size(1), a.size(0))
+ ) # this saves 4 new ints for backward. why?
# and what do i have to do to make it save a tensor for backward?
return cat, sb, c, mm2
@@ -3835,7 +4479,8 @@ class TestPartitioning(AOTTestCase):
bw_compiler=partial(extract_graph, graph_cell=bw_graph_cell),
partition_fn=default_partition,
decompositions=default_decompositions,
- dynamic=True)(*inp)
+ dynamic=True,
+ )(*inp)
fw_graph = fw_graph_cell[0]
(compiled_outs[0].sum() + compiled_outs[2].sum()).backward()
bw_graph = bw_graph_cell[0]
@@ -3859,7 +4504,7 @@ class TestPartitioning(AOTTestCase):
# Of the 5 original forward outputs, the 4th (c) is an input,
# which won't show up in the compiled forward graph
[False, True, True, False, False] + [False] * 4 + [True] * 4,
- [is_sym_node(n) for n in fw_graph_out_nodes]
+ [is_sym_node(n) for n in fw_graph_out_nodes],
)
real_outs = f(*inp)
@@ -3871,7 +4516,6 @@ class TestPartitioning(AOTTestCase):
@unittest.skipIf(not USE_NETWORKX, "networkx not available")
def test_min_cut_partitioner_output_tensor_shape_tensor(self):
-
inp = [
torch.randn(10, requires_grad=True),
torch.randn((3, 10), requires_grad=True),
@@ -3887,7 +4531,9 @@ class TestPartitioning(AOTTestCase):
a_sz = (x, a.size(0))
cat = torch.cat([a.expand(a_sz), b, c])
mm = torch.mm(cat, d)
- mm2 = torch.mm(mm, a.view(mm.size(1), a.size(0))) # this saves 4 new ints for backward. why?
+ mm2 = torch.mm(
+ mm, a.view(mm.size(1), a.size(0))
+ ) # this saves 4 new ints for backward. why?
# and what do i have to do to make it save a tensor for backward?
return cat, sb, c, mm2
@@ -3899,7 +4545,8 @@ class TestPartitioning(AOTTestCase):
bw_compiler=partial(extract_graph, graph_cell=bw_graph_cell),
partition_fn=min_cut_rematerialization_partition,
decompositions=default_decompositions,
- dynamic=True)(*inp)
+ dynamic=True,
+ )(*inp)
fw_graph = fw_graph_cell[0]
(compiled_outs[0].sum() + compiled_outs[2].sum()).backward()
bw_graph = bw_graph_cell[0]
@@ -3912,7 +4559,7 @@ class TestPartitioning(AOTTestCase):
# then 4 tensors (transposes of matricies used for mm) are saved
# finally 3 symints are saved
[False, True, True, False, False] + [False] * 4 + [True] * 3,
- [is_sym_node(n) for n in fw_graph_out_nodes]
+ [is_sym_node(n) for n in fw_graph_out_nodes],
)
real_outs = f(*inp)
@@ -3935,7 +4582,9 @@ class TestPartitioning(AOTTestCase):
x = a + b + c + d
return x.cos().cos()
- fw_graph, bw_graph = get_fw_bw_graph(f, [torch.randn(3, requires_grad=True) for _ in range(4)])
+ fw_graph, bw_graph = get_fw_bw_graph(
+ f, [torch.randn(3, requires_grad=True) for _ in range(4)]
+ )
self.assertEqual(get_num_ins_outs(fw_graph), (4, 2))
self.assertEqual(get_num_ins_outs(bw_graph), (2, 4))
@@ -3945,9 +4594,13 @@ class TestPartitioning(AOTTestCase):
return x * x * x
recomputable_ops = []
- partition_fn = partial(min_cut_rematerialization_partition, recomputable_ops=recomputable_ops)
+ partition_fn = partial(
+ min_cut_rematerialization_partition, recomputable_ops=recomputable_ops
+ )
- fw_graph, bw_graph = get_fw_bw_graph(f, [torch.randn(3, requires_grad=True)], partition_fn)
+ fw_graph, bw_graph = get_fw_bw_graph(
+ f, [torch.randn(3, requires_grad=True)], partition_fn
+ )
# Expected forward graph:
# opcode name target args kwargs
# ------------- --------- --------------- -------------------------- --------
@@ -3971,8 +4624,12 @@ class TestPartitioning(AOTTestCase):
self.assertEqual(get_num_ins_outs(bw_graph), (3, 1))
recomputable_ops = [torch.ops.aten.mul]
- partition_fn = partial(min_cut_rematerialization_partition, recomputable_ops=recomputable_ops)
- fw_graph, bw_graph = get_fw_bw_graph(f, [torch.randn(3, requires_grad=True)], partition_fn)
+ partition_fn = partial(
+ min_cut_rematerialization_partition, recomputable_ops=recomputable_ops
+ )
+ fw_graph, bw_graph = get_fw_bw_graph(
+ f, [torch.randn(3, requires_grad=True)], partition_fn
+ )
# Expected forward graph:
# opcode name target args kwargs
# ------------- --------- --------------- ---------------------- --------
@@ -4029,14 +4686,15 @@ class TestPartitioning(AOTTestCase):
return torch.mul(x, x)
inference_graph_cell = [None]
- inference_compiler = make_boxed_compiler(partial(extract_graph, graph_cell=inference_graph_cell))
+ inference_compiler = make_boxed_compiler(
+ partial(extract_graph, graph_cell=inference_graph_cell)
+ )
aot_fn = aot_function(generate, nop, inference_compiler=inference_compiler)
# Even though x requires grad, we should still get an inference graph
x = torch.randn(4, requires_grad=True)
res = aot_fn(x)
self.assertTrue(inference_graph_cell[0] is not None)
-
@unittest.skipIf(not torch.cuda.is_available(), "CUDA is unavailable")
@unittest.skipIf(not USE_TORCHVISION, "test requires torchvision")
def test_autocast(self):
@@ -4053,7 +4711,6 @@ class TestPartitioning(AOTTestCase):
class TestAOTDispatch(AOTTestCase):
-
# Tests to add cases for (non-exhaustive list, mostly for my notes):
# - subclass / mode introduced in the middle of the compiled fn
# - various input mutation / intermediate base tests
@@ -4085,7 +4742,7 @@ class TestAOTDispatch(AOTTestCase):
f,
fw_compiler=partial(extract_graph, graph_cell=fw_graph_cell),
bw_compiler=partial(extract_graph, graph_cell=bw_graph_cell),
- partition_fn=min_cut_rematerialization_partition
+ partition_fn=min_cut_rematerialization_partition,
)
out_ref = f(a_ref, b_ref)
out_test = compiled_f(a_test, b_test)
@@ -4109,14 +4766,17 @@ class TestAOTDispatch(AOTTestCase):
# while the graph itself returns two outputs (add, add_1)
# - add, add_1 correspond to the two inner dense tensors that will be wrapped
# - into a single TwoTensor output.
- self.assertExpectedInline(fw_graph_cell[0].code.strip(), """\
+ self.assertExpectedInline(
+ fw_graph_cell[0].code.strip(),
+ """\
def forward(self, primals_1, primals_2, primals_3):
mul = torch.ops.aten.mul.Tensor(primals_1, 6); primals_1 = None
mul_1 = torch.ops.aten.mul.Tensor(primals_2, 6); primals_2 = None
div = torch.ops.aten.div.Tensor(primals_3, 2); primals_3 = None
add = torch.ops.aten.add.Tensor(mul, div); mul = None
add_1 = torch.ops.aten.add.Tensor(mul_1, div); mul_1 = div = None
- return [add, add_1]""")
+ return [add, add_1]""",
+ )
# Important pieces of the graph:
# - 4 total dense outputs.
@@ -4125,13 +4785,16 @@ def forward(self, primals_1, primals_2, primals_3):
# so (mul_2, mul_3) will be wrapped into a.grad
# and (div_1, div_2) will be wrapped into b.grad
# - 4 total dense outputs,
- self.assertExpectedInline(bw_graph_cell[0].code.strip(), """\
+ self.assertExpectedInline(
+ bw_graph_cell[0].code.strip(),
+ """\
def forward(self, tangents_1, tangents_2):
div_1 = torch.ops.aten.div.Tensor(tangents_1, 2)
div_2 = torch.ops.aten.div.Tensor(tangents_2, 2)
mul_2 = torch.ops.aten.mul.Tensor(tangents_1, 6); tangents_1 = None
mul_3 = torch.ops.aten.mul.Tensor(tangents_2, 6); tangents_2 = None
- return [mul_2, mul_3, div_1, div_2]""")
+ return [mul_2, mul_3, div_1, div_2]""",
+ )
def test_aot_dispatch_inference(self):
# a is a subclass, b is not
@@ -4154,7 +4817,7 @@ def forward(self, tangents_1, tangents_2):
f,
fw_compiler=nop,
bw_compiler=nop,
- partition_fn=min_cut_rematerialization_partition
+ partition_fn=min_cut_rematerialization_partition,
)
out_ref = f(a_ref, b_ref)
out_test = compiled_f(a_test, b_test)
@@ -4190,7 +4853,7 @@ def forward(self, tangents_1, tangents_2):
f,
fw_compiler=nop,
bw_compiler=nop,
- partition_fn=min_cut_rematerialization_partition
+ partition_fn=min_cut_rematerialization_partition,
)
out_ref = f(a_ref, b_ref)
out_test = compiled_f(a_test, b_test)
@@ -4204,7 +4867,7 @@ def forward(self, tangents_1, tangents_2):
# This will eventually require a repartition + recompile
with self.assertRaisesRegex(
AssertionError,
- "incorrectly attempted to compile the backward with incorrect subclass metadata"
+ "incorrectly attempted to compile the backward with incorrect subclass metadata",
):
(out_test[0] + out_test[1]).sum().backward()
@@ -4227,7 +4890,7 @@ def forward(self, tangents_1, tangents_2):
f,
fw_compiler=nop,
bw_compiler=nop,
- partition_fn=min_cut_rematerialization_partition
+ partition_fn=min_cut_rematerialization_partition,
)
out_ref1, out_ref2 = f(a_ref, b_ref)
out_test1, out_test2 = compiled_f(a_test, b_test)
@@ -4267,7 +4930,7 @@ def forward(self, tangents_1, tangents_2):
f,
fw_compiler=nop,
bw_compiler=nop,
- partition_fn=min_cut_rematerialization_partition
+ partition_fn=min_cut_rematerialization_partition,
)
out_ref = f(a_ref, b_ref)
out_test = compiled_f(a_test, b_test)
@@ -4300,7 +4963,12 @@ def forward(self, tangents_1, tangents_2):
b1_ref = torch.arange(9, requires_grad=True, dtype=torch.float32).reshape(3, 3)
b2_ref = torch.arange(9, requires_grad=True, dtype=torch.float32).reshape(3, 3)
b_ref_base = TwoTensor(b1_ref, b2_ref)
- a_ref_base = torch.arange(9, dtype=torch.float32).reshape(3, 3).detach().requires_grad_(True)
+ a_ref_base = (
+ torch.arange(9, dtype=torch.float32)
+ .reshape(3, 3)
+ .detach()
+ .requires_grad_(True)
+ )
b_ref = b_ref_base + 1
a_ref = a_ref_base + 1
@@ -4315,7 +4983,7 @@ def forward(self, tangents_1, tangents_2):
f,
fw_compiler=nop,
bw_compiler=nop,
- partition_fn=min_cut_rematerialization_partition
+ partition_fn=min_cut_rematerialization_partition,
)
out_ref = f(a_ref, b_ref)
out_test = compiled_f(a_test, b_test)
@@ -4350,7 +5018,12 @@ def forward(self, tangents_1, tangents_2):
b1_ref = torch.arange(9, requires_grad=True, dtype=torch.float32).reshape(3, 3)
b2_ref = torch.arange(9, requires_grad=True, dtype=torch.float32).reshape(3, 3)
b_ref_base = TwoTensor(b1_ref, b2_ref)
- a_ref_base = torch.arange(9, dtype=torch.float32).reshape(3, 3).detach().requires_grad_(True)
+ a_ref_base = (
+ torch.arange(9, dtype=torch.float32)
+ .reshape(3, 3)
+ .detach()
+ .requires_grad_(True)
+ )
b_ref = b_ref_base + 1
a_ref = a_ref_base + 1
@@ -4365,7 +5038,7 @@ def forward(self, tangents_1, tangents_2):
f,
fw_compiler=nop,
bw_compiler=nop,
- partition_fn=min_cut_rematerialization_partition
+ partition_fn=min_cut_rematerialization_partition,
)
out_ref = f(a_ref, b_ref)
out_test = compiled_f(a_test, b_test)
@@ -4395,7 +5068,12 @@ def forward(self, tangents_1, tangents_2):
b1_ref = torch.arange(9, requires_grad=True, dtype=torch.float32).reshape(3, 3)
b2_ref = torch.arange(9, requires_grad=True, dtype=torch.float32).reshape(3, 3)
b_ref_base = TwoTensor(b1_ref, b2_ref)
- a_ref_base = torch.arange(9, dtype=torch.float32).reshape(3, 3).detach().requires_grad_(True)
+ a_ref_base = (
+ torch.arange(9, dtype=torch.float32)
+ .reshape(3, 3)
+ .detach()
+ .requires_grad_(True)
+ )
b_ref = b_ref_base + 1
a_ref = a_ref_base + 1
@@ -4410,7 +5088,7 @@ def forward(self, tangents_1, tangents_2):
f,
fw_compiler=nop,
bw_compiler=nop,
- partition_fn=min_cut_rematerialization_partition
+ partition_fn=min_cut_rematerialization_partition,
)
out_ref1, out_ref2 = f(a_ref, b_ref)
out_test1, out_test2 = compiled_f(a_test, b_test)
@@ -4439,7 +5117,7 @@ class TestAOTModuleSimplified(AOTTestCase):
self.linear = torch.nn.Linear(20, 30)
def forward(self, x, y):
- return (self.linear(x) + y, )
+ return (self.linear(x) + y,)
mod = MockModule()
mod.zero_grad()
@@ -4468,7 +5146,7 @@ class TestAOTModuleSimplified(AOTTestCase):
self.linear = torch.nn.Linear(20, 30)
def forward(self, x, y):
- return (self.linear(x) + y, )
+ return (self.linear(x) + y,)
mod = MockModule()
@@ -4489,9 +5167,12 @@ class TestAOTModuleSimplified(AOTTestCase):
res = compiled_f(*cloned_inputs)
res[0].sum().backward()
- self.assertExpectedInline(shape_env.format_guards(), """\
+ self.assertExpectedInline(
+ shape_env.format_guards(),
+ """\
- Eq(s1, 20)
- - Eq(s2, 30)""")
+ - Eq(s2, 30)""",
+ )
assert torch.allclose(ref[0], res[0])
assert torch.allclose(inputs[0].grad, cloned_inputs[0].grad)
@@ -4502,7 +5183,9 @@ class TestAOTModuleSimplified(AOTTestCase):
class MyMod(torch.nn.Module):
def forward(self, x):
_tensor_constant0 = torch.tensor([1])
- lift_fresh_copy = torch.ops.aten.lift_fresh_copy.default(_tensor_constant0)
+ lift_fresh_copy = torch.ops.aten.lift_fresh_copy.default(
+ _tensor_constant0
+ )
y = x.mul(lift_fresh_copy)
return (y,)
@@ -4523,10 +5206,12 @@ class TestAOTModuleSimplified(AOTTestCase):
class MockModule(torch.nn.Module):
def __init__(self):
super().__init__()
- self.upsample = torch.nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
+ self.upsample = torch.nn.Upsample(
+ scale_factor=2, mode="bilinear", align_corners=True
+ )
def forward(self, x):
- return (self.upsample(x), )
+ return (self.upsample(x),)
mod = MockModule()
shape_env = ShapeEnv()
@@ -4546,7 +5231,7 @@ class TestAOTModuleSimplified(AOTTestCase):
z = self.linear(x)
z = z + y
z = z.relu()
- return (z, )
+ return (z,)
tracer = torch.fx.Tracer()
tracer.record_stack_traces = True
@@ -4554,24 +5239,26 @@ class TestAOTModuleSimplified(AOTTestCase):
mod = torch.fx.GraphModule(tracer.root, graph)
for node in mod.graph.nodes:
- if node.op == 'output':
+ if node.op == "output":
continue
self.assertTrue(node.stack_trace is not None)
- assert 'test_aotdispatch.py' in node.stack_trace
+ assert "test_aotdispatch.py" in node.stack_trace
def assert_compiler(gm: torch.fx.GraphModule, _):
for node in gm.graph.nodes:
- if node.op == 'output' or node.op == 'placeholder':
+ if node.op == "output" or node.op == "placeholder":
continue
self.assertTrue(node.stack_trace is not None)
- assert 'test_aotdispatch.py' in node.stack_trace
+ assert "test_aotdispatch.py" in node.stack_trace
return gm.forward # return a python callable
x = torch.randn(128, 20, requires_grad=True)
y = torch.randn(128, 30, requires_grad=True)
inputs = [x, y]
- compiled_f = aot_module_simplified(mod, inputs, fw_compiler=assert_compiler, bw_compiler=assert_compiler)
+ compiled_f = aot_module_simplified(
+ mod, inputs, fw_compiler=assert_compiler, bw_compiler=assert_compiler
+ )
res = compiled_f(*inputs)
res[0].sum().backward()
@@ -4583,7 +5270,7 @@ class TestAOTModuleSimplified(AOTTestCase):
def forward(self, x):
x_view = x[0]
x_view.mul_(2)
- return (x + x, )
+ return (x + x,)
tracer = torch.fx.Tracer()
tracer.record_stack_traces = True
@@ -4591,17 +5278,17 @@ class TestAOTModuleSimplified(AOTTestCase):
mod = torch.fx.GraphModule(tracer.root, graph)
for node in mod.graph.nodes:
- if node.op == 'output':
+ if node.op == "output":
continue
self.assertTrue(node.stack_trace is not None)
- assert 'test_aotdispatch.py' in node.stack_trace
+ assert "test_aotdispatch.py" in node.stack_trace
def assert_compiler(gm: torch.fx.GraphModule, _):
assert torch.ops.aten.copy_.default in [x.target for x in gm.graph.nodes]
for node in gm.graph.nodes:
if node.target == torch.ops.aten.copy_.default:
- assert 'stack_trace' in node.meta
- assert 'x_view.mul_(2)' in node.meta['stack_trace']
+ assert "stack_trace" in node.meta
+ assert "x_view.mul_(2)" in node.meta["stack_trace"]
return gm.forward # return a python callable
x = torch.randn(128, 20)
@@ -4628,11 +5315,9 @@ class TestAOTModuleSimplified(AOTTestCase):
# constant to make_fx, and result in the tensor being traced
# into the graph, which is an error condition. Make sure we
# report adequately in this case.
- return (x + fake_z, )
+ return (x + fake_z,)
- with self.assertRaisesRegex(
- AssertionError, "Unexpected fake"
- ):
+ with self.assertRaisesRegex(AssertionError, "Unexpected fake"):
aot_module_simplified(MockModule(), (fake_x,), nop)
@@ -4640,86 +5325,130 @@ class TestAOTModuleSimplified(AOTTestCase):
# Each one of these is a bug (or needs to be investigated)
aot_autograd_failures = {
# data-dependent control flow
- xfail('cov'),
- xfail('nn.functional.gaussian_nll_loss'),
- xfail('tensor_split'),
- xfail('corrcoef'),
- xfail('quantile'),
- xfail('nanquantile'),
- xfail('narrow'),
- xfail('istft'),
- xfail('linalg.eig'),
-
- skip('as_strided_scatter'),
- skip('as_strided', 'partial_views'), # flaky
-
+ xfail("cov"),
+ xfail("nn.functional.gaussian_nll_loss"),
+ xfail("tensor_split"),
+ xfail("corrcoef"),
+ xfail("quantile"),
+ xfail("nanquantile"),
+ xfail("narrow"),
+ xfail("istft"),
+ xfail("linalg.eig"),
+ skip("as_strided_scatter"),
+ skip("as_strided", "partial_views"), # flaky
# Given input size: (s0xs1x2). Calculated output size: ...
- skip('max_pool2d_with_indices_backward'),
-
- skip('nn.functional.nll_loss', ''), # UBSAN failure!
-
+ skip("max_pool2d_with_indices_backward"),
+ skip("nn.functional.nll_loss", ""), # UBSAN failure!
# Misc
- xfail('to_sparse'),
- xfail('corrcoef'),
- xfail('cov'),
- xfail('chalf'), # RuntimeError: "sum_cpu" not implemented for 'ComplexHalf'
- xfail('sparse.sampled_addmm'),
- xfail('sparse.mm', 'reduce'),
- skip('nn.functional.binary_cross_entropy_with_logits'), # seems to fail sometimes?
- skip('nn.functional.margin_ranking_loss'), # seems flaky
- skip('linalg.lu_solve'), # flaky
- decorate('matmul', decorator=unittest.skipIf(IS_ARM64, 'flaky')),
- decorate('__rmatmul__', decorator=unittest.skipIf(IS_ARM64, 'flaky')),
+ xfail("to_sparse"),
+ xfail("corrcoef"),
+ xfail("cov"),
+ xfail("chalf"), # RuntimeError: "sum_cpu" not implemented for 'ComplexHalf'
+ xfail("sparse.sampled_addmm"),
+ xfail("sparse.mm", "reduce"),
+ skip("nn.functional.binary_cross_entropy_with_logits"), # seems to fail sometimes?
+ skip("nn.functional.margin_ranking_loss"), # seems flaky
+ skip("linalg.lu_solve"), # flaky
+ decorate("matmul", decorator=unittest.skipIf(IS_ARM64, "flaky")),
+ decorate("__rmatmul__", decorator=unittest.skipIf(IS_ARM64, "flaky")),
# overrides atol=1e-4, rtol=1e-5 would do as well
- decorate('svd_lowrank', decorator=toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-05)})),
- decorate('linalg.householder_product', decorator=unittest.skipIf(IS_MACOS and IS_X86, 'flaky')),
- decorate('linalg.pinv', 'singular', decorator=toleranceOverride({torch.float32: tol(atol=1e-05, rtol=1e-05)})),
- decorate('nn.functional.interpolate', 'bicubic', decorator=toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-05)})),
+ decorate(
+ "svd_lowrank",
+ decorator=toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-05)}),
+ ),
+ decorate(
+ "linalg.householder_product",
+ decorator=unittest.skipIf(IS_MACOS and IS_X86, "flaky"),
+ ),
+ decorate(
+ "linalg.pinv",
+ "singular",
+ decorator=toleranceOverride({torch.float32: tol(atol=1e-05, rtol=1e-05)}),
+ ),
+ decorate(
+ "nn.functional.interpolate",
+ "bicubic",
+ decorator=toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-05)}),
+ ),
# conv2d sometimes nondeterministic in this config?
- decorate('nn.functional.conv2d', decorator=unittest.skipIf(IS_ARM64, "flaky")),
+ decorate("nn.functional.conv2d", decorator=unittest.skipIf(IS_ARM64, "flaky")),
}
symbolic_aot_autograd_failures = {
- xfail('combinations', ''), # aten.masked_select.default
- xfail('index_fill', ''), # Cannot call sizes() on tensor with symbolic sizes/strides
- xfail('kthvalue', ''), # Cannot call sizes() on tensor with symbolic sizes/strides
- xfail('linalg.lstsq', ''), # aten.linalg_lstsq.default - couldn't find symbolic meta function/decomposition
- xfail('linalg.lstsq', 'grad_oriented'), # aten.linalg_lstsq.default - couldn't find symbolic meta funct...
- xfail('linalg.lu_solve', ''), # aten.linalg_lu_solve.default - couldn't find symbolic meta function/deco...
- skip('nn.functional.batch_norm', ''), # '0 is not tracked with proxy for <torch.fx.experimental.proxy_te..
- xfail('nn.functional.binary_cross_entropy', ''), # aten.fill_.Scalar - couldn't find symbolic meta funct...
- xfail('nn.functional.cross_entropy', ''), # Cannot call sizes() on tensor with symbolic sizes/strides
- xfail('nn.functional.ctc_loss', ''), # aten._ctc_loss.Tensor - couldn't find symbolic meta function/deco...
- xfail('nn.functional.embedding_bag', ''), # Cannot call sizes() on tensor with symbolic sizes/strides
- xfail('nn.functional.fractional_max_pool2d', ''), # rand() received an invalid combination of arguments - g...
- xfail('nn.functional.fractional_max_pool3d', ''), # rand() received an invalid combination of arguments - g...
- xfail('nn.functional.group_norm', ''), # Cannot call sizes() on tensor with symbolic sizes/strides
- xfail('nn.functional.nll_loss', ''), # Cannot call sizes() on tensor with symbolic sizes/strides
- xfail('_segment_reduce', 'lengths'), # aten.segment_reduce.default - couldn't find symbolic meta functio...
- xfail('_segment_reduce', 'offsets'), # aten.segment_reduce.default - couldn't find symbolic meta functio...
- xfail('trace', ''), # Cannot call sizes() on tensor with symbolic sizes/strides
- xfail('_upsample_bilinear2d_aa'), # RuntimeError: isIntList() INTERNAL ASSERT FAILED Expected IntList but got GenericList
- decorate('linalg.householder_product', decorator=unittest.skipIf(IS_MACOS and IS_X86, 'flaky')),
-
+ xfail("combinations", ""), # aten.masked_select.default
+ xfail(
+ "index_fill", ""
+ ), # Cannot call sizes() on tensor with symbolic sizes/strides
+ xfail("kthvalue", ""), # Cannot call sizes() on tensor with symbolic sizes/strides
+ xfail(
+ "linalg.lstsq", ""
+ ), # aten.linalg_lstsq.default - couldn't find symbolic meta function/decomposition
+ xfail(
+ "linalg.lstsq", "grad_oriented"
+ ), # aten.linalg_lstsq.default - couldn't find symbolic meta funct...
+ xfail(
+ "linalg.lu_solve", ""
+ ), # aten.linalg_lu_solve.default - couldn't find symbolic meta function/deco...
+ skip(
+ "nn.functional.batch_norm", ""
+ ), # '0 is not tracked with proxy for <torch.fx.experimental.proxy_te..
+ xfail(
+ "nn.functional.binary_cross_entropy", ""
+ ), # aten.fill_.Scalar - couldn't find symbolic meta funct...
+ xfail(
+ "nn.functional.cross_entropy", ""
+ ), # Cannot call sizes() on tensor with symbolic sizes/strides
+ xfail(
+ "nn.functional.ctc_loss", ""
+ ), # aten._ctc_loss.Tensor - couldn't find symbolic meta function/deco...
+ xfail(
+ "nn.functional.embedding_bag", ""
+ ), # Cannot call sizes() on tensor with symbolic sizes/strides
+ xfail(
+ "nn.functional.fractional_max_pool2d", ""
+ ), # rand() received an invalid combination of arguments - g...
+ xfail(
+ "nn.functional.fractional_max_pool3d", ""
+ ), # rand() received an invalid combination of arguments - g...
+ xfail(
+ "nn.functional.group_norm", ""
+ ), # Cannot call sizes() on tensor with symbolic sizes/strides
+ xfail(
+ "nn.functional.nll_loss", ""
+ ), # Cannot call sizes() on tensor with symbolic sizes/strides
+ xfail(
+ "_segment_reduce", "lengths"
+ ), # aten.segment_reduce.default - couldn't find symbolic meta functio...
+ xfail(
+ "_segment_reduce", "offsets"
+ ), # aten.segment_reduce.default - couldn't find symbolic meta functio...
+ xfail("trace", ""), # Cannot call sizes() on tensor with symbolic sizes/strides
+ xfail(
+ "_upsample_bilinear2d_aa"
+ ), # RuntimeError: isIntList() INTERNAL ASSERT FAILED Expected IntList but got GenericList
+ decorate(
+ "linalg.householder_product",
+ decorator=unittest.skipIf(IS_MACOS and IS_X86, "flaky"),
+ ),
# many complex operators incorrect striding, metadata
- xfail('fft.fft', ''),
- xfail('fft.hfft2', ''),
- xfail('fft.hfft', ''),
- xfail('fft.hfftn', ''),
- xfail('fft.ifft', ''),
- xfail('fft.ihfft2', ''),
- xfail('fft.ihfft', ''),
- xfail('fft.ihfftn', ''),
- xfail('fft.irfft2', ''),
- xfail('fft.irfft', ''),
- xfail('fft.irfftn', ''),
- xfail('fft.rfft2', ''),
- xfail('fft.rfft', ''),
- xfail('fft.rfftn', ''),
-
- xfail('stft', ''), # Cannot call sizes() on tensor with symbolic sizes/strides
+ xfail("fft.fft", ""),
+ xfail("fft.hfft2", ""),
+ xfail("fft.hfft", ""),
+ xfail("fft.hfftn", ""),
+ xfail("fft.ifft", ""),
+ xfail("fft.ihfft2", ""),
+ xfail("fft.ihfft", ""),
+ xfail("fft.ihfftn", ""),
+ xfail("fft.irfft2", ""),
+ xfail("fft.irfft", ""),
+ xfail("fft.irfftn", ""),
+ xfail("fft.rfft2", ""),
+ xfail("fft.rfft", ""),
+ xfail("fft.rfftn", ""),
+ xfail("stft", ""), # Cannot call sizes() on tensor with symbolic sizes/strides
}
+
def _test_aot_autograd_helper(self, device, dtype, op, dynamic=False):
if not op.supports_autograd:
self.skipTest("Op does not support autograd")
@@ -4727,11 +5456,13 @@ def _test_aot_autograd_helper(self, device, dtype, op, dynamic=False):
# aot_autograd_check is able to check data specialization by
# randomizing the inputs. Here's a list of ops that really do not
# like random inputs for which we want to disable that.
- cant_check_data_specialization = set({
- 'nn.functional.max_unpool1d',
- 'nn.functional.max_unpool2d',
- 'nn.functional.max_unpool3d',
- })
+ cant_check_data_specialization = set(
+ {
+ "nn.functional.max_unpool1d",
+ "nn.functional.max_unpool2d",
+ "nn.functional.max_unpool3d",
+ }
+ )
try_check_data_specialization = op.name not in cant_check_data_specialization
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=True)
@@ -4740,10 +5471,15 @@ def _test_aot_autograd_helper(self, device, dtype, op, dynamic=False):
t_kwargs = sample_input.kwargs
try:
aot_autograd_check(
- op.op, t_args, t_kwargs, dynamic,
- self.assertRaisesRegex, self.assertEqual,
+ op.op,
+ t_args,
+ t_kwargs,
+ dynamic,
+ self.assertRaisesRegex,
+ self.assertEqual,
check_gradients=True,
- try_check_data_specialization=try_check_data_specialization)
+ try_check_data_specialization=try_check_data_specialization,
+ )
except DynamicOutputShapeException:
self.skipTest("Dynamic output shape operation in trace")
except GuardOnDataDependentSymNode:
@@ -4755,21 +5491,31 @@ def _test_aot_autograd_helper(self, device, dtype, op, dynamic=False):
else:
raise
-def _test_aot_autograd_module_helper(self, device, dtype, training, module_info, *, dynamic=False):
+
+def _test_aot_autograd_module_helper(
+ self, device, dtype, training, module_info, *, dynamic=False
+):
module_cls = module_info.module_cls
- module_inputs = module_info.module_inputs_func(module_info, device=device, dtype=dtype,
- requires_grad=True, training=training)
+ module_inputs = module_info.module_inputs_func(
+ module_info, device=device, dtype=dtype, requires_grad=True, training=training
+ )
for module_input in module_inputs:
if module_input.forward_input is None:
continue
- args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
+ args, kwargs = (
+ module_input.constructor_input.args,
+ module_input.constructor_input.kwargs,
+ )
m = module_cls(*args, **kwargs)
m.to(device).to(dtype)
m.train(training)
# Lazy modules need to see an input first to initialize params.
- args, kwargs = module_input.forward_input.args, module_input.forward_input.kwargs
+ args, kwargs = (
+ module_input.forward_input.args,
+ module_input.forward_input.kwargs,
+ )
flat_args, args_spec = pytree.tree_flatten((args, kwargs))
# PackedSequence is only used for RNNs. It might be possible to fake-ify if they're pytrees but
@@ -4782,8 +5528,9 @@ def _test_aot_autograd_module_helper(self, device, dtype, training, module_info,
m(*args, **kwargs)
sentinel_val = -42
- is_tensor_spec = [sentinel_val if isinstance(arg, torch.Tensor)
- else arg for arg in flat_args]
+ is_tensor_spec = [
+ sentinel_val if isinstance(arg, torch.Tensor) else arg for arg in flat_args
+ ]
args = [arg for arg in flat_args if isinstance(arg, torch.Tensor)]
def f(params_buffers_args):
@@ -4800,57 +5547,71 @@ def _test_aot_autograd_module_helper(self, device, dtype, training, module_info,
named_params = dict(m.named_parameters(remove_duplicate=False))
named_buffers = dict(m.named_buffers(remove_duplicate=False))
num_params_buffers = len(named_params) + len(named_buffers)
- compiled_f = aot_function(f, nop, num_params_buffers=num_params_buffers, dynamic=dynamic)
+ compiled_f = aot_function(
+ f, nop, num_params_buffers=num_params_buffers, dynamic=dynamic
+ )
params_buffers_args = [named_params, named_buffers, args]
_test_aot_autograd_forwards_backwards_helper(
- f, compiled_f, params_buffers_args,
- self.assertRaisesRegex, self.assertEqual, True)
+ f,
+ compiled_f,
+ params_buffers_args,
+ self.assertRaisesRegex,
+ self.assertEqual,
+ True,
+ )
class TestEagerFusionOpInfo(AOTTestCase):
@ops(op_db + hop_db, allowed_dtypes=(torch.float,))
- @skipOps('TestEagerFusionOpInfo', 'test_aot_autograd_exhaustive', aot_autograd_failures)
+ @skipOps(
+ "TestEagerFusionOpInfo", "test_aot_autograd_exhaustive", aot_autograd_failures
+ )
def test_aot_autograd_exhaustive(self, device, dtype, op):
_test_aot_autograd_helper(self, device, dtype, op)
@ops(op_db + hop_db, allowed_dtypes=(torch.float,))
@patch("functorch.compile.config.debug_assert", True)
- @skipOps('TestEagerFusionOpInfo', 'test_aot_autograd_symbolic_exhaustive',
- aot_autograd_failures | symbolic_aot_autograd_failures)
+ @skipOps(
+ "TestEagerFusionOpInfo",
+ "test_aot_autograd_symbolic_exhaustive",
+ aot_autograd_failures | symbolic_aot_autograd_failures,
+ )
def test_aot_autograd_symbolic_exhaustive(self, device, dtype, op):
_test_aot_autograd_helper(self, device, dtype, op, dynamic=True)
-aot_autograd_module_failures = set({
- torch.nn.CTCLoss, # torch._subclasses.fake_tensor.DynamicOutputShapeException: aten._ctc_loss.default
- torch.nn.GaussianNLLLoss, # RuntimeError: It appears that you're trying to get value out
- # of a tracing tensor with aten._local_scalar_dense.default -
- # erroring out! It's likely that this is caused by data-dependent
- # control flow or similar.
- torch.nn.MultiLabelMarginLoss, # AssertionError: The values for attribute 'shape' do not match:
- # torch.Size([1]) != torch.Size([]). Outputs of the operator are different in
- # eager-mode PyTorch vs AOTAutograd. This means the operator will have incorrect
- # output underneath torch.compile. This could be because the operator's
- # implementation not traceable or that there is a bug in AOTAutograd.
- torch.nn.TransformerEncoder, # DataDependentOutputException: aten.eq compares a mask input
- # to a causal mask tensor, to see if Boolean is_causal should be set
- # for TrnasformerEncoder layers, MHA and sdp custom kernels
- torch.nn.Transformer, # DataDependentOutputException: aten.equal compares a mask input
- # to a causal mask tensor, to see if Boolean is_causal should be set
- # for TransformerEncoder layers, MHA and sdp custom kernels
- # (this bubbles up to Transformer)
-})
+aot_autograd_module_failures = set(
+ {
+ torch.nn.CTCLoss, # torch._subclasses.fake_tensor.DynamicOutputShapeException: aten._ctc_loss.default
+ torch.nn.GaussianNLLLoss, # RuntimeError: It appears that you're trying to get value out
+ # of a tracing tensor with aten._local_scalar_dense.default -
+ # erroring out! It's likely that this is caused by data-dependent
+ # control flow or similar.
+ torch.nn.MultiLabelMarginLoss, # AssertionError: The values for attribute 'shape' do not match:
+ # torch.Size([1]) != torch.Size([]). Outputs of the operator are different in
+ # eager-mode PyTorch vs AOTAutograd. This means the operator will have incorrect
+ # output underneath torch.compile. This could be because the operator's
+ # implementation not traceable or that there is a bug in AOTAutograd.
+ torch.nn.TransformerEncoder, # DataDependentOutputException: aten.eq compares a mask input
+ # to a causal mask tensor, to see if Boolean is_causal should be set
+        # for TransformerEncoder layers, MHA and sdp custom kernels
+ torch.nn.Transformer, # DataDependentOutputException: aten.equal compares a mask input
+ # to a causal mask tensor, to see if Boolean is_causal should be set
+ # for TransformerEncoder layers, MHA and sdp custom kernels
+ # (this bubbles up to Transformer)
+ }
+)
symbolic_aot_autograd_module_failures = {
torch.nn.Transformer, # DataDependentOutputException: aten.equal compares a mask input to a mask producing a bool
torch.nn.TransformerEncoder, # DataDependentOutputException: aten.equal compares a mask input to a mask producing a bool
torch.nn.GaussianNLLLoss, # NotImplementedError: local_scalar_dense/item NYI for torch.bool
torch.nn.GroupNorm, # in native_group_norm_backward cpg, _rem = divmod(C, group)
- # TypeError: unsupported operand type(s) for divmod(): 'SymInt' and 'int'
+ # TypeError: unsupported operand type(s) for divmod(): 'SymInt' and 'int'
torch.nn.FractionalMaxPool2d, # int() argument must be a string, a bytes-like object or a number, not 'SymFloat'
torch.nn.FractionalMaxPool3d, # int() argument must be a string, a bytes-like object or a number, not 'SymFloat'
torch.nn.BCELoss, # new_size = _infer_size(target.size(), weight.size())
- # RuntimeError: expected int at position 0, but got: SymInt
+ # RuntimeError: expected int at position 0, but got: SymInt
}
@@ -4861,14 +5622,20 @@ class TestEagerFusionModuleInfo(AOTTestCase):
_test_aot_autograd_module_helper(self, device, dtype, training, module_info)
@modules(module_db, allowed_dtypes=(torch.float,))
- @decorateForModules(unittest.expectedFailure,
- aot_autograd_module_failures | symbolic_aot_autograd_module_failures)
- def test_aot_autograd_symbolic_module_exhaustive(self, device, dtype, training, module_info):
- _test_aot_autograd_module_helper(self, device, dtype, training, module_info, dynamic=True)
+ @decorateForModules(
+ unittest.expectedFailure,
+ aot_autograd_module_failures | symbolic_aot_autograd_module_failures,
+ )
+ def test_aot_autograd_symbolic_module_exhaustive(
+ self, device, dtype, training, module_info
+ ):
+ _test_aot_autograd_module_helper(
+ self, device, dtype, training, module_info, dynamic=True
+ )
instantiate_parametrized_tests(TestAOTAutograd)
-only_for = ("cpu")
+only_for = "cpu"
instantiate_device_type_tests(
TestPythonKey,
globals(),
@@ -4878,5 +5645,5 @@ instantiate_device_type_tests(TestEagerFusionOpInfo, globals(), only_for=only_fo
instantiate_device_type_tests(TestEagerFusionModuleInfo, globals(), only_for=only_for)
-if __name__ == '__main__':
+if __name__ == "__main__":
run_tests()
diff --git a/test/functorch/test_control_flow.py b/test/functorch/test_control_flow.py
index a3cc410080..a1aeb8c1de 100644
--- a/test/functorch/test_control_flow.py
+++ b/test/functorch/test_control_flow.py
@@ -1,20 +1,32 @@
# Owner(s): ["module: functorch"]
-import functools
import contextlib
+import functools
import unittest
-from torch.testing._internal.common_utils import (
- TEST_WITH_TORCHDYNAMO, parametrize, instantiate_parametrized_tests, skipIfTorchDynamo
-)
import torch
import torch.utils._pytree as pytree
from functorch.experimental import control_flow
-from functorch.experimental.control_flow import UnsupportedAliasMutationException, cond
+from functorch.experimental.control_flow import cond, UnsupportedAliasMutationException
from torch._higher_order_ops.while_loop import while_loop
+from torch._subclasses.functional_tensor import (
+ CppFunctionalizeAPI,
+ FunctionalTensor,
+ FunctionalTensorMode,
+ PythonFunctionalizeAPI,
+)
from torch.fx.experimental.proxy_tensor import make_fx
-from torch.testing._internal.common_utils import run_tests, TestCase, IS_WINDOWS
from torch.testing._internal.common_quantization import skipIfNoDynamoSupport
-from torch._subclasses.functional_tensor import FunctionalTensor, CppFunctionalizeAPI, PythonFunctionalizeAPI, FunctionalTensorMode
+
+from torch.testing._internal.common_utils import (
+ instantiate_parametrized_tests,
+ IS_WINDOWS,
+ parametrize,
+ run_tests,
+ skipIfTorchDynamo,
+ TEST_WITH_TORCHDYNAMO,
+ TestCase,
+)
+
# TODO: pull these helpers from AOTAutograd later
def to_fun(t):
@@ -22,6 +34,7 @@ def to_fun(t):
return FunctionalTensor.to_functional(t)
return t
+
def from_fun(t):
if not isinstance(t, FunctionalTensor):
# quick sanity assert
@@ -31,6 +44,7 @@ def from_fun(t):
torch._sync(t)
return torch._from_functional_tensor(t.elem)
+
def to_fun_old(t):
if isinstance(t, torch.Tensor) and not torch._is_functional_tensor(t):
out = torch._to_functional_tensor(t)
@@ -38,6 +52,7 @@ def to_fun_old(t):
return out
return t
+
def from_fun_old(t):
# quick sanity assert
if isinstance(t, torch.Tensor):
@@ -46,19 +61,23 @@ def from_fun_old(t):
return torch._from_functional_tensor(t)
return t
+
def _fake_map(f, x, *args):
from functorch.experimental.control_flow import _stack_pytree, _unstack_pytree
+
x_pytrees = _unstack_pytree(x)
zs = []
for xp in x_pytrees:
zs.append(f(xp, *args))
return _stack_pytree(zs)
+
def _fake_while_loop(cond_fn, body_fn, operands):
while cond_fn(*operands):
operands = body_fn(*operands)
return operands
+
def _while_loop_tests():
def simple(x):
def cond_fn(x):
@@ -67,7 +86,7 @@ def _while_loop_tests():
def body_fn(x):
return (x + 1,)
- return while_loop(cond_fn, body_fn, (x, ))
+ return while_loop(cond_fn, body_fn, (x,))
def simple_with_mutation(x):
def cond_fn(x):
@@ -78,7 +97,7 @@ def _while_loop_tests():
y = x.clone().add_(1).add_(-1)
return (y + 1,)
- return while_loop(cond_fn, body_fn, (x, ))
+ return while_loop(cond_fn, body_fn, (x,))
def nested(out_iter, it, y):
def cond_fn(out_iter, it, y):
@@ -98,12 +117,10 @@ def _while_loop_tests():
class Nested(torch.nn.Module):
def forward(self, ci, cj, a, b):
-
def cond_fn(i1, j1, x1, y1):
return i1 > 0
def body_fn(i1, j1, x1, y1):
-
def cond_fn_nested(i2, j2, x2, y2):
return j2 > 0
@@ -114,6 +131,7 @@ def _while_loop_tests():
cond_fn_nested, body_fn_nested, [i1, j1, x1, y1]
)
return i1 - 1, j1.clone(), x1 * 2, y1 / 2
+
return while_loop(cond_fn, body_fn, (ci, cj, a, b))
class SimpleWithLinear(torch.nn.Module):
@@ -128,6 +146,7 @@ def _while_loop_tests():
def body_fn(it, x):
return it - 1, self.linear(x)
+
return while_loop(cond_fn, body_fn, (iter, x))
class NestedWithLinear(torch.nn.Module):
@@ -143,6 +162,7 @@ def _while_loop_tests():
def body_fn(it, x):
return it - 1, self.outer_linear(self.mod(it, x)[1])
+
return while_loop(cond_fn, body_fn, (iter, x))
nested2 = Nested()
@@ -152,16 +172,31 @@ def _while_loop_tests():
x = torch.zeros(1)
y = torch.zeros(1)
z = torch.zeros(1)
- return {"simple": (simple, (x,)),
- "nested": (nested, (x, y, z)),
- "nested2": (nested2, (torch.tensor(2), torch.tensor(2), torch.ones(2, 2), torch.ones(2, 2))),
- "simple_with_mutation": (simple_with_mutation, (x,)),
- "simple_with_linear": (simple_with_linear, (torch.tensor(3), torch.randn(2, 2))),
- "nested_with_linear": (nested_with_linear, (torch.tensor(3), torch.randn(2, 2)))}
+ return {
+ "simple": (simple, (x,)),
+ "nested": (nested, (x, y, z)),
+ "nested2": (
+ nested2,
+ (torch.tensor(2), torch.tensor(2), torch.ones(2, 2), torch.ones(2, 2)),
+ ),
+ "simple_with_mutation": (simple_with_mutation, (x,)),
+ "simple_with_linear": (
+ simple_with_linear,
+ (torch.tensor(3), torch.randn(2, 2)),
+ ),
+ "nested_with_linear": (
+ nested_with_linear,
+ (torch.tensor(3), torch.randn(2, 2)),
+ ),
+ }
+
WHILE_LOOP_TESTS = _while_loop_tests()
-def collect_meta_for_filtered_nodes(gm: torch.fx.GraphModule, node_names, meta_field_name):
+
+def collect_meta_for_filtered_nodes(
+ gm: torch.fx.GraphModule, node_names, meta_field_name
+):
ret = []
for mod in gm.modules():
for node in mod.graph.nodes:
@@ -170,16 +205,19 @@ def collect_meta_for_filtered_nodes(gm: torch.fx.GraphModule, node_names, meta_f
ret.append(node.meta.get(field_name))
return ret
+
def reduce_func(*operands):
acc = 0
for operand in operands:
acc += operand
return acc
+
class ReduceObj:
def __call__(self, *operands):
return reduce_func(*operands)
+
class ReduceMod(torch.nn.Module):
def _reduce(self, *operands):
return reduce_func(*operands)
@@ -239,26 +277,35 @@ class TestControlFlow(TestCase):
return (x + 1,)
x = torch.zeros(1, device="cuda")
- res = while_loop(cond_fn, body_fn, (x, ))
- expected = _fake_while_loop(cond_fn, body_fn, (x, ))
+ res = while_loop(cond_fn, body_fn, (x,))
+ expected = _fake_while_loop(cond_fn, body_fn, (x,))
self.assertEqual(expected, res)
def test_map_illegal_inputs(self):
def f(x, y):
return x[0] + x[1] + y
- with self.assertRaisesRegex(RuntimeError,
- r"Mapped xs can only consist of tensors\. Got xs \[3, tensor\(\[1\., 1\.\]\)\]\."):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ r"Mapped xs can only consist of tensors\. Got xs \[3, tensor\(\[1\., 1\.\]\)\]\.",
+ ):
_ = control_flow.map(f, (3, torch.ones(2)), torch.ones(2))
- with self.assertRaisesRegex(RuntimeError,
- r"Leading dimensions of mapped xs cannot be 0\."):
- _ = control_flow.map(f, (torch.ones(0, 1, 2), torch.ones(0, 1, 2)), torch.ones(2))
+ with self.assertRaisesRegex(
+ RuntimeError, r"Leading dimensions of mapped xs cannot be 0\."
+ ):
+ _ = control_flow.map(
+ f, (torch.ones(0, 1, 2), torch.ones(0, 1, 2)), torch.ones(2)
+ )
- with self.assertRaisesRegex(RuntimeError,
- r"Leading dimensions of mapped xs must be consistent\. "
- r"Got shapes \[torch\.Size\(\[3, 4, 5\]\), torch\.Size\(\[4, 4, 5\]\)\]\."):
- _ = control_flow.map(f, (torch.ones(3, 4, 5), torch.ones(4, 4, 5)), torch.ones(5))
+ with self.assertRaisesRegex(
+ RuntimeError,
+ r"Leading dimensions of mapped xs must be consistent\. "
+ r"Got shapes \[torch\.Size\(\[3, 4, 5\]\), torch\.Size\(\[4, 4, 5\]\)\]\.",
+ ):
+ _ = control_flow.map(
+ f, (torch.ones(3, 4, 5), torch.ones(4, 4, 5)), torch.ones(5)
+ )
def test_map_illegal_outputs(self):
def f(x, y):
@@ -272,16 +319,19 @@ class TestControlFlow(TestCase):
x = torch.ones([3])
y = torch.ones([1, 2, 3])
- with self.assertRaisesRegex(RuntimeError, r"Expect outputs of map only contains tensors or None\."):
+ with self.assertRaisesRegex(
+ RuntimeError, r"Expect outputs of map only contains tensors or None\."
+ ):
_ = control_flow.map(f, x, y)
- with self.assertRaisesRegex(RuntimeError, r"Expect outputs of map only contains tensors or None\."):
+ with self.assertRaisesRegex(
+ RuntimeError, r"Expect outputs of map only contains tensors or None\."
+ ):
out = control_flow.map(f1, x, y)
# return None is OK
_ = control_flow.map(f2, x, y)
-
def test_map_list_in_out(self):
def f(x, y):
return [[x[0][0] + y]]
@@ -350,7 +400,6 @@ class TestControlFlow(TestCase):
self.assertEqual(expected_res, res)
self.assertEqual(expected_grads, grads)
-
def test_map_autograd_nested_list(self):
import torch.utils._pytree as pytree
@@ -363,11 +412,18 @@ class TestControlFlow(TestCase):
z = map_op(f, x, y)
flat_x = pytree.tree_leaves(x)
flat_z = pytree.tree_leaves(z)
- grads = torch.autograd.grad(flat_z, flat_x, [torch.ones_like(z) for z in flat_z])
+ grads = torch.autograd.grad(
+ flat_z, flat_x, [torch.ones_like(z) for z in flat_z]
+ )
return z, grads
- x = [[torch.randn(3, 2, 2, requires_grad=True), torch.randn(3, 2, 1, requires_grad=True)],
- torch.ones(3, 1, 2, requires_grad=True)]
+ x = [
+ [
+ torch.randn(3, 2, 2, requires_grad=True),
+ torch.randn(3, 2, 1, requires_grad=True),
+ ],
+ torch.ones(3, 1, 2, requires_grad=True),
+ ]
y = torch.ones(1, requires_grad=True)
true_outs = fwbw(control_flow.map, f, x, y)
fake_outs = fwbw(_fake_map, f, x, y)
@@ -385,7 +441,11 @@ class TestControlFlowTraced(TestCase):
graphs = {}
eager_res = fn(*args)
for tracing_mode in ["symbolic", "real", "fake"]:
- graph = make_fx(fn, tracing_mode=tracing_mode, _allow_non_fake_inputs=allow_non_fake_inputs)(*args)
+ graph = make_fx(
+ fn,
+ tracing_mode=tracing_mode,
+ _allow_non_fake_inputs=allow_non_fake_inputs,
+ )(*args)
graphs[tracing_mode] = graph
self.assertEqual(graph(*args), eager_res)
return graphs
@@ -419,7 +479,9 @@ class TestControlFlowTraced(TestCase):
def test_while_loop_nested_traced(self):
fn, inp = WHILE_LOOP_TESTS["nested"]
graphs = self._check_tracing(fn, inp)
- self.assertExpectedInline(graphs["symbolic"].code.strip("\n"), """\
+ self.assertExpectedInline(
+ graphs["symbolic"].code.strip("\n"),
+ """\
def forward(self, out_iter_1, it_1, y_1):
while_loop_cond_graph_0 = self.while_loop_cond_graph_0
while_loop_body_graph_0 = self.while_loop_body_graph_0
@@ -428,14 +490,20 @@ def forward(self, out_iter_1, it_1, y_1):
getitem_1 = while_loop[1]
getitem_2 = while_loop[2]; while_loop = None
return (getitem, getitem_1, getitem_2)
- """) # noqa: B950
- self.assertExpectedInline(graphs["symbolic"].while_loop_cond_graph_0.code.strip("\n"), """\
+ """, # noqa: B950
+ )
+ self.assertExpectedInline(
+ graphs["symbolic"].while_loop_cond_graph_0.code.strip("\n"),
+ """\
def forward(self, arg0_1, arg1_1, arg2_1):
sum_1 = torch.ops.aten.sum.default(arg0_1); arg0_1 = None
lt = torch.ops.aten.lt.Scalar(sum_1, 2); sum_1 = None
return lt
- """)
- self.assertExpectedInline(graphs["symbolic"].while_loop_body_graph_0.code.strip("\n"), """\
+ """,
+ )
+ self.assertExpectedInline(
+ graphs["symbolic"].while_loop_body_graph_0.code.strip("\n"),
+ """\
def forward(self, arg0_1, arg1_1, arg2_1):
while_loop_cond_graph_0 = self.while_loop_cond_graph_0
while_loop_body_graph_0 = self.while_loop_body_graph_0
@@ -445,7 +513,8 @@ def forward(self, arg0_1, arg1_1, arg2_1):
getitem_2 = while_loop[2]; while_loop = None
add = torch.ops.aten.add.Tensor(getitem, 1); getitem = None
return (add, getitem_1, getitem_2)
- """) # noqa: B950
+ """, # noqa: B950
+ )
def _wrap_with_functionalize(self, fn, func_type):
mode = None
@@ -468,15 +537,20 @@ def forward(self, arg0_1, arg1_1, arg2_1):
with mode:
graphs = self._check_tracing(fn, inp)
if func_type == "no":
- self.assertExpectedInline(graphs["symbolic"].code.strip("\n"), """\
+ self.assertExpectedInline(
+ graphs["symbolic"].code.strip("\n"),
+ """\
def forward(self, x_1):
while_loop_cond_graph_0 = self.while_loop_cond_graph_0
while_loop_body_graph_0 = self.while_loop_body_graph_0
while_loop = torch.ops.higher_order.while_loop(while_loop_cond_graph_0, while_loop_body_graph_0, (x_1,), ()); while_loop_cond_graph_0 = while_loop_body_graph_0 = x_1 = None
getitem = while_loop[0]; while_loop = None
return (getitem,)
- """) # noqa: B950
- self.assertExpectedInline(graphs["symbolic"].while_loop_cond_graph_0.code.strip("\n"), """\
+ """, # noqa: B950
+ )
+ self.assertExpectedInline(
+ graphs["symbolic"].while_loop_cond_graph_0.code.strip("\n"),
+ """\
def forward(self, arg0_1):
clone = torch.ops.aten.clone.default(arg0_1); arg0_1 = None
add_ = torch.ops.aten.add_.Tensor(clone, 1); clone = None
@@ -484,25 +558,34 @@ def forward(self, arg0_1):
sum_1 = torch.ops.aten.sum.default(add__1); add__1 = None
lt = torch.ops.aten.lt.Scalar(sum_1, 10); sum_1 = None
return lt
- """)
- self.assertExpectedInline(graphs["symbolic"].while_loop_body_graph_0.code.strip("\n"), """\
+ """,
+ )
+ self.assertExpectedInline(
+ graphs["symbolic"].while_loop_body_graph_0.code.strip("\n"),
+ """\
def forward(self, arg0_1):
clone = torch.ops.aten.clone.default(arg0_1); arg0_1 = None
add_ = torch.ops.aten.add_.Tensor(clone, 1); clone = None
add__1 = torch.ops.aten.add_.Tensor(add_, -1); add_ = None
add = torch.ops.aten.add.Tensor(add__1, 1); add__1 = None
return (add,)
- """)
+ """,
+ )
elif func_type == "python":
- self.assertExpectedInline(graphs["symbolic"].code.strip("\n"), """\
+ self.assertExpectedInline(
+ graphs["symbolic"].code.strip("\n"),
+ """\
def forward(self, arg0_1):
while_loop_cond_graph_0 = self.while_loop_cond_graph_0
while_loop_body_graph_0 = self.while_loop_body_graph_0
while_loop = torch.ops.higher_order.while_loop(while_loop_cond_graph_0, while_loop_body_graph_0, (arg0_1,), ()); while_loop_cond_graph_0 = while_loop_body_graph_0 = arg0_1 = None
getitem = while_loop[0]; while_loop = None
return (getitem,)
- """) # noqa: B950
- self.assertExpectedInline(graphs["symbolic"].while_loop_cond_graph_0.code.strip("\n"), """\
+ """, # noqa: B950
+ )
+ self.assertExpectedInline(
+ graphs["symbolic"].while_loop_cond_graph_0.code.strip("\n"),
+ """\
def forward(self, arg0_1):
clone = torch.ops.aten.clone.default(arg0_1); arg0_1 = None
add = torch.ops.aten.add.Tensor(clone, 1); clone = None
@@ -510,25 +593,34 @@ def forward(self, arg0_1):
sum_1 = torch.ops.aten.sum.default(add_1); add_1 = None
lt = torch.ops.aten.lt.Scalar(sum_1, 10); sum_1 = None
return lt
- """)
- self.assertExpectedInline(graphs["symbolic"].while_loop_body_graph_0.code.strip("\n"), """\
+ """,
+ )
+ self.assertExpectedInline(
+ graphs["symbolic"].while_loop_body_graph_0.code.strip("\n"),
+ """\
def forward(self, arg0_1):
clone = torch.ops.aten.clone.default(arg0_1); arg0_1 = None
add = torch.ops.aten.add.Tensor(clone, 1); clone = None
add_1 = torch.ops.aten.add.Tensor(add, -1); add = None
add_2 = torch.ops.aten.add.Tensor(add_1, 1); add_1 = None
return (add_2,)
- """)
+ """,
+ )
else:
- self.assertExpectedInline(graphs["symbolic"].code.strip("\n"), """\
+ self.assertExpectedInline(
+ graphs["symbolic"].code.strip("\n"),
+ """\
def forward(self, x_1):
while_loop_cond_graph_0 = self.while_loop_cond_graph_0
while_loop_body_graph_0 = self.while_loop_body_graph_0
while_loop = torch.ops.higher_order.while_loop(while_loop_cond_graph_0, while_loop_body_graph_0, (x_1,), ()); while_loop_cond_graph_0 = while_loop_body_graph_0 = x_1 = None
getitem = while_loop[0]; while_loop = None
return (getitem,)
- """) # noqa: B950
- self.assertExpectedInline(graphs["symbolic"].while_loop_cond_graph_0.code.strip("\n"), """\
+ """, # noqa: B950
+ )
+ self.assertExpectedInline(
+ graphs["symbolic"].while_loop_cond_graph_0.code.strip("\n"),
+ """\
def forward(self, arg0_1):
clone = torch.ops.aten.clone.default(arg0_1); arg0_1 = None
add = torch.ops.aten.add.Tensor(clone, 1); clone = None
@@ -536,15 +628,19 @@ def forward(self, arg0_1):
sum_1 = torch.ops.aten.sum.default(add_1); add_1 = None
lt = torch.ops.aten.lt.Scalar(sum_1, 10); sum_1 = None
return lt
- """)
- self.assertExpectedInline(graphs["symbolic"].while_loop_body_graph_0.code.strip("\n"), """\
+ """,
+ )
+ self.assertExpectedInline(
+ graphs["symbolic"].while_loop_body_graph_0.code.strip("\n"),
+ """\
def forward(self, arg0_1):
clone = torch.ops.aten.clone.default(arg0_1); arg0_1 = None
add = torch.ops.aten.add.Tensor(clone, 1); clone = None
add_1 = torch.ops.aten.add.Tensor(add, -1); add = None
add_2 = torch.ops.aten.add.Tensor(add_1, 1); add_1 = None
return (add_2,)
- """)
+ """,
+ )
@parametrize("func_type", ["no", "cpp", "python", "functorch"])
@parametrize("while_loop_test", list(WHILE_LOOP_TESTS.keys()))
@@ -561,10 +657,13 @@ def forward(self, arg0_1):
@parametrize("while_loop_test", list(WHILE_LOOP_TESTS.keys()))
def test_while_loop_tracing(self, while_loop_test):
fn, inp = WHILE_LOOP_TESTS[while_loop_test]
- allow_non_fake_inputs = False if while_loop_test not in ("simple_with_linear", "nested_with_linear") else True
+ allow_non_fake_inputs = (
+ False
+ if while_loop_test not in ("simple_with_linear", "nested_with_linear")
+ else True
+ )
self._check_tracing(fn, inp, allow_non_fake_inputs)
-
@parametrize("backend", ["eager", "aot_eager"])
@parametrize("while_loop_test", list(WHILE_LOOP_TESTS.keys()))
def test_while_loop_compile(self, backend, while_loop_test):
@@ -574,14 +673,15 @@ def forward(self, arg0_1):
@skipIfTorchDynamo("Graph is not captured by backend if test with dynamo")
def test_while_loop_simple_with_linear_compile_check_graph(self):
fn, inp = WHILE_LOOP_TESTS["simple_with_linear"]
- from torch._dynamo.testing import (
- EagerAndRecordGraphs,
- )
+ from torch._dynamo.testing import EagerAndRecordGraphs
+
backend = EagerAndRecordGraphs()
torch.compile(fn, backend=backend)(*inp)
self.assertEqual(len(backend.graphs), 1)
gm = backend.graphs[0]
- self.assertExpectedInline(gm.code.strip(), """\
+ self.assertExpectedInline(
+ gm.code.strip(),
+ """\
def forward(self, L_iter_ : torch.Tensor, L_x_ : torch.Tensor):
l_iter_ = L_iter_
l_x_ = L_x_
@@ -593,17 +693,24 @@ def forward(self, L_iter_ : torch.Tensor, L_x_ : torch.Tensor):
while_loop = torch.ops.higher_order.while_loop(cond_fn_0, body_fn_0, (l_iter_, l_x_), (l__self___dec, l__self___linear_bias, l__self___linear_weight)); cond_fn_0 = body_fn_0 = l_iter_ = l_x_ = l__self___dec = l__self___linear_bias = l__self___linear_weight = None
getitem = while_loop[0]
getitem_1 = while_loop[1]; while_loop = None
- return (getitem, getitem_1)""") # noqa: B950
- self.assertExpectedInline(gm.cond_fn_0.code.strip(), """\
+ return (getitem, getitem_1)""", # noqa: B950
+ )
+ self.assertExpectedInline(
+ gm.cond_fn_0.code.strip(),
+ """\
def forward(self, l_iter_, l_x_, l__self___dec_cond_fn, l__self___linear_bias_body_fn, l__self___linear_weight_body_fn):
sub = l_iter_ - l__self___dec_cond_fn; l_iter_ = l__self___dec_cond_fn = None
gt = sub > 0; sub = None
- return gt""") # noqa: B950
- self.assertExpectedInline(gm.body_fn_0.code.strip(), """\
+ return gt""", # noqa: B950
+ )
+ self.assertExpectedInline(
+ gm.body_fn_0.code.strip(),
+ """\
def forward(self, l_iter_, l_x_, l__self___dec_cond_fn, l__self___linear_bias_body_fn, l__self___linear_weight_body_fn):
sub = l_iter_ - 1; l_iter_ = None
linear = torch._C._nn.linear(l_x_, l__self___linear_weight_body_fn, l__self___linear_bias_body_fn); l_x_ = l__self___linear_weight_body_fn = l__self___linear_bias_body_fn = None
- return (sub, linear)""") # noqa: B950
+ return (sub, linear)""", # noqa: B950
+ )
def test_while_loop_nested2_traced(self):
fn, inp = WHILE_LOOP_TESTS["nested2"]
@@ -613,7 +720,9 @@ def forward(self, l_iter_, l_x_, l__self___dec_cond_fn, l__self___linear_bias_bo
outer_cond = gm.while_loop_cond_graph_0
inner_body = outer_body.while_loop_body_graph_0
inner_cond = outer_body.while_loop_cond_graph_0
- self.assertExpectedInline(gm.code.strip("\n"), """\
+ self.assertExpectedInline(
+ gm.code.strip("\n"),
+ """\
def forward(self, arg0_1, arg1_1, arg2_1, arg3_1):
while_loop_cond_graph_0 = self.while_loop_cond_graph_0
while_loop_body_graph_0 = self.while_loop_body_graph_0
@@ -623,8 +732,11 @@ def forward(self, arg0_1, arg1_1, arg2_1, arg3_1):
getitem_2 = while_loop[2]
getitem_3 = while_loop[3]; while_loop = None
return (getitem, getitem_1, getitem_2, getitem_3)
- """) # noqa: B950
- self.assertExpectedInline(outer_body.code.strip("\n"), """\
+ """, # noqa: B950
+ )
+ self.assertExpectedInline(
+ outer_body.code.strip("\n"),
+ """\
def forward(self, arg0_1, arg1_1, arg2_1, arg3_1):
while_loop_cond_graph_0 = self.while_loop_cond_graph_0
while_loop_body_graph_0 = self.while_loop_body_graph_0
@@ -638,8 +750,11 @@ def forward(self, arg0_1, arg1_1, arg2_1, arg3_1):
mul = torch.ops.aten.mul.Tensor(getitem_2, 2); getitem_2 = None
div = torch.ops.aten.div.Tensor(getitem_3, 2); getitem_3 = None
return (sub, clone, mul, div)
- """) # noqa: B950
- self.assertExpectedInline(outer_body.code.strip("\n"), """\
+ """, # noqa: B950
+ )
+ self.assertExpectedInline(
+ outer_body.code.strip("\n"),
+ """\
def forward(self, arg0_1, arg1_1, arg2_1, arg3_1):
while_loop_cond_graph_0 = self.while_loop_cond_graph_0
while_loop_body_graph_0 = self.while_loop_body_graph_0
@@ -653,20 +768,27 @@ def forward(self, arg0_1, arg1_1, arg2_1, arg3_1):
mul = torch.ops.aten.mul.Tensor(getitem_2, 2); getitem_2 = None
div = torch.ops.aten.div.Tensor(getitem_3, 2); getitem_3 = None
return (sub, clone, mul, div)
- """) # noqa: B950
- self.assertExpectedInline(inner_body.code.strip("\n"), """\
+ """, # noqa: B950
+ )
+ self.assertExpectedInline(
+ inner_body.code.strip("\n"),
+ """\
def forward(self, arg0_1, arg1_1, arg2_1, arg3_1):
clone = torch.ops.aten.clone.default(arg0_1); arg0_1 = None
sub = torch.ops.aten.sub.Tensor(arg1_1, 1); arg1_1 = None
add = torch.ops.aten.add.Tensor(arg2_1, 3.14); arg2_1 = None
sub_1 = torch.ops.aten.sub.Tensor(arg3_1, 2.71); arg3_1 = None
return (clone, sub, add, sub_1)
- """)
- self.assertExpectedInline(inner_cond.code.strip("\n"), """\
+ """,
+ )
+ self.assertExpectedInline(
+ inner_cond.code.strip("\n"),
+ """\
def forward(self, arg0_1, arg1_1, arg2_1, arg3_1):
gt = torch.ops.aten.gt.Scalar(arg1_1, 0); arg1_1 = None
return gt
- """)
+ """,
+ )
def test_cond_nested_traced(self):
def true_nested(y):
@@ -688,10 +810,18 @@ def forward(self, arg0_1, arg1_1, arg2_1, arg3_1):
x = torch.randn(4)
graph = make_fx(f)(x, torch.tensor(False), torch.tensor(False))
- result_true_true = graph.forward(x, torch.tensor(True), torch.tensor(True)) # True + True -> x * x
- result_true_false = graph.forward(x, torch.tensor(True), torch.tensor(False)) # True + True -> x + x
- result_false_true = graph.forward(x, torch.tensor(False), torch.tensor(True)) # False + either -> cos
- result_false_false = graph.forward(x, torch.tensor(False), torch.tensor(False)) # False + either -> cos
+ result_true_true = graph.forward(
+ x, torch.tensor(True), torch.tensor(True)
+ ) # True + True -> x * x
+ result_true_false = graph.forward(
+ x, torch.tensor(True), torch.tensor(False)
+ ) # True + True -> x + x
+ result_false_true = graph.forward(
+ x, torch.tensor(False), torch.tensor(True)
+ ) # False + either -> cos
+ result_false_false = graph.forward(
+ x, torch.tensor(False), torch.tensor(False)
+ ) # False + either -> cos
self.assertNotEqual(result_true_true, result_true_false)
self.assertFalse(torch.allclose(result_false_true, result_true_true))
@@ -703,8 +833,13 @@ def forward(self, arg0_1, arg1_1, arg2_1, arg3_1):
self.assertEqual(result_false_true, torch.cos(x))
- graph = make_fx(f, tracing_mode="symbolic")(x, torch.tensor(False), torch.tensor(False))
- self.assertEqual(graph(x, torch.tensor(True), torch.tensor(True)), f(x, torch.tensor(True), torch.tensor(True)))
+ graph = make_fx(f, tracing_mode="symbolic")(
+ x, torch.tensor(False), torch.tensor(False)
+ )
+ self.assertEqual(
+ graph(x, torch.tensor(True), torch.tensor(True)),
+ f(x, torch.tensor(True), torch.tensor(True)),
+ )
def test_cond_functionalized_hah(self):
def true_fn(x):
@@ -733,7 +868,9 @@ def forward(self, arg0_1, arg1_1, arg2_1, arg3_1):
self.assertFalse(any(op._schema.is_mutable for op in all_ops_in_true_branch))
- graph_module = make_fx(torch.func.functionalize(f), tracing_mode="symbolic")(*example_inputs)
+ graph_module = make_fx(torch.func.functionalize(f), tracing_mode="symbolic")(
+ *example_inputs
+ )
self.assertEqual(graph_module(*example_inputs), f(*example_inputs))
def test_cond_retrace_functionalized(self):
@@ -748,7 +885,9 @@ def forward(self, arg0_1, arg1_1, arg2_1, arg3_1):
inp = torch.ones(1, 2)
gm_non_functional = make_fx(f, tracing_mode="real")(inp)
- gm_functional = make_fx(torch.func.functionalize(gm_non_functional), tracing_mode="real")(inp)
+ gm_functional = make_fx(
+ torch.func.functionalize(gm_non_functional), tracing_mode="real"
+ )(inp)
self.assertEqual(gm_functional(torch.zeros(1, 2)), f(torch.zeros(1, 2)))
def test_cond_functionalized_nested(self):
@@ -780,7 +919,9 @@ def forward(self, arg0_1, arg1_1, arg2_1, arg3_1):
gm_true_true_branch = graph_module.true_graph_0.true_graph_0
- graph_module1 = make_fx(torch.func.functionalize(f), tracing_mode="symbolic")(*example_inputs)
+ graph_module1 = make_fx(torch.func.functionalize(f), tracing_mode="symbolic")(
+ *example_inputs
+ )
self.assertEqual(graph_module1(*example_inputs), f(*example_inputs))
all_ops = []
@@ -823,10 +964,14 @@ def forward(self, arg0_1, arg1_1, arg2_1, arg3_1):
example_inputs = (torch.ones(4, 5),)
functional_f = torch.func.functionalize(f)
- with self.assertRaisesRegex(UnsupportedAliasMutationException, "One of torch.cond branch"):
+ with self.assertRaisesRegex(
+ UnsupportedAliasMutationException, "One of torch.cond branch"
+ ):
functional_f(*example_inputs)
- with self.assertRaisesRegex(UnsupportedAliasMutationException, "One of torch.cond branch"):
+ with self.assertRaisesRegex(
+ UnsupportedAliasMutationException, "One of torch.cond branch"
+ ):
make_fx(torch.func.functionalize(f))(*example_inputs)
def test_cond_functionalized_input_mutation_on_false_branch(self):
@@ -844,10 +989,14 @@ def forward(self, arg0_1, arg1_1, arg2_1, arg3_1):
example_inputs = (torch.ones(5, 5),)
functional_f = torch.func.functionalize(f)
- with self.assertRaisesRegex(UnsupportedAliasMutationException, "One of torch.cond branch"):
+ with self.assertRaisesRegex(
+ UnsupportedAliasMutationException, "One of torch.cond branch"
+ ):
functional_f(*example_inputs)
- with self.assertRaisesRegex(UnsupportedAliasMutationException, "One of torch.cond branch"):
+ with self.assertRaisesRegex(
+ UnsupportedAliasMutationException, "One of torch.cond branch"
+ ):
make_fx(torch.func.functionalize(f))(*example_inputs)
def test_cond_functionalized_output_alias_input(self):
@@ -865,10 +1014,16 @@ def forward(self, arg0_1, arg1_1, arg2_1, arg3_1):
example_inputs = (torch.ones(5, 5),)
functional_f = torch.func.functionalize(f)
- with self.assertRaisesRegex(UnsupportedAliasMutationException, "One of torch.cond branch might be aliasing"):
+ with self.assertRaisesRegex(
+ UnsupportedAliasMutationException,
+ "One of torch.cond branch might be aliasing",
+ ):
functional_f(*example_inputs)
- with self.assertRaisesRegex(UnsupportedAliasMutationException, "One of torch.cond branch might be aliasing"):
+ with self.assertRaisesRegex(
+ UnsupportedAliasMutationException,
+ "One of torch.cond branch might be aliasing",
+ ):
make_fx(torch.func.functionalize(f))(*example_inputs)
def test_cond_functionalized_nested_input_mutation(self):
@@ -892,10 +1047,14 @@ def forward(self, arg0_1, arg1_1, arg2_1, arg3_1):
example_inputs = (torch.ones(4, 5),)
functional_f = torch.func.functionalize(f)
- with self.assertRaisesRegex(UnsupportedAliasMutationException, "One of torch.cond branch"):
+ with self.assertRaisesRegex(
+ UnsupportedAliasMutationException, "One of torch.cond branch"
+ ):
functional_f(*example_inputs)
- with self.assertRaisesRegex(UnsupportedAliasMutationException, "One of torch.cond branch"):
+ with self.assertRaisesRegex(
+ UnsupportedAliasMutationException, "One of torch.cond branch"
+ ):
make_fx(torch.func.functionalize(f))(*example_inputs)
def test_cond_functionalized_nested_input_mutation_with_aot_func(self):
@@ -921,10 +1080,14 @@ def forward(self, arg0_1, arg1_1, arg2_1, arg3_1):
try:
example_input_func = to_fun_old(example_input)
torch._enable_functionalization(reapply_views=False)
- with self.assertRaisesRegex(UnsupportedAliasMutationException, "One of torch.cond branch"):
+ with self.assertRaisesRegex(
+ UnsupportedAliasMutationException, "One of torch.cond branch"
+ ):
f(example_input_func)
- with self.assertRaisesRegex(UnsupportedAliasMutationException, "One of torch.cond branch"):
+ with self.assertRaisesRegex(
+ UnsupportedAliasMutationException, "One of torch.cond branch"
+ ):
make_fx(f)(example_input_func)
finally:
torch._disable_functionalization()
@@ -937,12 +1100,14 @@ def forward(self, arg0_1, arg1_1, arg2_1, arg3_1):
return func(*args, **kwargs)
finally:
torch._disable_functionalization()
+
return wrapper
- with self.assertRaisesRegex(UnsupportedAliasMutationException, "One of torch.cond branch"):
+ with self.assertRaisesRegex(
+ UnsupportedAliasMutationException, "One of torch.cond branch"
+ ):
make_fx(f_wrapper(f))(example_input_func)
-
def test_cond_functionalized_input_aliasing_with_aot_func(self):
def true_fn(x):
return x
@@ -959,7 +1124,10 @@ def forward(self, arg0_1, arg1_1, arg2_1, arg3_1):
try:
example_input_func = to_fun_old(example_input)
torch._enable_functionalization(reapply_views=False)
- with self.assertRaisesRegex(UnsupportedAliasMutationException, "One of torch.cond branch might be aliasing"):
+ with self.assertRaisesRegex(
+ UnsupportedAliasMutationException,
+ "One of torch.cond branch might be aliasing",
+ ):
f(example_input_func)
finally:
torch._disable_functionalization()
@@ -970,15 +1138,27 @@ def forward(self, arg0_1, arg1_1, arg2_1, arg3_1):
torch._enable_functionalization(reapply_views=False)
try:
func_args = pytree.tree_map(
- lambda x: torch._to_functional_tensor(x) if isinstance(x, torch.Tensor) else x, args)
+ lambda x: torch._to_functional_tensor(x)
+ if isinstance(x, torch.Tensor)
+ else x,
+ args,
+ )
func_kwargs = pytree.tree_map(
- lambda x: torch._to_functional_tensor(x) if isinstance(x, torch.Tensor) else x, kwargs)
+ lambda x: torch._to_functional_tensor(x)
+ if isinstance(x, torch.Tensor)
+ else x,
+ kwargs,
+ )
return func(*func_args, **func_kwargs)
finally:
torch._disable_functionalization()
+
return wrapper
- with self.assertRaisesRegex(UnsupportedAliasMutationException, "One of torch.cond branch might be aliasing"):
+ with self.assertRaisesRegex(
+ UnsupportedAliasMutationException,
+ "One of torch.cond branch might be aliasing",
+ ):
make_fx(f_wrapper(f))(example_input)
def test_cond_functionalized_aot_func_check_functional(self):
@@ -1002,12 +1182,19 @@ def forward(self, arg0_1, arg1_1, arg2_1, arg3_1):
torch._enable_functionalization(reapply_views=False)
try:
func_args = pytree.tree_map(
- lambda x: to_fun_old(x) if isinstance(x, torch.Tensor) else x, args)
+ lambda x: to_fun_old(x) if isinstance(x, torch.Tensor) else x,
+ args,
+ )
func_kwargs = pytree.tree_map(
- lambda x: to_fun_old(x) if isinstance(x, torch.Tensor) else x, kwargs)
- return pytree.tree_map(from_fun_old, func(*func_args, **func_kwargs))
+ lambda x: to_fun_old(x) if isinstance(x, torch.Tensor) else x,
+ kwargs,
+ )
+ return pytree.tree_map(
+ from_fun_old, func(*func_args, **func_kwargs)
+ )
finally:
torch._disable_functionalization()
+
return wrapper
result_gm = make_fx(f_wrapper(f))(example_input)
@@ -1030,7 +1217,7 @@ def forward(self, arg0_1, arg1_1, arg2_1, arg3_1):
def true_fn(k, pred2):
z = cond(pred2, true_nested, false_nested, [k])
- return torch.add(torch.tensor([.25, .25]), z)
+ return torch.add(torch.tensor([0.25, 0.25]), z)
def false_fn(k, _):
return k.cos()
@@ -1070,7 +1257,9 @@ def forward(self, arg0_1, arg1_1, arg2_1, arg3_1):
x = torch.randn(4)
graph = make_fx(f)(x, torch.tensor(False), torch.tensor(False))
- self.assertExpectedInline(graph.code.strip(), """\
+ self.assertExpectedInline(
+ graph.code.strip(),
+ """\
def forward(self, x_1, pred_1, pred2_1):
true_graph_0 = self.true_graph_0
false_graph_0 = self.false_graph_0
@@ -1081,11 +1270,15 @@ def forward(self, x_1, pred_1, pred2_1):
conditional_1 = torch.ops.higher_order.cond(pred2_1, true_graph_1, false_graph_1, [x_1]); pred2_1 = true_graph_1 = false_graph_1 = x_1 = None
getitem_1 = conditional_1[0]; conditional_1 = None
add = torch.ops.aten.add.Tensor(getitem, getitem_1); getitem = getitem_1 = None
- return add""") # noqa: B950
- self.assertExpectedInline(graph.true_graph_0.code.strip(), """\
+ return add""", # noqa: B950
+ )
+ self.assertExpectedInline(
+ graph.true_graph_0.code.strip(),
+ """\
def forward(self, arg0_1):
mul = torch.ops.aten.mul.Tensor(arg0_1, arg0_1); arg0_1 = None
- return (mul,)""")
+ return (mul,)""",
+ )
def test_raise_error_on_mismatch_type_size(self):
def true_fn(x):
@@ -1100,7 +1293,7 @@ def forward(self, arg0_1):
x = torch.randn(4)
with self.assertRaisesRegex(
torch._dynamo.exc.UncapturedHigherOrderOpError,
- "Cond doesn't work unless it is captured completely with torch.compile"
+ "Cond doesn't work unless it is captured completely with torch.compile",
):
make_fx(f)(x, torch.tensor(False))
@@ -1117,7 +1310,7 @@ def forward(self, arg0_1):
x = torch.randn(4)
with self.assertRaisesRegex(
torch._dynamo.exc.UncapturedHigherOrderOpError,
- "Cond doesn't work unless it is captured completely with torch.compile"
+ "Cond doesn't work unless it is captured completely with torch.compile",
):
make_fx(f)(x, torch.tensor(False))
@@ -1157,12 +1350,22 @@ def forward(self, arg0_1):
return cond(pred, true_fn, false_fn, [x, pred2])
x = torch.randn(4)
- graph = make_fx(f, tracing_mode="fake")(x, torch.tensor(False), torch.tensor(False))
+ graph = make_fx(f, tracing_mode="fake")(
+ x, torch.tensor(False), torch.tensor(False)
+ )
- result_true_true = graph.forward(x, torch.tensor(True), torch.tensor(True)) # True + True -> x * x
- result_true_false = graph.forward(x, torch.tensor(True), torch.tensor(False)) # True + True -> x + x
- result_false_true = graph.forward(x, torch.tensor(False), torch.tensor(True)) # False + either -> cos
- result_false_false = graph.forward(x, torch.tensor(False), torch.tensor(False)) # False + either -> cos
+ result_true_true = graph.forward(
+ x, torch.tensor(True), torch.tensor(True)
+ ) # True + True -> x * x
+ result_true_false = graph.forward(
+ x, torch.tensor(True), torch.tensor(False)
+ ) # True + False -> x + x
+ result_false_true = graph.forward(
+ x, torch.tensor(False), torch.tensor(True)
+ ) # False + either -> cos
+ result_false_false = graph.forward(
+ x, torch.tensor(False), torch.tensor(False)
+ ) # False + either -> cos
self.assertNotEqual(result_true_true, result_true_false)
self.assertFalse(torch.allclose(result_false_true, result_true_true))
@@ -1183,7 +1386,7 @@ def forward(self, arg0_1):
def true_fn(k, pred2):
z = cond(pred2, true_nested, false_nested, [k])
- return torch.add(torch.tensor([.25, .25]), z)
+ return torch.add(torch.tensor([0.25, 0.25]), z)
def false_fn(k, _):
return k.cos()
@@ -1192,7 +1395,9 @@ def forward(self, arg0_1):
return cond(pred, true_fn, false_fn, [k, pred2])
x = torch.tensor([0.5, 0.5])
- graph = make_fx(f, tracing_mode="fake")(x, torch.tensor(False), torch.tensor(False))
+ graph = make_fx(f, tracing_mode="fake")(
+ x, torch.tensor(False), torch.tensor(False)
+ )
a = torch.tensor([1.0, 1.0])
result_true_true = graph.forward(a, torch.tensor(True), torch.tensor(True))
@@ -1221,9 +1426,13 @@ def forward(self, arg0_1):
return a_out + b_out
x = torch.randn(4)
- graph = make_fx(f, tracing_mode="fake")(x, torch.tensor(False), torch.tensor(False))
+ graph = make_fx(f, tracing_mode="fake")(
+ x, torch.tensor(False), torch.tensor(False)
+ )
- self.assertExpectedInline(graph.code.strip(), """\
+ self.assertExpectedInline(
+ graph.code.strip(),
+ """\
def forward(self, x_1, pred_1, pred2_1):
true_graph_0 = self.true_graph_0
false_graph_0 = self.false_graph_0
@@ -1234,11 +1443,15 @@ def forward(self, x_1, pred_1, pred2_1):
conditional_1 = torch.ops.higher_order.cond(pred2_1, true_graph_1, false_graph_1, [x_1]); pred2_1 = true_graph_1 = false_graph_1 = x_1 = None
getitem_1 = conditional_1[0]; conditional_1 = None
add = torch.ops.aten.add.Tensor(getitem, getitem_1); getitem = getitem_1 = None
- return add""") # noqa: B950
- self.assertExpectedInline(graph.true_graph_0.code.strip(), """\
+ return add""", # noqa: B950
+ )
+ self.assertExpectedInline(
+ graph.true_graph_0.code.strip(),
+ """\
def forward(self, arg0_1):
mul = torch.ops.aten.mul.Tensor(arg0_1, arg0_1); arg0_1 = None
- return (mul,)""")
+ return (mul,)""",
+ )
def test_raise_error_on_mismatch_type_size_fake_tensor(self):
def true_fn(x):
@@ -1253,11 +1466,10 @@ def forward(self, arg0_1):
x = torch.randn(4)
with self.assertRaisesRegex(
torch._dynamo.exc.UncapturedHigherOrderOpError,
- "Cond doesn't work unless it is captured completely with torch.compile"
+ "Cond doesn't work unless it is captured completely with torch.compile",
):
make_fx(f, tracing_mode="fake")(x, torch.tensor(False))
-
def test_raise_error_on_mismatch_tensor_size_fake_tensor(self):
def true_fn(x):
return x.sin()
@@ -1271,7 +1483,7 @@ def forward(self, arg0_1):
x = torch.randn(4)
with self.assertRaisesRegex(
torch._dynamo.exc.UncapturedHigherOrderOpError,
- "Cond doesn't work unless it is captured completely with torch.compile"
+ "Cond doesn't work unless it is captured completely with torch.compile",
):
make_fx(f, tracing_mode="fake")(x, torch.tensor(False))
@@ -1279,7 +1491,10 @@ def forward(self, arg0_1):
i = 0
for m in gm.modules():
for node in m.graph.nodes:
- if node.op == "call_function" and node.target == torch.ops.higher_order.map_impl:
+ if (
+ node.op == "call_function"
+ and node.target == torch.ops.higher_order.map_impl
+ ):
i += 1
self.assertEqual(i, op_count)
@@ -1320,7 +1535,9 @@ def forward(self, arg0_1):
return out[0] + z, out[1] * z
example_x = [[torch.ones(3, 4, 5)], torch.ones(3, 4, 5)]
- gm = make_fx(g, tracing_mode="symbolic")(example_x, torch.ones(5), torch.ones(5))
+ gm = make_fx(g, tracing_mode="symbolic")(
+ example_x, torch.ones(5), torch.ones(5)
+ )
x = [[torch.randn(4, 5, 6)], torch.ones(4, 5, 6)]
y = torch.randn(6)
z = torch.ones(6)
@@ -1337,7 +1554,9 @@ def forward(self, arg0_1):
return {"f": out["d"] + z, "g": out["e"] * z}
example_x = {"b": {"a": torch.ones(3, 4, 5)}, "c": torch.ones(3, 4, 5)}
- gm = make_fx(g, tracing_mode="symbolic")(example_x, torch.ones(5), torch.ones(5))
+ gm = make_fx(g, tracing_mode="symbolic")(
+ example_x, torch.ones(5), torch.ones(5)
+ )
x = {"b": {"a": torch.randn(4, 5, 6)}, "c": torch.ones(4, 5, 6)}
y = torch.randn(6)
z = torch.ones(6)
@@ -1353,14 +1572,15 @@ def forward(self, arg0_1):
out = control_flow.map(f, xs, y)
return torch.autograd.grad(out, (xs, y), torch.ones_like(out))
- gm = make_fx(g, tracing_mode="symbolic")(torch.ones(3, 4, 5, requires_grad=True), torch.ones(5, requires_grad=True))
+ gm = make_fx(g, tracing_mode="symbolic")(
+ torch.ones(3, 4, 5, requires_grad=True), torch.ones(5, requires_grad=True)
+ )
x = torch.randn(4, 5, 6, requires_grad=True)
y = torch.randn(6, requires_grad=True)
res = gm(x, y)
self.assertEqual(res, g(x, y))
self.check_map_count(gm, 2)
-
def test_tracing_map_autograd_symbolic_list(self):
import torch.utils._pytree as pytree
@@ -1372,11 +1592,14 @@ def forward(self, arg0_1):
flat_out = pytree.tree_leaves(out)
flat_inp = pytree.tree_leaves((xs, y))
requires_grad_inp = [inp for inp in flat_inp if inp.requires_grad]
- return torch.autograd.grad(flat_out, requires_grad_inp, [torch.ones_like(out) for out in flat_out])
+ return torch.autograd.grad(
+ flat_out, requires_grad_inp, [torch.ones_like(out) for out in flat_out]
+ )
gm = make_fx(g, tracing_mode="symbolic")(
[torch.ones(3, 4, 5), torch.ones(3, 4, 5, requires_grad=True)],
- torch.ones(5, requires_grad=True))
+ torch.ones(5, requires_grad=True),
+ )
x = [torch.randn(4, 5, 6), torch.ones(4, 5, 6, requires_grad=True)]
y = torch.randn(6, requires_grad=True)
res = gm(x, y)
@@ -1392,11 +1615,21 @@ def forward(self, arg0_1):
flat_out = pytree.tree_leaves(out)
flat_inp = pytree.tree_leaves((xs, y))
requires_grad_inp = [inp for inp in flat_inp if inp.requires_grad]
- return torch.autograd.grad(flat_out, requires_grad_inp, [torch.ones_like(out) for out in flat_out])
+ return torch.autograd.grad(
+ flat_out, requires_grad_inp, [torch.ones_like(out) for out in flat_out]
+ )
- traced_x = {"a": torch.ones(3, 4, 5, requires_grad=True), "b": torch.ones(3, 4, 5, requires_grad=True)}
- gm = make_fx(g, tracing_mode="symbolic")(traced_x, torch.ones(5, requires_grad=True))
- x = {"a": torch.randn(4, 5, 6, requires_grad=True), "b": torch.ones(4, 5, 6, requires_grad=True)}
+ traced_x = {
+ "a": torch.ones(3, 4, 5, requires_grad=True),
+ "b": torch.ones(3, 4, 5, requires_grad=True),
+ }
+ gm = make_fx(g, tracing_mode="symbolic")(
+ traced_x, torch.ones(5, requires_grad=True)
+ )
+ x = {
+ "a": torch.randn(4, 5, 6, requires_grad=True),
+ "b": torch.ones(4, 5, 6, requires_grad=True),
+ }
y = torch.randn(6, requires_grad=True)
res = gm(x, y)
self.assertEqual(res, g(x, y))
@@ -1421,9 +1654,13 @@ def forward(self, arg0_1):
return pytree.tree_map(from_fun_old, func(*args, **kwargs))
finally:
torch._disable_functionalization()
+
return wrapper
- example_inputs = (torch.ones(3, 2, 4, requires_grad=True), torch.ones(2, 4, requires_grad=True))
+ example_inputs = (
+ torch.ones(3, 2, 4, requires_grad=True),
+ torch.ones(2, 4, requires_grad=True),
+ )
gm = make_fx(f, tracing_mode="symbolic")(*example_inputs)
fgm = make_fx(f_wrapper(f), tracing_mode="symbolic")(*example_inputs)
xs = torch.ones(3, 4, 5, requires_grad=True)
@@ -1440,6 +1677,7 @@ def forward(self, arg0_1):
elif schema := getattr(node.target, "_schema", None):
c += int(schema.is_mutable)
return c
+
self.assertEqual(count_mutable(fgm), 0)
# One for forward, one for recomputation logic in backward
self.assertEqual(count_mutable(gm), 2)
@@ -1460,7 +1698,9 @@ def forward(self, arg0_1):
gm = make_fx(torch.func.functionalize(f))(*example_inputs)
self.assertEqual(gm(*example_inputs), f(*example_inputs))
- gm = make_fx(torch.func.functionalize(f), tracing_mode="symbolic")(*example_inputs)
+ gm = make_fx(torch.func.functionalize(f), tracing_mode="symbolic")(
+ *example_inputs
+ )
self.assertEqual(gm(*example_inputs), f(*example_inputs))
for node in gm.body_graph_0.graph.nodes:
@@ -1485,6 +1725,7 @@ def forward(self, arg0_1):
return pytree.tree_map(from_fun_old, func(*args, **kwargs))
finally:
torch._disable_functionalization()
+
return wrapper
example_inputs = (torch.ones(3, 2, 4), torch.ones(4))
@@ -1507,7 +1748,9 @@ def forward(self, arg0_1):
example_inputs = (torch.ones(3, 2, 4), torch.ones(4))
functional_f = torch.func.functionalize(f)
- with self.assertRaisesRegex(UnsupportedAliasMutationException, "torch.map is mutating the input!"):
+ with self.assertRaisesRegex(
+ UnsupportedAliasMutationException, "torch.map is mutating the input!"
+ ):
functional_f(*example_inputs)
def test_map_functionalized_elem_mutation(self):
@@ -1520,7 +1763,9 @@ def forward(self, arg0_1):
example_inputs = (torch.ones(3, 2, 4), torch.ones(4))
functional_f = torch.func.functionalize(f)
- with self.assertRaisesRegex(UnsupportedAliasMutationException, "torch.map is mutating the input!"):
+ with self.assertRaisesRegex(
+ UnsupportedAliasMutationException, "torch.map is mutating the input!"
+ ):
functional_f(*example_inputs)
def test_cond_autograd_fail(self):
@@ -1533,7 +1778,10 @@ def forward(self, arg0_1):
def f(x, y):
return control_flow.cond(x.shape[0] > 4, true_fn, false_fn, [y])
- example_inputs = (torch.ones(3, 2, 4, requires_grad=True), torch.ones(4, requires_grad=True))
+ example_inputs = (
+ torch.ones(3, 2, 4, requires_grad=True),
+ torch.ones(4, requires_grad=True),
+ )
with self.assertRaisesRegex(RuntimeError, "Autograd not implemented for cond"):
f(*example_inputs).sum().backward()
@@ -1550,7 +1798,9 @@ def forward(self, arg0_1):
example_inputs = (torch.ones(3, 2, 4),)
functional_f = torch.func.functionalize(f)
- with self.assertRaisesRegex(UnsupportedAliasMutationException, "torch.map is aliasing the input!"):
+ with self.assertRaisesRegex(
+ UnsupportedAliasMutationException, "torch.map is aliasing the input!"
+ ):
functional_f(*example_inputs)
def test_nested_map_cond_real(self):
@@ -1600,7 +1850,6 @@ def forward(self, arg0_1):
self.check_map_count(gm, 1)
def test_nested_cond_map_cond_symbolic(self):
-
def true_fn(x, y):
return x * y
@@ -1647,7 +1896,9 @@ def forward(self, arg0_1):
# The symbols in make_fx's shape_env should not be specialized.
self.assertEqual(len(gm.shape_env.guards), 0)
- self.assertExpectedInline(gm.code.strip(), """\
+ self.assertExpectedInline(
+ gm.code.strip(),
+ """\
def forward(self, x_1):
sym_size_int = torch.ops.aten.sym_size.int(x_1, 0)
eq = sym_size_int == 4; sym_size_int = None
@@ -1655,14 +1906,14 @@ def forward(self, x_1):
false_graph_0 = self.false_graph_0
conditional = torch.ops.higher_order.cond(eq, true_graph_0, false_graph_0, [x_1]); eq = true_graph_0 = false_graph_0 = x_1 = None
getitem = conditional[0]; conditional = None
- return getitem""") # noqa: B950
+ return getitem""", # noqa: B950
+ )
# We expect the traced graph module to work even if input size changes.
x = torch.ones(4, 3, 2)
self.assertEqual(gm(x), true_fn(x))
self.assertEqual(foo(x), true_fn(x))
-
def _check_closure_correctly_lifted(self, f, *, args, exp_res, exp_arg_num):
assert isinstance(args, (tuple, list))
self.assertEqual(f(*args), exp_res)
@@ -1671,18 +1922,25 @@ def forward(self, x_1):
def cnt_placeholder(gm):
return len([node for node in gm.graph.nodes if node.op == "placeholder"])
+
placeholder_cnts = [cnt_placeholder(mod) for mod in gm.children()]
self.assertTrue(all(cnt == exp_arg_num for cnt in placeholder_cnts))
- def _check_closure_correctly_lifted_with_mutation(self, f, closures_to_be_mutated, *, args, exp_arg_num):
+ def _check_closure_correctly_lifted_with_mutation(
+ self, f, closures_to_be_mutated, *, args, exp_arg_num
+ ):
exp_res = f(*args)
- self._check_closure_correctly_lifted(f, args=args, exp_res=exp_res, exp_arg_num=exp_arg_num)
+ self._check_closure_correctly_lifted(
+ f, args=args, exp_res=exp_res, exp_arg_num=exp_arg_num
+ )
for closure in closures_to_be_mutated:
closure.add(-1)
new_exp_res = f(*args)
- self._check_closure_correctly_lifted(f, args=args, exp_res=new_exp_res, exp_arg_num=exp_arg_num)
+ self._check_closure_correctly_lifted(
+ f, args=args, exp_res=new_exp_res, exp_arg_num=exp_arg_num
+ )
def test_cond_with_tensor_closure(self):
a = torch.ones(2, 3)
@@ -1697,10 +1955,11 @@ def forward(self, x_1):
def foo(x):
return cond(x.shape[0] == 4, true_fn, false_fn, [x])
-
# expected branches takes [x, a, b] as input
inp = torch.randn(2, 3)
- self._check_closure_correctly_lifted_with_mutation(foo, (a, b), args=(inp, ), exp_arg_num=3)
+ self._check_closure_correctly_lifted_with_mutation(
+ foo, (a, b), args=(inp,), exp_arg_num=3
+ )
def test_cond_with_tensor_closure_graph_module(self):
a = torch.ones(2, 3)
@@ -1715,13 +1974,14 @@ def forward(self, x_1):
def foo(x):
return cond(x.shape[0] == 4, true_fn, false_fn, [x])
-
# expected branches takes [x, a, b] as input
inp = torch.randn(2, 3)
gm = make_fx(foo)(inp)
- self.assertExpectedInline(gm.code.strip(), """\
+ self.assertExpectedInline(
+ gm.code.strip(),
+ """\
def forward(self, x_1):
true_graph_0 = self.true_graph_0
false_graph_0 = self.false_graph_0
@@ -1729,17 +1989,23 @@ def forward(self, x_1):
_tensor_constant1 = self._tensor_constant1
conditional = torch.ops.higher_order.cond(False, true_graph_0, false_graph_0, [x_1, _tensor_constant0, _tensor_constant1]); true_graph_0 = false_graph_0 = x_1 = _tensor_constant0 = _tensor_constant1 = None
getitem = conditional[0]; conditional = None
- return getitem""") # noqa: B950
- self.assertExpectedInline(gm.true_graph_0.code.strip(), """\
+ return getitem""", # noqa: B950
+ )
+ self.assertExpectedInline(
+ gm.true_graph_0.code.strip(),
+ """\
def forward(self, arg0_1, arg1_1, arg2_1):
add = torch.ops.aten.add.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None
- return (add,)""")
+ return (add,)""",
+ )
def test_cond_with_module_param_closure(self):
class Mod(torch.nn.Module):
def __init__(self):
super().__init__()
- self.register_parameter("param", torch.nn.Parameter(torch.ones(2, 3), requires_grad=False))
+ self.register_parameter(
+ "param", torch.nn.Parameter(torch.ones(2, 3), requires_grad=False)
+ )
self.register_buffer("buffer", torch.ones(2, 3) + 1)
my_mode = Mod()
@@ -1755,11 +2021,11 @@ def forward(self, arg0_1, arg1_1, arg2_1):
inp = torch.ones(2, 3)
# expected both branches takes (x, param, buffer)
- self._check_closure_correctly_lifted_with_mutation(foo, (my_mode.param, my_mode.buffer), args=(inp,), exp_arg_num=3)
-
+ self._check_closure_correctly_lifted_with_mutation(
+ foo, (my_mode.param, my_mode.buffer), args=(inp,), exp_arg_num=3
+ )
def test_cond_with_module_python_scalar_closure(self):
-
def foo(x):
a = torch.ones(1, 1)
b = 1
@@ -1769,12 +2035,15 @@ def forward(self, arg0_1, arg1_1, arg2_1):
def false_fn(x):
return x + b
+
return cond(x.shape[0] == 4, true_fn, false_fn, [x])
inp = torch.ones(2, 3)
res = inp + 1
# python scalar b is not lifted as input, so both branches take (x, a)
- self._check_closure_correctly_lifted(foo, args=(inp,), exp_res=res, exp_arg_num=2)
+ self._check_closure_correctly_lifted(
+ foo, args=(inp,), exp_res=res, exp_arg_num=2
+ )
def test_cond_nested_with_closure(self):
a = torch.ones(1, 1)
@@ -1792,6 +2061,7 @@ def forward(self, arg0_1, arg1_1, arg2_1):
def false_fn(x):
return cond(x.shape[0] > 4, inner_true_fn, inner_false_fn, [x])
+
return cond(x.shape[0] == 4, true_fn, false_fn, [x])
inp = torch.ones(2, 3)
@@ -1799,7 +2069,9 @@ def forward(self, arg0_1, arg1_1, arg2_1):
# realize that the nonlocal variables are same for the true and false
# branches, so it should de-dupe them.
# For second-level conds, it takes (x, a, b)
- self._check_closure_correctly_lifted_with_mutation(foo, (a, b), args=(inp,), exp_arg_num=3)
+ self._check_closure_correctly_lifted_with_mutation(
+ foo, (a, b), args=(inp,), exp_arg_num=3
+ )
def test_cond_nested_with_closure_graph_module(self):
a = torch.ones(1, 1)
@@ -1817,12 +2089,14 @@ def forward(self, arg0_1, arg1_1, arg2_1):
def false_fn(x):
return cond(x.shape[0] > 4, inner_true_fn, inner_false_fn, [x])
+
return cond(x.shape[0] == 4, true_fn, false_fn, [x])
def test_map_unfunc_boolean_tensor_for_nested_map_cond(self):
def map_fn(pred, x):
def fn(x, pred):
- return control_flow.cond(pred, lambda x: x * 2, lambda x: x / 2 , (x,))
+ return control_flow.cond(pred, lambda x: x * 2, lambda x: x / 2, (x,))
+
return control_flow.map(fn, x, pred)
def f_wrapper(func):
@@ -1831,31 +2105,45 @@ def forward(self, arg0_1, arg1_1, arg2_1):
torch._enable_functionalization(reapply_views=False)
try:
func_args = pytree.tree_map(
- lambda x: to_fun_old(x) if isinstance(x, torch.Tensor) else x, args)
+ lambda x: to_fun_old(x) if isinstance(x, torch.Tensor) else x,
+ args,
+ )
func_kwargs = pytree.tree_map(
- lambda x: to_fun_old(x) if isinstance(x, torch.Tensor) else x, kwargs)
- return pytree.tree_map(from_fun_old, func(*func_args, **func_kwargs))
+ lambda x: to_fun_old(x) if isinstance(x, torch.Tensor) else x,
+ kwargs,
+ )
+ return pytree.tree_map(
+ from_fun_old, func(*func_args, **func_kwargs)
+ )
finally:
torch._disable_functionalization()
+
return wrapper
- gm = make_fx(f_wrapper(map_fn))(torch.tensor(True), torch.ones([2, 3], requires_grad=False))
- self.assertExpectedInline(gm.code.strip(), """\
+ gm = make_fx(f_wrapper(map_fn))(
+ torch.tensor(True), torch.ones([2, 3], requires_grad=False)
+ )
+ self.assertExpectedInline(
+ gm.code.strip(),
+ """\
def forward(self, pred_1, x_1):
body_graph_0 = self.body_graph_0
map_impl = torch.ops.higher_order.map_impl(body_graph_0, [x_1], [pred_1]); body_graph_0 = x_1 = pred_1 = None
getitem = map_impl[0]; map_impl = None
- return getitem""")
- self.assertExpectedInline(gm.body_graph_0.code.strip(), """\
+ return getitem""",
+ )
+ self.assertExpectedInline(
+ gm.body_graph_0.code.strip(),
+ """\
def forward(self, arg0_1, arg1_1):
true_graph_0 = self.true_graph_0
false_graph_0 = self.false_graph_0
conditional = torch.ops.higher_order.cond(arg1_1, true_graph_0, false_graph_0, [arg0_1]); arg1_1 = true_graph_0 = false_graph_0 = arg0_1 = None
getitem = conditional[0]; conditional = None
- return [getitem]""") # noqa: B950
+ return [getitem]""", # noqa: B950
+ )
def test_cond_make_fx_preserve_stack_trace_for_nodes_in_subgraph(self):
-
def true_fn(x):
return x + x.cos()
@@ -1864,22 +2152,28 @@ def forward(self, arg0_1, arg1_1):
def foo(x):
return cond(x.shape[0] == 4, true_fn, false_fn, (x,))
+
inp = torch.randn([4, 3])
gm, _ = torch._dynamo.export(foo)(inp)
def run_with_interpreter(*args):
with torch.fx.traceback.preserve_node_meta():
return torch.fx.Interpreter(gm).run(*args)
- new_gm = make_fx(run_with_interpreter)(inp)
+ new_gm = make_fx(run_with_interpreter)(inp)
checked_ops = {"add", "mul", "sin", "cos"}
checked_meta = ["source_fn_stack", "stack_trace"]
all_source_fns = collect_meta_for_filtered_nodes(gm, checked_ops, checked_meta)
- new_source_fns = collect_meta_for_filtered_nodes(new_gm, checked_ops, checked_meta)
+ new_source_fns = collect_meta_for_filtered_nodes(
+ new_gm, checked_ops, checked_meta
+ )
self.assertEqual(all_source_fns, new_source_fns)
- @unittest.skipIf(TEST_WITH_TORCHDYNAMO, "triggers cache limit for foo and changes unique_graphs count.")
+ @unittest.skipIf(
+ TEST_WITH_TORCHDYNAMO,
+ "triggers cache limit for foo and changes unique_graphs count.",
+ )
def test_cond_no_dynamo_cache_limit(self):
torch._dynamo.reset()
counters = torch._dynamo.utils.counters
@@ -1899,7 +2193,9 @@ def forward(self, arg0_1, arg1_1):
for _ in range(iter_n):
# each lambda has a different object id thus fails the guard
- self.assertEqual(foo(inp, make_dummy_fn("cos"), make_dummy_fn("sin")), exp_out)
+ self.assertEqual(
+ foo(inp, make_dummy_fn("cos"), make_dummy_fn("sin")), exp_out
+ )
# each iteration captures a cond and a getitem from the tuple output
self.assertEqual(counters["stats"]["calls_captured"], iter_n * 2)
@@ -1917,8 +2213,10 @@ def forward(self, arg0_1, arg1_1):
inps = (torch.ones(3, 4), torch.ones(3, 5), torch.ones(5, 4), torch.ones(5, 3))
for inp in inps:
- gm = make_fx(foo, tracing_mode='symbolic')(torch.ones(3, 4))
- self.assertExpectedInline(gm.code.strip(), """\
+ gm = make_fx(foo, tracing_mode="symbolic")(torch.ones(3, 4))
+ self.assertExpectedInline(
+ gm.code.strip(),
+ """\
def forward(self, x_1):
sym_size_int = torch.ops.aten.sym_size.int(x_1, 0)
eq = sym_size_int == 4; sym_size_int = None
@@ -1926,29 +2224,41 @@ def forward(self, x_1):
false_graph_0 = self.false_graph_0
conditional = torch.ops.higher_order.cond(eq, true_graph_0, false_graph_0, [x_1]); eq = true_graph_0 = false_graph_0 = x_1 = None
getitem = conditional[0]; conditional = None
- return getitem""") # noqa: B950
+ return getitem""", # noqa: B950
+ )
- self.assertExpectedInline(gm.true_graph_0.code.strip(), """\
+ self.assertExpectedInline(
+ gm.true_graph_0.code.strip(),
+ """\
def forward(self, arg0_1):
cos = torch.ops.aten.cos.default(arg0_1)
sub = torch.ops.aten.sub.Tensor(arg0_1, cos); arg0_1 = cos = None
- return (sub,)""")
+ return (sub,)""",
+ )
- self.assertExpectedInline(gm.false_graph_0.code.strip(), """\
+ self.assertExpectedInline(
+ gm.false_graph_0.code.strip(),
+ """\
def forward(self, arg0_1):
sin = torch.ops.aten.sin.default(arg0_1)
add = torch.ops.aten.add.Tensor(arg0_1, sin); arg0_1 = sin = None
- return (add,)""")
+ return (add,)""",
+ )
- def _create_test_fns_for_cond(self, pred, inner_most_fn, operands, closure_list, nested_level):
+ def _create_test_fns_for_cond(
+ self, pred, inner_most_fn, operands, closure_list, nested_level
+ ):
if nested_level == 0:
if len(closure_list) > 0:
+
def true_fn(*operands):
return inner_most_fn(*operands) + inner_most_fn(*closure_list)
def false_fn(*operands):
return inner_most_fn(*operands) - inner_most_fn(*closure_list)
+
else:
+
def true_fn(*operands):
return inner_most_fn(*operands)
@@ -1959,9 +2269,12 @@ def forward(self, arg0_1):
if len(operands) == 0 and len(closure_list) == 0:
return torch.zeros(1)
return cond(pred, true_fn, false_fn, operands)
+
return operands, fn
else:
- args, inner_fn = self._create_test_fns_for_cond(pred <= 0, inner_most_fn, operands, closure_list, nested_level - 1)
+ args, inner_fn = self._create_test_fns_for_cond(
+ pred <= 0, inner_most_fn, operands, closure_list, nested_level - 1
+ )
def true_fn(*operands):
return inner_most_fn(*operands) + inner_fn(*args)
@@ -1973,6 +2286,7 @@ def forward(self, arg0_1):
if len(operands) == 0 and len(closure_list) == 0:
return torch.ones(1)
return cond(pred, true_fn, false_fn, operands)
+
return operands, fn
def _init_predicate(self, pred_type):
@@ -1981,7 +2295,7 @@ def forward(self, arg0_1):
elif pred_type == "intTensor":
return torch.tensor(1)
elif pred_type == "floatTensor":
- return torch.tensor(1.)
+ return torch.tensor(1.0)
elif pred_type == "boolTensor":
return torch.tensor(False)
else:
@@ -2002,17 +2316,23 @@ def forward(self, arg0_1):
@parametrize("nOperands", [0, 1])
@parametrize("nClosure", [0, 1])
@parametrize("nesting", [0, 2])
- def test_cond_tracing_with_valid_inputs(self, predType, innerFnType, nOperands, nClosure, nesting):
+ def test_cond_tracing_with_valid_inputs(
+ self, predType, innerFnType, nOperands, nClosure, nesting
+ ):
pred = self._init_predicate(predType)
inner_fn = self._init_fn(innerFnType)
operands = [torch.ones(2, 3) + i for i in range(nOperands)]
closure = [torch.ones(2, 3) - i for i in range(nClosure)]
- args, fn = self._create_test_fns_for_cond(pred, inner_fn, operands, closure, nesting)
+ args, fn = self._create_test_fns_for_cond(
+ pred, inner_fn, operands, closure, nesting
+ )
eager_res = fn(*args)
for tracing_mode in ["symbolic", "fake", "real"]:
# set _allow_non_fake_inputs = True to allow fake prop through closures
with self.subTest(tracing_mode=tracing_mode):
- gm = make_fx(fn, tracing_mode=tracing_mode, _allow_non_fake_inputs=True)(*args)
+ gm = make_fx(
+ fn, tracing_mode=tracing_mode, _allow_non_fake_inputs=True
+ )(*args)
self.assertEqual(gm(*args), eager_res)
@parametrize("predType", ["boolTensor"])
@@ -2025,7 +2345,9 @@ def forward(self, arg0_1):
inner_fn = self._init_fn(innerFnType)
operands = [torch.ones(2, 3) + i for i in range(nOperands)]
closure = [torch.ones(2, 3) - i for i in range(nClosure)]
- args, fn = self._create_test_fns_for_cond(pred, inner_fn, operands, closure, nesting)
+ args, fn = self._create_test_fns_for_cond(
+ pred, inner_fn, operands, closure, nesting
+ )
eager_res = fn(*args)
out = torch.vmap(fn)(*args)
if nClosure == 0:
@@ -2035,13 +2357,12 @@ def forward(self, arg0_1):
self.assertEqual(eager_res, out[1])
def test_cond_vmap_simple(self):
-
def fn(x):
return torch.cond(
pred=torch.tensor([True]),
true_fn=lambda x: x + 100,
false_fn=lambda x: x,
- operands=(x,)
+ operands=(x,),
)
a = torch.arange(15).reshape((3, 5))
@@ -2050,30 +2371,24 @@ def forward(self, arg0_1):
self.assertEqual(res, a + 100)
def test_cond_vmap_multiple_inputs(self):
-
def fn(x, y):
return torch.cond(
pred=x.sum() < y.sum(),
true_fn=lambda x, y: x + 100,
false_fn=lambda x, y: y,
- operands=(x, y)
+ operands=(x, y),
)
a = torch.arange(15).reshape(3, 5)
b = torch.ones_like(a) + 3
res = torch.vmap(fn, in_dims=(0, 0))(a, b)
expected = torch.tensor(
- [
- [100, 101, 102, 103, 104],
- [4, 4, 4, 4, 4],
- [4, 4, 4, 4, 4]
- ]
+ [[100, 101, 102, 103, 104], [4, 4, 4, 4, 4], [4, 4, 4, 4, 4]]
)
self.assertEqual(res.shape, (3, 5))
self.assertEqual(expected, res)
def test_cond_vmap_single_input_with_closure(self):
-
a = torch.ones((3, 5)) + 3
c = torch.arange(5)
@@ -2082,16 +2397,19 @@ def forward(self, arg0_1):
pred=torch.tensor([True]),
true_fn=lambda x: x + c,
false_fn=lambda x: x - c,
- operands=(x,)
+ operands=(x,),
)
- res = torch.vmap(fn, in_dims=(0,))(a,)
+ res = torch.vmap(fn, in_dims=(0,))(
+ a,
+ )
with unittest.mock.patch("torch._dynamo.config.error_on_recompile", True):
- res = torch.vmap(fn, in_dims=(0,))(a,)
+ res = torch.vmap(fn, in_dims=(0,))(
+ a,
+ )
self.assertEqual(a + c, res)
def test_cond_vmap_multiple_args_with_closure(self):
-
a = torch.ones((3, 5), dtype=torch.int64) + 3
b = torch.arange(15).reshape(3, 5)
c = torch.arange(5)
@@ -2101,7 +2419,7 @@ def forward(self, arg0_1):
pred=torch.tensor([False]),
true_fn=lambda x, y: x + c,
false_fn=lambda x, y: y - c,
- operands=(x, y)
+ operands=(x, y),
)
res = torch.vmap(fn)(a, b)
@@ -2109,7 +2427,6 @@ def forward(self, arg0_1):
@parametrize("nClosure", [0, 1])
def test_cond_vmap_multiple_outputs(self, nClosure):
-
if nClosure:
c = torch.ones(5, dtype=torch.int64) + 5
@@ -2118,19 +2435,23 @@ def forward(self, arg0_1):
pred=torch.tensor([True]),
true_fn=lambda x: (x + c, x - c),
false_fn=lambda x: (x, x),
- operands=(x,)
+ operands=(x,),
)
+
else:
+
def fn(x):
return torch.cond(
pred=torch.tensor([True]),
true_fn=lambda x: (x + 1, x - 1),
false_fn=lambda x: (x, x),
- operands=(x,)
+ operands=(x,),
)
a = torch.arange(15).reshape(3, 5)
- res = torch.vmap(fn)(a,)
+ res = torch.vmap(fn)(
+ a,
+ )
self.assertEqual(len(res), 2)
if nClosure:
self.assertEqual(res, (a + c, a - c))
@@ -2143,7 +2464,7 @@ def forward(self, arg0_1):
pred=torch.tensor([True]),
true_fn=lambda x: x + 1,
false_fn=lambda x: x - 1,
- operands=(x,)
+ operands=(x,),
)
def wrapper(x):
@@ -2153,7 +2474,8 @@ def forward(self, arg0_1):
res = torch.vmap(wrapper)(a)
self.assertEqual(res, a + 1)
+
instantiate_parametrized_tests(TestControlFlowTraced)
-if __name__ == '__main__':
+if __name__ == "__main__":
run_tests()
diff --git a/test/functorch/test_dims.py b/test/functorch/test_dims.py
index bb038ee492..8d282f4465 100644
--- a/test/functorch/test_dims.py
+++ b/test/functorch/test_dims.py
@@ -5,25 +5,43 @@
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
-from functorch.dim import Tensor, Dim, dims, dimlists, stack, DimensionBindError, DimList
-
-from attn_ft import BertSelfAttention as BertSelfAttentionA, Linear
-from attn_positional import BertSelfAttention as BertSelfAttentionB
-
-from torch.testing._internal.common_utils import TestCase, run_tests, TEST_CUDA, skipIfTorchDynamo
+import gc
from unittest import skip, skipIf
+
import torch
-import gc
+
+from attn_ft import BertSelfAttention as BertSelfAttentionA, Linear
+from attn_positional import BertSelfAttention as BertSelfAttentionB
from functorch._C import dim as _C
+from functorch.dim import (
+ Dim,
+ DimensionBindError,
+ DimList,
+ dimlists,
+ dims,
+ stack,
+ Tensor,
+)
+
+from torch.testing._internal.common_utils import (
+ run_tests,
+ skipIfTorchDynamo,
+ TEST_CUDA,
+ TestCase,
+)
try:
from torchvision.models import resnet18
except ImportError:
resnet18 = None
-_test_c, _parse_test, _set_pointwise_optimize = _C._test_c, _C._parse_test, _C._set_pointwise_optimize
+_test_c, _parse_test, _set_pointwise_optimize = (
+ _C._test_c,
+ _C._parse_test,
+ _C._set_pointwise_optimize,
+)
from contextlib import contextmanager
from time import perf_counter
@@ -32,10 +50,12 @@ measure_perf = False
if measure_perf:
from torchdim.magic_trace import magic_trace
else:
+
@contextmanager
def magic_trace(*args, **kwargs):
yield
+
@contextmanager
def measure(what):
b = perf_counter()
@@ -43,12 +63,14 @@ def measure(what):
e = perf_counter()
print(f"{what}: {e - b:.20f} seconds")
+
def triu(A):
i, j = dims()
a = A[i, j]
zero = torch.tensor(0, dtype=torch.float) # XXX - torch.where is janky...
return torch.where(i <= j, a, zero).order(i, j)
+
def gpu_time(lmb, name, r=100):
b = torch.cuda.Event(enable_timing=True)
e = torch.cuda.Event(enable_timing=True)
@@ -71,9 +93,9 @@ def gpu_time(lmb, name, r=100):
print(name, elapsed / r)
return elapsed / r
+
@skipIfTorchDynamo("Bad interaction")
class TestMin(TestCase):
-
def setUp(self):
super().setUp()
gc.disable()
@@ -82,32 +104,36 @@ class TestMin(TestCase):
for o in gc.get_objects():
if isinstance(o, (torch.Tensor, Dim, Tensor, DimList)):
self.interesting.add(id(o))
- if 'cuda' in self._testMethodName:
+ if "cuda" in self._testMethodName:
self.mem_allocated = torch.cuda.memory_allocated()
def tearDown(self):
interesting = []
for o in gc.get_objects():
- if isinstance(o, (torch.Tensor, Dim, Tensor, DimList)) and id(o) not in self.interesting:
+ if (
+ isinstance(o, (torch.Tensor, Dim, Tensor, DimList))
+ and id(o) not in self.interesting
+ ):
interesting.append(o)
extra_memory = 0
- if 'cuda' in self._testMethodName:
+ if "cuda" in self._testMethodName:
extra_memory += torch.cuda.memory_allocated() - self.mem_allocated
# nolevels = _n_levels_in_use() == 0
if extra_memory != 0 or len(interesting) != 0:
import refcycle
- refcycle.garbage().export_image('garbage.pdf')
+
+ refcycle.garbage().export_image("garbage.pdf")
gc.collect()
# assert nolevels, f"cleanup failed? {_n_levels_in_use()}"
- assert extra_memory == 0, f'extra cuda memory left allocated: {extra_memory}'
- assert len(interesting) == 0, \
- f'extra torch.Tensor, Dim, or Tensor left allocated: {len(interesting)} objects of types:' \
- f' { [type(t) for t in interesting] }'
+ assert extra_memory == 0, f"extra cuda memory left allocated: {extra_memory}"
+ assert len(interesting) == 0, (
+ f"extra torch.Tensor, Dim, or Tensor left allocated: {len(interesting)} objects of types:"
+ f" { [type(t) for t in interesting] }"
+ )
def test_manual_stuff(self):
-
A_ = torch.rand(3, 4)
B_ = torch.rand(4, 5)
i, j, k = dims()
@@ -123,33 +149,71 @@ class TestMin(TestCase):
A.index([i], [D]).order(k, d)
- def attn(self, batch_size=1, sequence_length=4, hidden_size=6, num_attention_heads=3, linear=Linear, device=None, time=False):
+ def attn(
+ self,
+ batch_size=1,
+ sequence_length=4,
+ hidden_size=6,
+ num_attention_heads=3,
+ linear=Linear,
+ device=None,
+ time=False,
+ ):
def maybe_to(x):
return x if device is None else x.to(device)
- attention_probs_dropout_prob = 0.
- A = maybe_to(BertSelfAttentionA(hidden_size, num_attention_heads, attention_probs_dropout_prob, linear=linear))
- B = maybe_to(BertSelfAttentionB(hidden_size, num_attention_heads, attention_probs_dropout_prob))
-
+ attention_probs_dropout_prob = 0.0
+ A = maybe_to(
+ BertSelfAttentionA(
+ hidden_size,
+ num_attention_heads,
+ attention_probs_dropout_prob,
+ linear=linear,
+ )
+ )
+ B = maybe_to(
+ BertSelfAttentionB(
+ hidden_size, num_attention_heads, attention_probs_dropout_prob
+ )
+ )
A.load_state_dict(B.state_dict())
hidden_state = maybe_to(torch.rand(batch_size, sequence_length, hidden_size))
b_out = B(hidden_state)
a_out = A(hidden_state)
- self.assertTrue(torch.allclose(a_out, b_out)) # why does a simple matmul not do the right thing?
+ self.assertTrue(
+ torch.allclose(a_out, b_out)
+ ) # why does a simple matmul not do the right thing?
if time:
gpu_time(lambda: B(hidden_state), "positional", r=3)
gpu_time(lambda: A(hidden_state), "first_class", r=3)
- for approach in ('relative_key', 'relative_key_query'):
- A = maybe_to(BertSelfAttentionA(hidden_size, num_attention_heads,
- attention_probs_dropout_prob, approach, sequence_length, linear=linear))
- B = maybe_to(BertSelfAttentionB(hidden_size, num_attention_heads,
- attention_probs_dropout_prob, approach, sequence_length))
+ for approach in ("relative_key", "relative_key_query"):
+ A = maybe_to(
+ BertSelfAttentionA(
+ hidden_size,
+ num_attention_heads,
+ attention_probs_dropout_prob,
+ approach,
+ sequence_length,
+ linear=linear,
+ )
+ )
+ B = maybe_to(
+ BertSelfAttentionB(
+ hidden_size,
+ num_attention_heads,
+ attention_probs_dropout_prob,
+ approach,
+ sequence_length,
+ )
+ )
A.load_state_dict(B.state_dict())
- hidden_state = maybe_to(torch.rand(batch_size, sequence_length, hidden_size))
+ hidden_state = maybe_to(
+ torch.rand(batch_size, sequence_length, hidden_size)
+ )
b_out = B(hidden_state)
a_out = A(hidden_state)
self.assertTrue(torch.allclose(a_out, b_out))
@@ -158,17 +222,46 @@ class TestMin(TestCase):
gpu_time(lambda: B(hidden_state), "positional", r=3)
gpu_time(lambda: A(hidden_state), "first_class", r=3)
- A = maybe_to(BertSelfAttentionA(hidden_size, num_attention_heads,
- attention_probs_dropout_prob, None, None, linear=linear))
- B = maybe_to(BertSelfAttentionB(hidden_size, num_attention_heads,
- attention_probs_dropout_prob, None, None))
+ A = maybe_to(
+ BertSelfAttentionA(
+ hidden_size,
+ num_attention_heads,
+ attention_probs_dropout_prob,
+ None,
+ None,
+ linear=linear,
+ )
+ )
+ B = maybe_to(
+ BertSelfAttentionB(
+ hidden_size,
+ num_attention_heads,
+ attention_probs_dropout_prob,
+ None,
+ None,
+ )
+ )
A.load_state_dict(B.state_dict())
hidden_state = maybe_to(torch.rand(batch_size, sequence_length, hidden_size))
- past_key_value = (maybe_to(torch.rand(batch_size, num_attention_heads,
- sequence_length, hidden_size // num_attention_heads)),
- maybe_to(torch.rand(batch_size, num_attention_heads,
- sequence_length, hidden_size // num_attention_heads)))
+ past_key_value = (
+ maybe_to(
+ torch.rand(
+ batch_size,
+ num_attention_heads,
+ sequence_length,
+ hidden_size // num_attention_heads,
+ )
+ ),
+ maybe_to(
+ torch.rand(
+ batch_size,
+ num_attention_heads,
+ sequence_length,
+ hidden_size // num_attention_heads,
+ )
+ ),
+ )
b_out = B(hidden_state, past_key_value=past_key_value)
a_out = A(hidden_state, past_key_value=past_key_value)
@@ -196,6 +289,7 @@ class TestMin(TestCase):
def test_adapt(self):
def f():
ci, co = dims()
+
# python 3.11 adapts bytecode after a number of iterations
# check that we still match names correctly
for i in range(10):
@@ -204,8 +298,15 @@ class TestMin(TestCase):
@skipIf(not TEST_CUDA, "no CUDA")
def test_attn_cuda(self):
# size from the BERT paper, 90% pretraining of sequence length 128
- self.attn(batch_size=256, hidden_size=768, sequence_length=128,
- num_attention_heads=12, device='cuda', time=measure_perf, linear=torch.nn.Linear)
+ self.attn(
+ batch_size=256,
+ hidden_size=768,
+ sequence_length=128,
+ num_attention_heads=12,
+ device="cuda",
+ time=measure_perf,
+ linear=torch.nn.Linear,
+ )
def test_stack(self):
i, j, d = dims()
@@ -247,8 +348,6 @@ class TestMin(TestCase):
B = torch.rand(4, 5)
i, j, k = dims()
-
-
# r = A[i]*4
r = (A[i, k] * B[k, j]).sum(k).order(i, j)
assert torch.allclose(r, A @ B)
@@ -311,7 +410,9 @@ class TestMin(TestCase):
c.size = 2
assert torch.allclose(A[i, [c, d]].order(i, c, d), A.view(3, 2, 2))
- assert torch.allclose(A[c + 1, c + 0].order(c), A[torch.arange(2) + 1, torch.arange(2)])
+ assert torch.allclose(
+ A[c + 1, c + 0].order(c), A[torch.arange(2) + 1, torch.arange(2)]
+ )
try:
A[..., 3, ...]
raise NotImplementedError()
@@ -329,8 +430,9 @@ class TestMin(TestCase):
assert torch.allclose(a, b.order(s, d))
D = torch.rand(3, 4, 5)
- assert torch.allclose(D.transpose(0, 1).flatten(1, 2), D[i, k, j].order((i, j)).order(k))
-
+ assert torch.allclose(
+ D.transpose(0, 1).flatten(1, 2), D[i, k, j].order((i, j)).order(k)
+ )
r = [id(x) for x in torch.rand_like(A[i, k]).dims]
assert id(i) in r and id(k) in r
@@ -357,7 +459,6 @@ class TestMin(TestCase):
A = torch.rand(3, 4)
B = torch.rand(4, 5)
-
for _ in range(10):
r0 = A @ B
@@ -366,26 +467,25 @@ class TestMin(TestCase):
b = B[k, j]
r1 = (a * b).sum(k)
- with measure('pp'):
+ with measure("pp"):
for _ in range(10000):
A @ B
# magic_trace_stop_indicator()
- with measure('fc'):
+ with measure("fc"):
for _ in range(10000):
(A[i, k] * B[k, j]).sum(k).order(i, j)
- with magic_trace('f.fxt'):
+ with magic_trace("f.fxt"):
for _ in range(10000):
(A[i, k] * B[k, j]).sum(k).order(i, j)
- with magic_trace('p.fxt'):
+ with magic_trace("p.fxt"):
for _ in range(10000):
A @ B
# magic_trace_stop_indicator()
-
assert torch.allclose(r1.order(i, j), r0)
def test_compare_dims(self):
@@ -409,7 +509,6 @@ class TestMin(TestCase):
i = dims()
assert list(A[i].expand(2, 4).order(i).size()) == [3, 2, 4]
-
def test_parse(self):
self.assertEqual(("x", None, None, None), _parse_test(1, 0, "x"))
self.assertEqual(("x", None, "y", None), _parse_test(1, 0, "x", c="y"))
@@ -429,8 +528,10 @@ class TestMin(TestCase):
def test_network(self):
if resnet18 is None:
- self.skipTest('no torchvision')
- rn = resnet18(norm_layer=lambda x: torch.nn.BatchNorm2d(x, track_running_stats=False))
+ self.skipTest("no torchvision")
+ rn = resnet18(
+ norm_layer=lambda x: torch.nn.BatchNorm2d(x, track_running_stats=False)
+ )
rn.train()
img = torch.rand(1, 1, 2, 3, 224, 224)
imgf = img.view(2, 3, 224, 224)
@@ -448,7 +549,7 @@ class TestMin(TestCase):
b = dimlists()
assert isinstance(a, Dim)
assert isinstance(b, DimList)
- assert str(a) == 'a'
+ assert str(a) == "a"
a, b = dims(sizes=[3, 4])
assert a.size == 3
assert b.size == 4
@@ -468,7 +569,7 @@ class TestMin(TestCase):
def test_softmax_split(self):
a = torch.rand(16)
g, i = dims(sizes=[2, None])
- a2 = a[[i, g], ]
+ a2 = a[[i, g],]
m_b, _ = a2.max(i)
f_b = torch.exp(a2 - m_b)
@@ -498,7 +599,9 @@ class TestMin(TestCase):
C = torch.rand(3, 4, 5)
ik = dims()
- assert torch.allclose(C.index((0, 2), ik).order(ik), C.permute(0, 2, 1).reshape(15, 4))
+ assert torch.allclose(
+ C.index((0, 2), ik).order(ik), C.permute(0, 2, 1).reshape(15, 4)
+ )
# failures that came up from monkey patching some operators...
def test_monkey(self):
@@ -546,6 +649,7 @@ class TestMin(TestCase):
class Foo:
pass
+
y = Foo()
z, y.x, q = dims(3)
assert str(z) == "z"
@@ -560,7 +664,6 @@ class TestMin(TestCase):
assert Tensor.clamp.__doc__ == torch.Tensor.clamp.__doc__
def test_embed(self):
-
embeddings = torch.rand(8, 32)
ids = torch.tensor([1, 0, 3, 4])
@@ -606,7 +709,10 @@ class TestMin(TestCase):
x = torch.randn(total, 1)
x.split(l, 0)
-skip_functorch_only = ['test_time_mm_fuse', 'test_attn_cuda']
+
+skip_functorch_only = ["test_time_mm_fuse", "test_attn_cuda"]
+
+
class TestMinFunctorchOnly(TestMin):
def setUp(self):
super().setUp()
@@ -616,8 +722,9 @@ class TestMinFunctorchOnly(TestMin):
_set_pointwise_optimize(True)
super().tearDown()
+
for n in skip_functorch_only:
setattr(TestMinFunctorchOnly, n, skip("skip_functorch_only")(lambda self: None))
-if __name__ == '__main__':
+if __name__ == "__main__":
run_tests()
diff --git a/test/functorch/test_eager_transforms.py b/test/functorch/test_eager_transforms.py
index 5a2c9e28be..ce8aa84bf3 100644
--- a/test/functorch/test_eager_transforms.py
+++ b/test/functorch/test_eager_transforms.py
@@ -7,63 +7,89 @@
# LICENSE file in the root directory of this source tree.
import copy
-from torch.testing._internal.common_utils import (
- TestCase, run_tests, parametrize, subtest, instantiate_parametrized_tests,
- IS_FBCODE, freeze_rng_state, skipIfTorchDynamo, IS_WINDOWS, IS_MACOS, IS_ARM64,
- markDynamoStrictTest
-)
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
+import math
import os
import subprocess
import sys
import unittest
import warnings
-import math
-from functools import wraps
-from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU, dtypes, onlyCUDA
-from torch.testing._internal.common_dtype import get_all_fp_dtypes
-from torch.testing._internal.common_cuda import with_tf32_off, SM70OrLater, TEST_CUDA
-from torch.testing._internal.common_utils import skipIfRocm, TEST_WITH_TORCHDYNAMO
-from torch.testing import make_tensor
-from torch._dynamo import allow_in_graph
-from torch._subclasses.fake_tensor import FakeTensorMode
-from functools import partial
-from functorch.experimental import replace_all_batch_norm_modules_
-from torch._C import _ExcludeDispatchKeyGuard, DispatchKeySet, DispatchKey
+from functools import partial, wraps
import functorch
+
+# NB: numpy is a testing dependency!
+import numpy as np
+import torch
+import torch.autograd.forward_ad as fwAD
+import torch.nn as nn
+import torch.nn.functional as F
+from common_utils import expectedFailureIf
from functorch import (
- grad, vjp, vmap, jacrev, jacfwd, grad_and_value, hessian,
- jvp, make_functional, make_functional_with_buffers,
- combine_state_for_ensemble, make_fx
+ combine_state_for_ensemble,
+ grad,
+ grad_and_value,
+ hessian,
+ jacfwd,
+ jacrev,
+ jvp,
+ make_functional,
+ make_functional_with_buffers,
+ make_fx,
+ vjp,
+ vmap,
)
+from functorch.experimental import functionalize, replace_all_batch_norm_modules_
+from torch._C import _ExcludeDispatchKeyGuard, DispatchKey, DispatchKeySet
+from torch._dynamo import allow_in_graph
+from torch._functorch.eager_transforms import _slice_argnums
from torch._functorch.make_functional import (
- functional_init, functional_init_with_buffers,
+ functional_init,
+ functional_init_with_buffers,
)
-from torch._functorch.eager_transforms import _slice_argnums
-from functorch.experimental import functionalize
-from torch._ops import HigherOrderOperator
from torch._functorch.utils import enable_single_level_autograd_function
-import torch.autograd.forward_ad as fwAD
-from torch.func import functional_call, stack_module_state, linearize
-from common_utils import expectedFailureIf
-
-# NB: numpy is a testing dependency!
-import numpy as np
+from torch._ops import HigherOrderOperator
+from torch._subclasses.fake_tensor import FakeTensorMode
+from torch.func import functional_call, linearize, stack_module_state
+from torch.testing import make_tensor
+from torch.testing._internal.common_cuda import SM70OrLater, TEST_CUDA, with_tf32_off
+from torch.testing._internal.common_device_type import (
+ dtypes,
+ instantiate_device_type_tests,
+ onlyCPU,
+ onlyCUDA,
+)
+from torch.testing._internal.common_dtype import get_all_fp_dtypes
+from torch.testing._internal.common_utils import (
+ freeze_rng_state,
+ instantiate_parametrized_tests,
+ IS_ARM64,
+ IS_FBCODE,
+ IS_MACOS,
+ IS_WINDOWS,
+ markDynamoStrictTest,
+ parametrize,
+ run_tests,
+ skipIfRocm,
+ skipIfTorchDynamo,
+ subtest,
+ TEST_WITH_TORCHDYNAMO,
+ TestCase,
+)
-from torch.utils._pytree import tree_flatten, tree_unflatten, tree_map
+from torch.utils._pytree import tree_flatten, tree_map, tree_unflatten
USE_TORCHVISION = False
try:
import torchvision # noqa: F401
+
USE_TORCHVISION = True
except ImportError:
- warnings.warn("Couldn't import torchvision. Some of our tests use it, try "
- "to install it with commands from pytorch.org, post-fixed with "
- "`--no-deps` to avoid overwriting the pytorch installation",
- UserWarning)
+ warnings.warn(
+ "Couldn't import torchvision. Some of our tests use it, try "
+ "to install it with commands from pytorch.org, post-fixed with "
+ "`--no-deps` to avoid overwriting the pytorch installation",
+ UserWarning,
+ )
# TestCase for _slice_argnums, an important helper function
@@ -182,6 +208,7 @@ class TestSliceArgnums(TestCase):
res = _slice_argnums(args, (1, 0))
self.assertEqual(res, (args[1], args[0]))
+
def _get_weights_and_functional_call(net, mechanism):
if mechanism == "make_functional":
return make_functional(net)
@@ -194,6 +221,7 @@ def _get_weights_and_functional_call(net, mechanism):
return net_func, dict(net.named_parameters())
+
def _get_weights_and_functional_call_with_buffers(net, mechanism):
if mechanism == "make_functional":
return make_functional_with_buffers(net)
@@ -241,7 +269,7 @@ class TestGradTransform(TestCase):
x.requires_grad_()
out = foo(x, y)
- expected, = torch.autograd.grad(out, x)
+ (expected,) = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
@@ -256,7 +284,7 @@ class TestGradTransform(TestCase):
result = grad(foo)(y, targets)
y.requires_grad_()
- expected, = torch.autograd.grad(foo(y, targets), y)
+ (expected,) = torch.autograd.grad(foo(y, targets), y)
self.assertEqual(result, expected)
@@ -304,7 +332,7 @@ class TestGradTransform(TestCase):
x.requires_grad_()
out = foo(x)
- expected, = torch.autograd.grad(out, x)
+ (expected,) = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
@@ -321,19 +349,19 @@ class TestGradTransform(TestCase):
x.requires_grad_()
out = foo(x)
- expected, = torch.autograd.grad(out, x)
+ (expected,) = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_inplace_on_captures(self, device):
- x = torch.tensor([1., 2., 3.], device=device)
+ x = torch.tensor([1.0, 2.0, 3.0], device=device)
captured = torch.randn(3, device=device)
def foo(x):
captured.copy_(x)
return (x * captured).sum()
- with self.assertRaisesRegex(RuntimeError, 'mutate a captured Tensor'):
+ with self.assertRaisesRegex(RuntimeError, "mutate a captured Tensor"):
grad(foo)(x)
def test_nesting_simple(self, device):
@@ -380,7 +408,7 @@ class TestGradTransform(TestCase):
with freeze_rng_state():
result = grad(f)(x)
x.requires_grad_()
- expected, = torch.autograd.grad(f(x), x)
+ (expected,) = torch.autograd.grad(f(x), x)
self.assertEqual(result, expected)
def test_vjp(self, device):
@@ -389,13 +417,14 @@ class TestGradTransform(TestCase):
self.assertEqual(out, x.sin())
v = torch.randn([], device=device)
- result, = vjp_fn(v)
+ (result,) = vjp_fn(v)
self.assertEqual(result, v * x.cos())
def test_vjp_two_outputs(self, device):
def f(x):
return x, x
- result, vjp_fn = vjp(f, torch.tensor(1.))
+
+ result, vjp_fn = vjp(f, torch.tensor(1.0))
vjp_fn(result)
def test_conj_bit(self):
@@ -406,6 +435,7 @@ class TestGradTransform(TestCase):
y = x.conj()
assert y.is_conj()
return y.abs()
+
res = grad(foo)(x)
with torch.no_grad():
self.assertEqual(res, torch.ones_like(res) * torch.sgn(x))
@@ -414,7 +444,7 @@ class TestGradTransform(TestCase):
x = torch.randn([], requires_grad=True, device=device)
y = grad(torch.sin)(x)
- result, = torch.autograd.grad(y, x)
+ (result,) = torch.autograd.grad(y, x)
self.assertEqual(result, -x.sin())
def test_grad_of_vjp_composition(self, device):
@@ -485,17 +515,17 @@ class TestGradTransform(TestCase):
def test_invalid_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
- with self.assertRaisesRegex(RuntimeError, 'but only'):
+ with self.assertRaisesRegex(RuntimeError, "but only"):
grad(torch.mul, argnums=-3)(x, y)
- with self.assertRaisesRegex(RuntimeError, 'but only'):
+ with self.assertRaisesRegex(RuntimeError, "but only"):
grad(torch.mul, argnums=2)(x, y)
- with self.assertRaisesRegex(RuntimeError, 'int or Tuple'):
+ with self.assertRaisesRegex(RuntimeError, "int or Tuple"):
grad(torch.mul, argnums=[0])(x, y)
- with self.assertRaisesRegex(RuntimeError, 'must be int'):
- grad(torch.mul, argnums=('0',))(x, y)
- with self.assertRaisesRegex(RuntimeError, 'must be unique'):
+ with self.assertRaisesRegex(RuntimeError, "must be int"):
+ grad(torch.mul, argnums=("0",))(x, y)
+ with self.assertRaisesRegex(RuntimeError, "must be unique"):
grad(torch.mul, argnums=(0, 0))(x, y)
- with self.assertRaisesRegex(RuntimeError, 'must be unique'):
+ with self.assertRaisesRegex(RuntimeError, "must be unique"):
grad(torch.mul, argnums=(0, -2))(x, y)
def test_argnums(self, device):
@@ -507,7 +537,7 @@ class TestGradTransform(TestCase):
gy = grad(torch.mul, argnums=1)(x, y)
self.assertEqual(gy, x)
- gx, = grad(torch.mul, argnums=(0,))(x, y)
+ (gx,) = grad(torch.mul, argnums=(0,))(x, y)
self.assertEqual(gx, y)
gx, gy = grad(torch.mul, argnums=(0, 1))(x, y)
@@ -530,7 +560,7 @@ class TestGradTransform(TestCase):
gy = grad(torch.mul, argnums=-1)(x, y)
self.assertEqual(gy, x)
- gx, = grad(torch.mul, argnums=(-2,))(x, y)
+ (gx,) = grad(torch.mul, argnums=(-2,))(x, y)
self.assertEqual(gx, y)
gx, gy = grad(torch.mul, argnums=(-2, -1))(x, y)
@@ -542,36 +572,35 @@ class TestGradTransform(TestCase):
def f(a, b):
x, y = a
- return 1 * x + 2 * y + 3 * b['foo']
+ return 1 * x + 2 * y + 3 * b["foo"]
- args = ((x, x), {'foo': x})
+ args = ((x, x), {"foo": x})
gx, gy = grad(f)(*args)
- self.assertEqual(gx, torch.tensor(1., device=device))
- self.assertEqual(gy, torch.tensor(2., device=device))
+ self.assertEqual(gx, torch.tensor(1.0, device=device))
+ self.assertEqual(gy, torch.tensor(2.0, device=device))
- (gx, gy), = grad(f, argnums=(0,))(*args)
- self.assertEqual(gx, torch.tensor(1., device=device))
- self.assertEqual(gy, torch.tensor(2., device=device))
+ ((gx, gy),) = grad(f, argnums=(0,))(*args)
+ self.assertEqual(gx, torch.tensor(1.0, device=device))
+ self.assertEqual(gy, torch.tensor(2.0, device=device))
(gx, gy), gz = grad(f, argnums=(0, 1))(*args)
- self.assertEqual(gx, torch.tensor(1., device=device))
- self.assertEqual(gy, torch.tensor(2., device=device))
- self.assertEqual(gz['foo'], torch.tensor(3., device=device))
+ self.assertEqual(gx, torch.tensor(1.0, device=device))
+ self.assertEqual(gy, torch.tensor(2.0, device=device))
+ self.assertEqual(gz["foo"], torch.tensor(3.0, device=device))
def test_grad_aux_tensor(self, device):
-
x = torch.randn(3, device=device)
with self.assertRaisesRegex(
RuntimeError,
- r'grad_and_value\(f\)\(\*args\): output of function f should be a tuple'
+ r"grad_and_value\(f\)\(\*args\): output of function f should be a tuple",
):
grad(lambda t: [t, t], has_aux=True)(x)
with self.assertRaisesRegex(
RuntimeError,
- r'grad_and_value\(f\)\(\*args\): output of function f should be a tuple'
+ r"grad_and_value\(f\)\(\*args\): output of function f should be a tuple",
):
grad(lambda t: (t, t + 2, t + 3), has_aux=True)(x)
@@ -586,7 +615,7 @@ class TestGradTransform(TestCase):
def test_grad_aux_pytree(self, device):
def f(x):
y = x.sin()
- return y.sum(), {'a': x.cos(), 'b': [x.tan()]}
+ return y.sum(), {"a": x.cos(), "b": [x.tan()]}
x = torch.randn(3, device=device)
@@ -596,22 +625,30 @@ class TestGradTransform(TestCase):
self.assertEqual(out, x.cos())
for aux in [1, 1.0, "abc"]:
- with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
+ with self.assertRaisesRegex(
+ RuntimeError, r"Expected tensors, got unsupported type"
+ ):
_ = grad(lambda x: (x.sum(), aux), has_aux=True)(x)
- with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
+ with self.assertRaisesRegex(
+ RuntimeError, r"Expected tensors, got unsupported type"
+ ):
_ = grad(lambda x: (x.sum(), [x, aux]), has_aux=True)(x)
def test_zero_grad(self, device):
def f(x):
- return (x['a']**2.0).sum()
- inps = ({'a': torch.randn(10, device=device) + 3, 'b': torch.randn(10, device=device)})
+ return (x["a"] ** 2.0).sum()
+
+ inps = {
+ "a": torch.randn(10, device=device) + 3,
+ "b": torch.randn(10, device=device),
+ }
grads = grad(f)(inps)
- self.assertNotEqual(grads['a'].sum(), 0.0)
- self.assertEqual(grads['b'].sum(), 0.0)
+ self.assertNotEqual(grads["a"].sum(), 0.0)
+ self.assertEqual(grads["b"].sum(), 0.0)
def test_unrelated_grad(self, device):
- x = torch.tensor(1., device=device)
- y = torch.tensor(2., device=device)
+ x = torch.tensor(1.0, device=device)
+ y = torch.tensor(2.0, device=device)
def unrelated(x):
return y
@@ -620,9 +657,9 @@ class TestGradTransform(TestCase):
self.assertEqual(result, torch.zeros_like(x))
def test_unrelated_vjp(self, device):
- x = torch.tensor(1., device=device)
- y = torch.tensor(2., device=device)
- v = torch.tensor(1., device=device)
+ x = torch.tensor(1.0, device=device)
+ y = torch.tensor(2.0, device=device)
+ v = torch.tensor(1.0, device=device)
def unrelated(x):
return y
@@ -633,10 +670,10 @@ class TestGradTransform(TestCase):
self.assertEqual(result, expected)
def test_unrelated_vjp_multiple_inputs_outputs(self, device):
- w = torch.tensor(3., device=device)
- x = torch.tensor(4., device=device)
- y = torch.tensor(2., device=device)
- v = torch.tensor(1., device=device)
+ w = torch.tensor(3.0, device=device)
+ x = torch.tensor(4.0, device=device)
+ y = torch.tensor(2.0, device=device)
+ v = torch.tensor(1.0, device=device)
def unrelated(w, x):
return y, y, x
@@ -670,7 +707,7 @@ class TestGradTransform(TestCase):
out, vjp_fn = vjp(f, (x, (x, x)))
self.assertEqual(out, x * x)
result = vjp_fn(v)
- self.assertEqual(result, ((x * v, (x * v, 0.)),))
+ self.assertEqual(result, ((x * v, (x * v, 0.0)),))
def test_vjp_pytree_output(self, device):
def f(x):
@@ -681,7 +718,7 @@ class TestGradTransform(TestCase):
v2 = torch.randn([], device=device)
v3 = torch.randn([], device=device)
_, vjp_fn = vjp(f, x)
- result, = vjp_fn((v1, (v2, v3)))
+ (result,) = vjp_fn((v1, (v2, v3)))
self.assertEqual(result, v1 + v2 + v3)
def test_vjp_outputs_can_any_pytree(self, device):
@@ -690,27 +727,29 @@ class TestGradTransform(TestCase):
for output in [None, ()]:
with self.assertRaisesRegex(
- RuntimeError, r"vjp\(f, \*primals\): Expected f to be a function that has non-empty output"
+ RuntimeError,
+ r"vjp\(f, \*primals\): Expected f to be a function that has non-empty output",
):
_, vjp_fn = vjp(lambda _: output, x)
vjp_fn(t)
for output in [1, True, 12.2, "abc"]:
with self.assertRaisesRegex(
- RuntimeError, r"vjp\(f, \*primals\): expected f\(\*primals\) to return only tensors"
+ RuntimeError,
+ r"vjp\(f, \*primals\): expected f\(\*primals\) to return only tensors",
):
_, vjp_fn = vjp(lambda _: output, x)
vjp_fn(t)
# Check list output
output, vjp_fn = vjp(lambda x: [x, x.sum()], x)
- vjp_out, = vjp_fn([t, t.sum()])
+ (vjp_out,) = vjp_fn([t, t.sum()])
assert isinstance(output, list) and len(output) == 2
assert isinstance(vjp_out, torch.Tensor)
# Check dict output
output, vjp_fn = vjp(lambda x: {"x": x, "xsum": x.sum()}, x)
- vjp_out, = vjp_fn({"x": t, "xsum": t.sum()})
+ (vjp_out,) = vjp_fn({"x": t, "xsum": t.sum()})
assert isinstance(output, dict) and len(output) == 2 and "xsum" in output
assert isinstance(vjp_out, torch.Tensor)
@@ -721,7 +760,11 @@ class TestGradTransform(TestCase):
]
output, vjp_fn = vjp(composite_output, x)
- vjp_out, = vjp_fn([(t.sum(), {"a": t, "out": [t, t.sum()]}), ])
+ (vjp_out,) = vjp_fn(
+ [
+ (t.sum(), {"a": t, "out": [t, t.sum()]}),
+ ]
+ )
assert isinstance(output, list)
assert isinstance(output[0], tuple) and isinstance(output[0][1], dict)
assert isinstance(vjp_out, torch.Tensor)
@@ -735,17 +778,20 @@ class TestGradTransform(TestCase):
v2 = torch.randn([], device=device)
v3 = torch.randn([], device=device)
_, vjp_fn = vjp(f, x)
- with self.assertRaisesRegex(RuntimeError, 'Expected pytree structure'):
- result, = vjp_fn(((v1, (v2, v3)),))
+ with self.assertRaisesRegex(RuntimeError, "Expected pytree structure"):
+ (result,) = vjp_fn(((v1, (v2, v3)),))
def test_vjp_aux_tensor(self, device):
-
x = torch.randn(3, device=device)
- with self.assertRaisesRegex(RuntimeError, r'vjp\(f, \*primals\): output of function f should be a tuple'):
+ with self.assertRaisesRegex(
+ RuntimeError, r"vjp\(f, \*primals\): output of function f should be a tuple"
+ ):
vjp(lambda t: [t, t], x, has_aux=True)
- with self.assertRaisesRegex(RuntimeError, r'vjp\(f, \*primals\): output of function f should be a tuple'):
+ with self.assertRaisesRegex(
+ RuntimeError, r"vjp\(f, \*primals\): output of function f should be a tuple"
+ ):
vjp(lambda t: (t, t + 2, t + 3), x, has_aux=True)
def f(t):
@@ -757,13 +803,13 @@ class TestGradTransform(TestCase):
self.assertEqual(out, x.sin())
v = torch.randn(3, device=device)
- grad_x, = vjp_fn(v)
+ (grad_x,) = vjp_fn(v)
self.assertEqual(grad_x, v * x.cos())
def test_vjp_aux_pytree(self, device):
def f(x):
y = x.sin()
- return y, {'a': x.cos(), 'b': [x.tan()]}
+ return y, {"a": x.cos(), "b": [x.tan()]}
x = torch.randn(3, device=device)
@@ -773,13 +819,17 @@ class TestGradTransform(TestCase):
self.assertEqual(aux, expected_aux)
v = torch.randn(3, device=device)
- grad_x, = vjp_fn(v)
+ (grad_x,) = vjp_fn(v)
self.assertEqual(grad_x, v * x.cos())
for aux in [1, 1.0, "abc"]:
- with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
+ with self.assertRaisesRegex(
+ RuntimeError, r"Expected tensors, got unsupported type"
+ ):
_ = vjp(lambda x: (x, aux), x, has_aux=True)
- with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
+ with self.assertRaisesRegex(
+ RuntimeError, r"Expected tensors, got unsupported type"
+ ):
_ = vjp(lambda x: (x, [x, aux]), x, has_aux=True)
def test_functional_init(self, device):
@@ -824,15 +874,16 @@ class TestGradTransform(TestCase):
return x
B = 10
- weights, buffers, fn, _, _ = \
- functional_init_with_buffers(MLPClassifier, [B], device=device)(32, 2)
+ weights, buffers, fn, _, _ = functional_init_with_buffers(
+ MLPClassifier, [B], device=device
+ )(32, 2)
inputs = torch.randn(B, 7, 2, device=device)
vmap(fn)(weights, buffers, (inputs,))
def test_advanced_indexing(self, device):
def f(value):
log_prob = torch.ones((), device=device)
- val = (torch.zeros(()) > 0)
+ val = torch.zeros(()) > 0
log_prob[val] = 0
return value
@@ -850,20 +901,46 @@ class TestGradTransform(TestCase):
def test_tensor_ctor_inside_grad(self, device):
def foo(x):
- return x * torch.tensor(2., device=device)
+ return x * torch.tensor(2.0, device=device)
x = torch.tensor(3.14, device=device)
functorch.grad(foo)(x)
- @parametrize("op_list_data", [
- subtest(([vmap, ], [(4, 2), (64, 3, 32, 32)]), name='vmap'),
- subtest(([vmap, vmap], [(4, 3, 2), (64, 3, 32, 32)]), name='vmap_vmap'),
- subtest(([grad, ], [(0, ), [], (4, 2), (64, 3, 32, 32)]), name='grad'),
- subtest(([grad, grad], [[], ]), name='grad_grad'),
- subtest(([vmap, grad], [(4, 2)]), name='vmap_grad'),
- ])
+ @parametrize(
+ "op_list_data",
+ [
+ subtest(
+ (
+ [
+ vmap,
+ ],
+ [(4, 2), (64, 3, 32, 32)],
+ ),
+ name="vmap",
+ ),
+ subtest(([vmap, vmap], [(4, 3, 2), (64, 3, 32, 32)]), name="vmap_vmap"),
+ subtest(
+ (
+ [
+ grad,
+ ],
+ [(0,), [], (4, 2), (64, 3, 32, 32)],
+ ),
+ name="grad",
+ ),
+ subtest(
+ (
+ [grad, grad],
+ [
+ [],
+ ],
+ ),
+ name="grad_grad",
+ ),
+ subtest(([vmap, grad], [(4, 2)]), name="vmap_grad"),
+ ],
+ )
def test_tensor_print(self, device, op_list_data):
-
op_list, shapes = op_list_data
for dt in get_all_fp_dtypes():
@@ -894,7 +971,9 @@ class TestGradTransform(TestCase):
expected = f"GradTrackingTensor(lvl={level}, value={expected})"
elif op == vmap:
bdim -= 1
- expected = f"BatchedTensor(lvl={level}, bdim={bdim}, value={expected})"
+ expected = (
+ f"BatchedTensor(lvl={level}, bdim={bdim}, value={expected})"
+ )
fn(x)
buf = buf.replace("\n", "").replace(" ", "")
@@ -902,7 +981,7 @@ class TestGradTransform(TestCase):
self.assertEqual(expected, buf)
def test_print_captured_tensor_inside_transform(self, device):
- x = torch.tensor([1., 2., 3.], device=device)
+ x = torch.tensor([1.0, 2.0, 3.0], device=device)
out = None
def f(y):
@@ -923,8 +1002,8 @@ class TestGradTransform(TestCase):
def test_no_grad_inside(self, device):
def f(x):
with torch.no_grad():
- shift = x ** 2
- return x ** 2 - shift
+ shift = x**2
+ return x**2 - shift
x = torch.randn([], device=device)
y = grad(f)(x)
@@ -934,14 +1013,14 @@ class TestGradTransform(TestCase):
x = torch.randn([], device=device, requires_grad=True)
y = grad(f)(x)
- z, = torch.autograd.grad(y, x)
+ (z,) = torch.autograd.grad(y, x)
self.assertEqual(z, 2)
def test_no_grad_mixed(self, device):
def f(x):
with torch.no_grad():
- shift = x ** 2
- return x ** 2 - shift
+ shift = x**2
+ return x**2 - shift
x = torch.randn([], device=device, requires_grad=True)
with torch.no_grad():
@@ -953,21 +1032,21 @@ class TestGradTransform(TestCase):
def test_no_grad_nested_simple(self, device):
def h(x):
with torch.no_grad():
- shift = grad(lambda x: 0.25 * x ** 4)(x)
- return x ** 3 - shift
+ shift = grad(lambda x: 0.25 * x**4)(x)
+ return x**3 - shift
x = torch.tensor(1.5, device=device, requires_grad=True)
y = grad(h)(x)
- self.assertEqual(y, 3 * x ** 2)
+ self.assertEqual(y, 3 * x**2)
- z, = torch.autograd.grad(y, x)
+ (z,) = torch.autograd.grad(y, x)
self.assertEqual(z, 6 * x)
def test_no_grad_nested_complicated(self, device):
def f(x):
with torch.no_grad():
- shift = x ** 3
- return x ** 3 - shift
+ shift = x**3
+ return x**3 - shift
def g(x):
r1 = grad(f)(x)
@@ -980,30 +1059,30 @@ class TestGradTransform(TestCase):
# The only differential part of g is x ** 3
self.assertEqual(y, 6 * x)
- z, = torch.autograd.grad(y, x)
+ (z,) = torch.autograd.grad(y, x)
self.assertEqual(z, 6)
def test_no_grad_value(self, device):
def h(x):
with torch.no_grad():
- gvalue, value = grad_and_value(lambda x: x ** 3)(x)
- return x ** 3 - value
+ gvalue, value = grad_and_value(lambda x: x**3)(x)
+ return x**3 - value
x = torch.tensor(1.6, device=device, requires_grad=True)
y = grad(h)(x)
- self.assertEqual(y, 3 * x ** 2)
+ self.assertEqual(y, 3 * x**2)
- z, = torch.autograd.grad(y, x)
+ (z,) = torch.autograd.grad(y, x)
self.assertEqual(z, 6 * x)
def test_no_grad_outside_vjp(self, device):
def h(x):
- return x ** 2
+ return x**2
- x = torch.tensor(2., requires_grad=True, device=device)
+ x = torch.tensor(2.0, requires_grad=True, device=device)
with torch.no_grad():
out, vjp_fn = vjp(h, x)
- y, = vjp_fn(torch.tensor(1., device=device))
+ (y,) = vjp_fn(torch.tensor(1.0, device=device))
self.assertEqual(y, 2 * x)
self.assertFalse(y.requires_grad)
@@ -1011,28 +1090,28 @@ class TestGradTransform(TestCase):
def test_no_grad_outside_vjp_fn(self, device):
def h(x):
- return x ** 2
+ return x**2
x = torch.tensor(3.14, requires_grad=True, device=device)
out, vjp_fn = vjp(h, x)
with torch.no_grad():
- y, = vjp_fn(torch.tensor(1., device=device))
+ (y,) = vjp_fn(torch.tensor(1.0, device=device))
self.assertEqual(y, 2 * x)
self.assertFalse(y.requires_grad)
self.assertTrue(out.requires_grad)
- z, = torch.autograd.grad(out, x)
+ (z,) = torch.autograd.grad(out, x)
self.assertEqual(z, 2 * x)
def test_no_grad_outside_vjp_only(self, device):
def h(x):
- return x ** 2
+ return x**2
x = torch.tensor(3.14, requires_grad=True, device=device)
with torch.no_grad():
out, vjp_fn = vjp(h, x)
- y, = vjp_fn(torch.tensor(1., device=device))
+ (y,) = vjp_fn(torch.tensor(1.0, device=device))
self.assertEqual(y, 2 * x)
self.assertFalse(out.requires_grad)
@@ -1040,7 +1119,7 @@ class TestGradTransform(TestCase):
# This one is a little weird...
self.assertTrue(y.requires_grad)
- z, = torch.autograd.grad(y, x)
+ (z,) = torch.autograd.grad(y, x)
self.assertEqual(z, 2)
@@ -1064,10 +1143,10 @@ class TestAutogradFunction(TestCase):
def f(y, x):
x, y = A.apply(x, y)
- return x ** 2
+ return x**2
- x = torch.tensor(2., device=device)
- y = torch.tensor(3., device=device)
+ x = torch.tensor(2.0, device=device)
+ y = torch.tensor(3.0, device=device)
# grad differentiates w.r.t. arg 0 by default
grad(f)(y, x)
grad(grad(f))(y, x)
@@ -1076,7 +1155,9 @@ class TestAutogradFunction(TestCase):
@parametrize("save_for", ["jvp", "vjp"])
@parametrize("save_tensors", ["input", "output", "neither"])
@parametrize("mark_dirty", [True, False])
- def test_function_returns_input(self, device, inner_requires_grad, save_for, save_tensors, mark_dirty):
+ def test_function_returns_input(
+ self, device, inner_requires_grad, save_for, save_tensors, mark_dirty
+ ):
class A(torch.autograd.Function):
@staticmethod
def forward(x):
@@ -1118,8 +1199,8 @@ class TestAutogradFunction(TestCase):
err_msg = "A input that has been returned as-is"
- a = torch.tensor(2., device=device, requires_grad=inner_requires_grad)
- a_t = torch.tensor(2., device=device, requires_grad=inner_requires_grad)
+ a = torch.tensor(2.0, device=device, requires_grad=inner_requires_grad)
+ a_t = torch.tensor(2.0, device=device, requires_grad=inner_requires_grad)
if save_tensors in ("input", "output") and not mark_dirty:
with self.assertRaisesRegex(RuntimeError, err_msg):
grad(fn)(a)
@@ -1129,8 +1210,10 @@ class TestAutogradFunction(TestCase):
grad(fn)(a)
jvp(fn, (a,), (a_t,))
- a = torch.tensor(2., device=device, requires_grad=inner_requires_grad).clone()
- a_t = torch.tensor(2., device=device, requires_grad=inner_requires_grad).clone()
+ a = torch.tensor(2.0, device=device, requires_grad=inner_requires_grad).clone()
+ a_t = torch.tensor(
+ 2.0, device=device, requires_grad=inner_requires_grad
+ ).clone()
if save_tensors in ("input", "output") and not mark_dirty:
with self.assertRaisesRegex(RuntimeError, err_msg):
@@ -1142,7 +1225,9 @@ class TestAutogradFunction(TestCase):
b = A.apply(a)
if mark_dirty:
self.assertTrue(a is b)
- if not (mark_dirty and save_for == "vjp" and save_tensors in ("input", "output")):
+ if not (
+ mark_dirty and save_for == "vjp" and save_tensors in ("input", "output")
+ ):
# TODO(soulitzer): https://github.com/pytorch/pytorch/issues/97827
with fwAD.dual_level():
a_dual = fwAD.make_dual(a, a_t)
@@ -1166,8 +1251,8 @@ class TestAutogradFunction(TestCase):
self.assertFalse(ctx.needs_input_grad[1])
return None, None
- x = torch.tensor(2., device=device)
- y = torch.tensor(3., device=device)
+ x = torch.tensor(2.0, device=device)
+ y = torch.tensor(3.0, device=device)
# grad differentiates w.r.t. arg 0 by default
grad(A.apply)(x, y)
grad(grad(A.apply))(x, y)
@@ -1177,7 +1262,7 @@ class TestAutogradFunction(TestCase):
@staticmethod
def forward(input):
input_np = input.cpu().numpy()
- return torch.tensor(input_np ** 3, device=input.device), input_np
+ return torch.tensor(input_np**3, device=input.device), input_np
@staticmethod
def setup_context(ctx, inputs, output):
@@ -1187,7 +1272,7 @@ class TestAutogradFunction(TestCase):
@staticmethod
@torch.autograd.function.once_differentiable
def backward(ctx, grad_output, grad_saved):
- result_np = 3 * (ctx.input_np ** 2)
+ result_np = 3 * (ctx.input_np**2)
return torch.tensor(result_np, device=ctx.device)
return NumpyCubeNotComposable
@@ -1203,7 +1288,7 @@ class TestAutogradFunction(TestCase):
x = torch.randn([], requires_grad=True, device=device)
grad_y = torch.randn_like(x, requires_grad=True)
_, vjp_fn = vjp(f, x)
- gx, = vjp_fn(grad_y)
+ (gx,) = vjp_fn(grad_y)
with self.assertRaisesRegex(RuntimeError, "marked with @once_differentiable"):
gx.backward()
@@ -1221,7 +1306,7 @@ class TestAutogradFunction(TestCase):
def h(x, grad_y):
_, vjp_fn = vjp(f, x) # noqa: F821
- gx, = vjp_fn(grad_y)
+ (gx,) = vjp_fn(grad_y)
return gx
grad(h, argnums=(0, 1))(x, grad_y)
@@ -1247,9 +1332,9 @@ class TestAutogradFunction(TestCase):
names.append(type(y.grad_fn).__name__)
return y
- x = torch.tensor(1.)
+ x = torch.tensor(1.0)
grad(f)(x)
- self.assertEqual(names, ['FooBarGeneratedBackward'])
+ self.assertEqual(names, ["FooBarGeneratedBackward"])
@markDynamoStrictTest
@@ -1259,8 +1344,8 @@ class TestAutogradFunctionVmapAPI(TestCase):
@staticmethod
def forward(input):
input_np = to_numpy(input) # noqa: F821
- dinput = torch.tensor(3 * input_np ** 2, device=input.device)
- return torch.tensor(input_np ** 3, device=input.device), dinput
+ dinput = torch.tensor(3 * input_np**2, device=input.device)
+ return torch.tensor(input_np**3, device=input.device), dinput
@staticmethod
def setup_context(ctx, inputs, output):
@@ -1271,7 +1356,7 @@ class TestAutogradFunctionVmapAPI(TestCase):
raise RuntimeError("foobar")
x = torch.randn(3, device=device)
- with self.assertRaisesRegex(RuntimeError, 'does not have vmap support'):
+ with self.assertRaisesRegex(RuntimeError, "does not have vmap support"):
vmap(NumpyCube.apply)(x)
def test_has_vmap_staticmethod_and_has_generate_vmap_rule(self, device):
@@ -1281,8 +1366,8 @@ class TestAutogradFunctionVmapAPI(TestCase):
@staticmethod
def forward(input):
input_np = to_numpy(input) # noqa: F821
- dinput = torch.tensor(3 * input_np ** 2, device=input.device)
- return torch.tensor(input_np ** 3, device=input.device), dinput
+ dinput = torch.tensor(3 * input_np**2, device=input.device)
+ return torch.tensor(input_np**3, device=input.device), dinput
@staticmethod
def setup_context(ctx, outputs, input):
@@ -1297,7 +1382,7 @@ class TestAutogradFunctionVmapAPI(TestCase):
raise RuntimeError("foobar")
x = torch.randn(3, device=device)
- with self.assertRaisesRegex(RuntimeError, 'generate_vmap_rule=True and'):
+ with self.assertRaisesRegex(RuntimeError, "generate_vmap_rule=True and"):
vmap(NumpyCube.apply)(x)
def test_info_object(self, device):
@@ -1324,7 +1409,7 @@ class TestAutogradFunctionVmapAPI(TestCase):
x = torch.randn(batch_size, 3, device=device)
- for randomness in ('error', 'different', 'same'):
+ for randomness in ("error", "different", "same"):
vmap(Id.apply, randomness=randomness)(x)
def test_in_dims_single_input(self, device):
@@ -1394,7 +1479,7 @@ class TestAutogradFunctionVmapAPI(TestCase):
raise RuntimeError("expected to not be called")
def f(x):
- y = torch.tensor(1.)
+ y = torch.tensor(1.0)
y = Id.apply(y)
return x * 1
@@ -1596,7 +1681,9 @@ class TestVmapOfGrad(TestCase):
self.assertEqual(r, e, atol=0, rtol=1.5e-3)
else:
assert mechanism == "functional_call"
- expected = {k: tuple(d[k] for d in expected) for k, v in expected[0].items()}
+ expected = {
+ k: tuple(d[k] for d in expected) for k, v in expected[0].items()
+ }
expected = {k: torch.stack(shards) for k, shards in expected.items()}
for key in result:
self.assertEqual(result[key], expected[key], atol=0, rtol=1.5e-3)
@@ -1626,7 +1713,9 @@ class TestVmapOfGrad(TestCase):
vocab_size = 1000
batch_shape = [64]
words_per_sentence = 5
- data = torch.randint(0, vocab_size, (*batch_shape, words_per_sentence), device=device)
+ data = torch.randint(
+ 0, vocab_size, (*batch_shape, words_per_sentence), device=device
+ )
targets = torch.randint(0, 1, (*batch_shape,), device=device)
# Construct our module
@@ -1661,9 +1750,12 @@ class TestVmapOfGrad(TestCase):
self.assertEqual(result, x.grad)
-jacrev_and_jacfwd = parametrize("jacapi", [subtest(jacrev, name='jacrev'), subtest(jacfwd, name='jacfwd')])
+jacrev_and_jacfwd = parametrize(
+ "jacapi", [subtest(jacrev, name="jacrev"), subtest(jacfwd, name="jacfwd")]
+)
+
+FIXME_jacrev_only = parametrize("jacapi", [subtest(jacrev, name="jacrev")])
-FIXME_jacrev_only = parametrize("jacapi", [subtest(jacrev, name='jacrev')])
@markDynamoStrictTest
class TestJac(VmapTearDownMixin, TestCase):
@@ -1781,7 +1873,7 @@ class TestJac(VmapTearDownMixin, TestCase):
@jacrev_and_jacfwd
def test_multiple_outputs_pytree(self, device, jacapi):
def f(x, y):
- return {'left': 2 * x + 3 * y, 'right': 4 * x + 5 * y}
+ return {"left": 2 * x + 3 * y, "right": 4 * x + 5 * y}
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
@@ -1791,12 +1883,12 @@ class TestJac(VmapTearDownMixin, TestCase):
expected_right_x = torch.diagflat(torch.full_like(x, 4))
expected_right_y = torch.diagflat(torch.full_like(y, 5))
expected = {
- 'left': (expected_left_x, expected_left_y),
- 'right': (expected_right_x, expected_right_y),
+ "left": (expected_left_x, expected_left_y),
+ "right": (expected_right_x, expected_right_y),
}
self.assertTrue(isinstance(z, dict))
- self.assertTrue(isinstance(z['left'], tuple))
- self.assertTrue(isinstance(z['right'], tuple))
+ self.assertTrue(isinstance(z["left"], tuple))
+ self.assertTrue(isinstance(z["right"], tuple))
self.assertEqual(z, expected)
@jacrev_and_jacfwd
@@ -1810,18 +1902,20 @@ class TestJac(VmapTearDownMixin, TestCase):
result = jacapi(f, argnums=(0, 1, 2))(*args)
expected = (
- (torch.tensor(1., device=device), torch.tensor(2., device=device)),
- torch.tensor(3., device=device),
- torch.tensor(4., device=device),
+ (torch.tensor(1.0, device=device), torch.tensor(2.0, device=device)),
+ torch.tensor(3.0, device=device),
+ torch.tensor(4.0, device=device),
)
self.assertEqual(result, expected)
result = jacapi(f, argnums=(0,))(*args)
- expected = ((torch.tensor(1., device=device), torch.tensor(2., device=device)),)
+ expected = (
+ (torch.tensor(1.0, device=device), torch.tensor(2.0, device=device)),
+ )
self.assertEqual(result, expected)
result = jacapi(f)(*args)
- expected = (torch.tensor(1., device=device), torch.tensor(2., device=device))
+ expected = (torch.tensor(1.0, device=device), torch.tensor(2.0, device=device))
self.assertEqual(result, expected)
@jacrev_and_jacfwd
@@ -1855,7 +1949,7 @@ class TestJac(VmapTearDownMixin, TestCase):
def test_aux_pytree(self, device, jacapi):
def f(x):
y = x.clone()
- return y, {'a': y.cos(), 'b': [y.tan()]}
+ return y, {"a": y.cos(), "b": [y.tan()]}
x = torch.randn(3, device=device)
@@ -1865,9 +1959,13 @@ class TestJac(VmapTearDownMixin, TestCase):
self.assertEqual(aux, expected_aux)
for aux in [1, 1.0, "abc"]:
- with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
+ with self.assertRaisesRegex(
+ RuntimeError, r"Expected tensors, got unsupported type"
+ ):
_ = jacapi(lambda x: (x, aux), has_aux=True)(x)
- with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
+ with self.assertRaisesRegex(
+ RuntimeError, r"Expected tensors, got unsupported type"
+ ):
_ = jacapi(lambda x: (x, [x, aux]), has_aux=True)(x)
@jacrev_and_jacfwd
@@ -1876,13 +1974,15 @@ class TestJac(VmapTearDownMixin, TestCase):
for output in [None, ()]:
with self.assertRaisesRegex(
- RuntimeError, r"(vjp|jvp).+: Expected f to be a function that has non-empty output"
+ RuntimeError,
+ r"(vjp|jvp).+: Expected f to be a function that has non-empty output",
):
jacapi(lambda _: output)(x)
for output in [1, True, 12.2, "abc"]:
with self.assertRaisesRegex(
- RuntimeError, r"(vjp|jvp).+: expected f\(\*primals\) to return only tensors"
+ RuntimeError,
+ r"(vjp|jvp).+: expected f\(\*primals\) to return only tensors",
):
jacapi(lambda _: output)(x)
@@ -1908,7 +2008,7 @@ class TestJac(VmapTearDownMixin, TestCase):
def test_multiple_inputs_outputs_pytree(self, device, jacapi):
def f(a, b, c):
a0, a1 = a
- return a0 + a1 * 2, {'foo': b * 3 + c * 4}
+ return a0 + a1 * 2, {"foo": b * 3 + c * 4}
x = torch.randn([], device=device)
zero = torch.zeros([], device=device)
@@ -1916,39 +2016,42 @@ class TestJac(VmapTearDownMixin, TestCase):
result = jacapi(f)(*args)
expected = (
- (torch.tensor(1., device=device), torch.tensor(2., device=device)),
- {'foo': (zero, zero)},
+ (torch.tensor(1.0, device=device), torch.tensor(2.0, device=device)),
+ {"foo": (zero, zero)},
)
self.assertEqual(result, expected)
result = jacapi(f, argnums=(0,))(*args)
expected = (
- ((torch.tensor(1., device=device), torch.tensor(2., device=device)),),
- {'foo': ((zero, zero),)},
+ ((torch.tensor(1.0, device=device), torch.tensor(2.0, device=device)),),
+ {"foo": ((zero, zero),)},
)
self.assertEqual(result, expected)
result = jacapi(f, argnums=(0, 1))(*args)
expected = (
- ((torch.tensor(1., device=device), torch.tensor(2., device=device)), zero),
- {'foo': ((zero, zero), torch.tensor(3., device=device))},
+ (
+ (torch.tensor(1.0, device=device), torch.tensor(2.0, device=device)),
+ zero,
+ ),
+ {"foo": ((zero, zero), torch.tensor(3.0, device=device))},
)
self.assertEqual(result, expected)
@jacrev_and_jacfwd
def test_multiple_inputs_outputs_pytree_multidim(self, device, jacapi):
def f(dct):
- a = dct['a']
- b = dct['b']
- return {'c': a.sin(), 'd': b.cos()}
+ a = dct["a"]
+ b = dct["b"]
+ return {"c": a.sin(), "d": b.cos()}
x = torch.randn(3, device=device)
- args = ({'a': x, 'b': x},)
+ args = ({"a": x, "b": x},)
result = jacapi(f)(*args)
expected = {
- 'c': {'a': x.cos().diagflat(), 'b': x.new_zeros(3, 3)},
- 'd': {'a': x.new_zeros(3, 3), 'b': -x.sin().diagflat()},
+ "c": {"a": x.cos().diagflat(), "b": x.new_zeros(3, 3)},
+ "d": {"a": x.new_zeros(3, 3), "b": -x.sin().diagflat()},
}
self.assertEqual(result, expected)
@@ -1988,7 +2091,7 @@ class TestJac(VmapTearDownMixin, TestCase):
def f(x, y):
return ()
- with self.assertRaisesRegex(RuntimeError, 'xpected'):
+ with self.assertRaisesRegex(RuntimeError, "xpected"):
jacapi(f)(x, y)
@jacrev_and_jacfwd
@@ -2080,7 +2183,7 @@ class TestJac(VmapTearDownMixin, TestCase):
@jacrev_and_jacfwd
def test_against_reference_simple(self, device, jacapi):
def f(x):
- return 3 * x ** 2
+ return 3 * x**2
x = torch.randn(2, 3, 5, device=device)
self._test_against_reference(f, (x,), jacapi)
@@ -2148,7 +2251,7 @@ class TestJac(VmapTearDownMixin, TestCase):
@jacrev_and_jacfwd
def test_against_reference_default_arg(self, device, jacapi):
- def f(x, y, z=3.):
+ def f(x, y, z=3.0):
return x * y * z
x = torch.randn(3, device=device)
@@ -2171,16 +2274,23 @@ class TestJac(VmapTearDownMixin, TestCase):
return torch.vstack([(x**2).sum(), (z**3).sum()])
out = jacapi(g, argnums=(1, 2))
- x, y, z = torch.randn(3, device=device), torch.randn(2, device=device), torch.randn(2, device=device)
+ x, y, z = (
+ torch.randn(3, device=device),
+ torch.randn(2, device=device),
+ torch.randn(2, device=device),
+ )
- expected_out = (torch.zeros(2, 1, 2, device=device), torch.zeros(2, 1, 2, device=device))
+ expected_out = (
+ torch.zeros(2, 1, 2, device=device),
+ torch.zeros(2, 1, 2, device=device),
+ )
expected_out[0][0][0] = 2 * y # top left corner
- expected_out[1][1][0] = 3 * (z ** 2) # bottom right corner
+ expected_out[1][1][0] = 3 * (z**2) # bottom right corner
out_val = out(x, y, z)
self.assertEqual(out_val, expected_out)
- @parametrize('_preallocate_and_copy', (True, False))
+ @parametrize("_preallocate_and_copy", (True, False))
def test_chunk_jacrev(self, device, _preallocate_and_copy):
x = torch.randn(10, 2, device=device)
y = torch.randn(1, 2, device=device)
@@ -2190,19 +2300,22 @@ class TestJac(VmapTearDownMixin, TestCase):
for chunk_size in (1, 2, 3, 4, 7, 10, 1000):
expected = jacrev(f, argnums=(0, 1))(x, y)
- actual = jacrev(f, argnums=(0, 1),
- chunk_size=chunk_size,
- _preallocate_and_copy=_preallocate_and_copy)(x, y)
+ actual = jacrev(
+ f,
+ argnums=(0, 1),
+ chunk_size=chunk_size,
+ _preallocate_and_copy=_preallocate_and_copy,
+ )(x, y)
self.assertEqual(actual, expected)
err_msg = "jacrev: `chunk_size` should be greater than 0."
with self.assertRaisesRegex(ValueError, err_msg):
- jacrev(f, argnums=(0, ), chunk_size=0)(x, y)
+ jacrev(f, argnums=(0,), chunk_size=0)(x, y)
with self.assertRaisesRegex(ValueError, err_msg):
- jacrev(f, argnums=(0, ), chunk_size=-2)(x, y)
+ jacrev(f, argnums=(0,), chunk_size=-2)(x, y)
- @parametrize('_preallocate_and_copy', (True, False))
+ @parametrize("_preallocate_and_copy", (True, False))
def test_chunk_jacrev_composition(self, device, _preallocate_and_copy):
x = torch.randn(10, 2, device=device)
chunk_size = 3
@@ -2211,11 +2324,19 @@ class TestJac(VmapTearDownMixin, TestCase):
return (x.sin(), x), (x + 2, x.sum())
expected = vmap(jacrev(jacrev(f)))(x)
- actual = vmap(jacrev(jacrev(f, chunk_size=chunk_size,
- _preallocate_and_copy=_preallocate_and_copy), chunk_size=chunk_size))(x)
+ actual = vmap(
+ jacrev(
+ jacrev(
+ f,
+ chunk_size=chunk_size,
+ _preallocate_and_copy=_preallocate_and_copy,
+ ),
+ chunk_size=chunk_size,
+ )
+ )(x)
self.assertEqual(actual, expected)
- @parametrize('_preallocate_and_copy', (True, False))
+ @parametrize("_preallocate_and_copy", (True, False))
def test_chunk_jacrev_chunksize_one(self, device, _preallocate_and_copy):
        # With chunk_size=1, we shouldn't `vmap`, and hence are not limited
        # by its constraints.
@@ -2248,7 +2369,9 @@ class TestJac(VmapTearDownMixin, TestCase):
self.assertEqual(actual, expected)
# Should fail with `chunk_size=2`.
- msg = r"vmap: We do not support batching operators that can output dynamic shape."
+ msg = (
+ r"vmap: We do not support batching operators that can output dynamic shape."
+ )
with self.assertRaisesRegex(RuntimeError, msg):
jacrev(f, chunk_size=2, _preallocate_and_copy=_preallocate_and_copy)(x)
@@ -2290,6 +2413,7 @@ class TestJac(VmapTearDownMixin, TestCase):
expected = torch.autograd.functional.jacobian(partial(f, int_x=3), t)
self.assertEqual(actual, expected)
+
@markDynamoStrictTest
class TestHessian(TestCase):
def _test_against_reference(self, f, inputs):
@@ -2302,7 +2426,7 @@ class TestHessian(TestCase):
def test_hessian_vectorize_correctness_simple(self, device):
def f(x):
- return (3 * x ** 2).sum()
+ return (3 * x**2).sum()
x = torch.randn(2, 3, 5, device=device)
self._test_against_reference(f, (x,))
@@ -2319,7 +2443,7 @@ class TestHessian(TestCase):
def test_hessian_vectorize_correctness_unrelated_outputs(self, device):
# output unrelated to one input
def f(x, y):
- return (x ** 2).sum()
+ return (x**2).sum()
x = torch.randn(2, device=device)
y = torch.randn(3, device=device)
@@ -2357,14 +2481,14 @@ class TestHessian(TestCase):
@markDynamoStrictTest
class TestJvp(TestCase):
def test_inplace_on_captures(self, device):
- x = torch.tensor([1., 2., 3.], device=device)
+ x = torch.tensor([1.0, 2.0, 3.0], device=device)
captured = torch.randn(3, device=device)
def foo(x):
captured.copy_(x)
return (x * captured).sum()
- with self.assertRaisesRegex(RuntimeError, 'mutate a captured Tensor'):
+ with self.assertRaisesRegex(RuntimeError, "mutate a captured Tensor"):
grad(foo)(x)
def test_simple(self, device):
@@ -2394,8 +2518,10 @@ class TestJvp(TestCase):
a, b = x
return a + 2 * b + 3 * y + 4 * z
- one = torch.tensor(1., device=device)
- primal_outs, tangent_outs = jvp(f, ((one, one), one, one), ((one, one), one, one))
+ one = torch.tensor(1.0, device=device)
+ primal_outs, tangent_outs = jvp(
+ f, ((one, one), one, one), ((one, one), one, one)
+ )
self.assertEqual(primal_outs, one * 10)
self.assertEqual(tangent_outs, one * 10)
@@ -2403,17 +2529,17 @@ class TestJvp(TestCase):
def f(x):
return x
- one = torch.tensor(1., device=device)
+ one = torch.tensor(1.0, device=device)
- with self.assertRaisesRegex(RuntimeError, 'Expected primals to be a tuple'):
+ with self.assertRaisesRegex(RuntimeError, "Expected primals to be a tuple"):
jvp(f, one, one)
- with self.assertRaisesRegex(RuntimeError, 'same python structure'):
+ with self.assertRaisesRegex(RuntimeError, "same python structure"):
jvp(f, ((one, one), one), (one, one))
- with self.assertRaisesRegex(RuntimeError, 'only contain Tensors'):
+ with self.assertRaisesRegex(RuntimeError, "only contain Tensors"):
jvp(f, ((one, one), 1), ((one, one), one))
- with self.assertRaisesRegex(RuntimeError, 'only contain Tensors'):
+ with self.assertRaisesRegex(RuntimeError, "only contain Tensors"):
jvp(f, ((one, one), 1), ((1, one), one))
- with self.assertRaisesRegex(RuntimeError, 'at least one Tensor'):
+ with self.assertRaisesRegex(RuntimeError, "at least one Tensor"):
jvp(f, ((),), ((),))
def test_unrelated_input(self, device):
@@ -2500,16 +2626,16 @@ class TestJvp(TestCase):
x = torch.randn(2, 3, device=device)
t = torch.randn(2, 3, device=device)
- with self.assertRaisesRegex(RuntimeError, 'be a tuple'):
+ with self.assertRaisesRegex(RuntimeError, "be a tuple"):
jvp(torch.sin, x, (t,))
- with self.assertRaisesRegex(RuntimeError, 'same python structure'):
+ with self.assertRaisesRegex(RuntimeError, "same python structure"):
jvp(torch.sin, (x,), t)
- with self.assertRaisesRegex(RuntimeError, 'same python structure'):
+ with self.assertRaisesRegex(RuntimeError, "same python structure"):
jvp(torch.sin, (x,), [t])
- with self.assertRaisesRegex(RuntimeError, 'only contain Tensors'):
- jvp(torch.sin, (1.,), (t,))
- with self.assertRaisesRegex(RuntimeError, 'only contain Tensors'):
- jvp(torch.sin, (x,), (1.,))
+ with self.assertRaisesRegex(RuntimeError, "only contain Tensors"):
+ jvp(torch.sin, (1.0,), (t,))
+ with self.assertRaisesRegex(RuntimeError, "only contain Tensors"):
+ jvp(torch.sin, (x,), (1.0,))
def test_outputs_can_any_pytree(self, device):
x = torch.randn(2, 3, device=device)
@@ -2517,13 +2643,15 @@ class TestJvp(TestCase):
for output in [None, ()]:
with self.assertRaisesRegex(
- RuntimeError, r"jvp\(f, primals, tangents\): Expected f to be a function that has non-empty output"
+ RuntimeError,
+ r"jvp\(f, primals, tangents\): Expected f to be a function that has non-empty output",
):
jvp(lambda _: output, (x,), (t,))
for output in [1, True, 12.2, "abc"]:
with self.assertRaisesRegex(
- RuntimeError, r"jvp\(f, primals, tangents\): expected f\(\*primals\) to return only tensors"
+ RuntimeError,
+ r"jvp\(f, primals, tangents\): expected f\(\*primals\) to return only tensors",
):
jvp(lambda _: output, (x,), (t,))
@@ -2546,29 +2674,29 @@ class TestJvp(TestCase):
out = jvp(composite_output, (x,), (t,))
for i in range(2):
assert isinstance(out[i], list)
- assert isinstance(out[i][0], tuple) and \
- isinstance(out[i][0][1], dict)
+ assert isinstance(out[i][0], tuple) and isinstance(out[i][0][1], dict)
def test_aux_tensor(self, device):
-
x = torch.randn(3, device=device)
t = torch.randn(3, device=device)
with self.assertRaisesRegex(
- RuntimeError, r'jvp\(f, primals, tangents\): output of function f should be a tuple'
+ RuntimeError,
+ r"jvp\(f, primals, tangents\): output of function f should be a tuple",
):
- jvp(lambda t: [t, t], (x, ), (t, ), has_aux=True)
+ jvp(lambda t: [t, t], (x,), (t,), has_aux=True)
with self.assertRaisesRegex(
- RuntimeError, r'jvp\(f, primals, tangents\): output of function f should be a tuple'
+ RuntimeError,
+ r"jvp\(f, primals, tangents\): output of function f should be a tuple",
):
- jvp(lambda t: (t, t + 2, t + 3), (x, ), (t, ), has_aux=True)
+ jvp(lambda t: (t, t + 2, t + 3), (x,), (t,), has_aux=True)
def f(z):
y = z.sin()
return y, z.cos()
- out, jvp_out, aux = jvp(f, (x, ), (t, ), has_aux=True)
+ out, jvp_out, aux = jvp(f, (x,), (t,), has_aux=True)
self.assertEqual(aux, x.cos())
self.assertEqual(out, x.sin())
self.assertEqual(jvp_out, t * x.cos())
@@ -2576,22 +2704,26 @@ class TestJvp(TestCase):
def test_aux_pytree(self, device):
def f(x):
y = x.sin()
- return y, {'a': x.cos(), 'b': [x.tan()]}
+ return y, {"a": x.cos(), "b": [x.tan()]}
x = torch.randn(3, device=device)
t = torch.randn(3, device=device)
- out, jvp_out, aux = jvp(f, (x, ), (t, ), has_aux=True)
+ out, jvp_out, aux = jvp(f, (x,), (t,), has_aux=True)
expected_out, expected_aux = f(x)
self.assertEqual(out, expected_out)
self.assertEqual(aux, expected_aux)
self.assertEqual(jvp_out, t * x.cos())
for aux in [1, 1.0, "abc"]:
- with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
- _ = jvp(lambda x: (x, aux), (x, ), (t, ), has_aux=True)
- with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
- _ = jvp(lambda x: (x, [x, aux]), (x, ), (t, ), has_aux=True)
+ with self.assertRaisesRegex(
+ RuntimeError, r"Expected tensors, got unsupported type"
+ ):
+ _ = jvp(lambda x: (x, aux), (x,), (t,), has_aux=True)
+ with self.assertRaisesRegex(
+ RuntimeError, r"Expected tensors, got unsupported type"
+ ):
+ _ = jvp(lambda x: (x, [x, aux]), (x,), (t,), has_aux=True)
def test_autograd_function_disables_fwd_grad(self, device):
# Sanity check. We don't really assume this anywhere so
@@ -2620,8 +2752,8 @@ class TestJvp(TestCase):
def test_disable_fwd_grad_inside(self, device):
def f(x):
with fwAD._set_fwd_grad_enabled(False):
- shift = x ** 2
- return x ** 2 - shift
+ shift = x**2
+ return x**2 - shift
x = torch.randn([], device=device)
t = torch.ones_like(x)
@@ -2633,8 +2765,8 @@ class TestJvp(TestCase):
def test_disable_fwd_grad_mixed(self, device):
def f(x):
with fwAD._set_fwd_grad_enabled(False):
- shift = x ** 2
- return x ** 2 - shift
+ shift = x**2
+ return x**2 - shift
x = torch.randn([], device=device)
t = torch.ones_like(x)
@@ -2654,7 +2786,7 @@ class TestJvp(TestCase):
@staticmethod
def backward(ctx, gx):
- x, = ctx.saved_tensors
+ (x,) = ctx.saved_tensors
t = torch.ones_like(x)
_, cos_x = jvp(torch.sin, (x,), (t,))
return gx * cos_x
@@ -2663,7 +2795,7 @@ class TestJvp(TestCase):
y = MySin.apply(x)
self.assertEqual(y, x.sin())
- gx, = torch.autograd.grad(y, x)
+ (gx,) = torch.autograd.grad(y, x)
self.assertEqual(gx, x.cos())
def test_zerotensor_vmapjvp_interaction(self, device):
@@ -2678,6 +2810,7 @@ class TestJvp(TestCase):
# Should not error
vmap(vmap(push_jvp, (0, None)))(dummy, x)
+
@markDynamoStrictTest
class TestLinearize(TestCase):
@dtypes(torch.float)
@@ -2721,6 +2854,7 @@ class TestLinearize(TestCase):
def jvp_fn(x_t):
return jvp(fn, (x_p,), (x_t,))[1]
+
expected_batched_jvp = vmap(jvp_fn)(x_t)
self.assertEqual(actual_batched_jvp, expected_batched_jvp)
@@ -2735,14 +2869,14 @@ class TestLinearize(TestCase):
z_t = make_tensor((3, 1), device=device, dtype=dtype)
def fn(arg):
- x = arg['x']
- y = arg['yz'][0]
- z = arg['yz'][1]
+ x = arg["x"]
+ y = arg["yz"][0]
+ z = arg["yz"][1]
- return {'a': x.sum(), 'b': {'c': y + z, 'd': (x * z, y.exp())}}
+ return {"a": x.sum(), "b": {"c": y + z, "d": (x * z, y.exp())}}
- inp_p = {'x': x_p, 'yz': (y_p, z_p)}
- inp_t = {'x': x_t, 'yz': (y_t, z_t)}
+ inp_p = {"x": x_p, "yz": (y_p, z_p)}
+ inp_t = {"x": x_t, "yz": (y_t, z_t)}
actual_output, jvp_fn = linearize(fn, inp_p)
actual_jvp = jvp_fn(inp_t)
@@ -2754,7 +2888,7 @@ class TestLinearize(TestCase):
@onlyCUDA
def test_linearize_errors(self):
dtype = torch.float
- device = torch.device('cpu')
+ device = torch.device("cpu")
x_p = make_tensor((3, 1), device=device, dtype=dtype)
x_t = make_tensor((3, 1), device=device, dtype=dtype)
@@ -2763,17 +2897,26 @@ class TestLinearize(TestCase):
_, jvp_fn = linearize(fn, x_p)
- with self.assertRaisesRegex(RuntimeError, "to have the same argspec as the primals"):
+ with self.assertRaisesRegex(
+ RuntimeError, "to have the same argspec as the primals"
+ ):
jvp_fn((x_t, x_t))
- with self.assertRaisesRegex(RuntimeError, "in flattened pytree doesn't match the shape"):
+ with self.assertRaisesRegex(
+ RuntimeError, "in flattened pytree doesn't match the shape"
+ ):
jvp_fn(x_t.unsqueeze(0))
- with self.assertRaisesRegex(RuntimeError, "in flattened pytree doesn't match the dtype"):
+ with self.assertRaisesRegex(
+ RuntimeError, "in flattened pytree doesn't match the dtype"
+ ):
jvp_fn(x_t.to(torch.double))
- with self.assertRaisesRegex(RuntimeError, "in flattened pytree doesn't match the device"):
- jvp_fn(x_t.to(torch.device('cuda')))
+ with self.assertRaisesRegex(
+ RuntimeError, "in flattened pytree doesn't match the device"
+ ):
+ jvp_fn(x_t.to(torch.device("cuda")))
+
# The tests here follow the cases in [Forward Grad View/inplace]
# https://github.com/pytorch/pytorch/blob/master/torch/csrc/autograd/autograd_meta.cpp#L18-L43
@@ -2786,11 +2929,13 @@ class TestVmapJvpInplaceView(TestCase):
def push_jvp(f):
def inner(x, xt, y, yt):
return jvp(f, (x, y), (xt, yt))
+
return inner
def f(x, y):
x.copy_(y)
return x
+
x = torch.randn(3, B, device=device)
xt = torch.randn(3, B, device=device)
y = torch.randn(3, B, device=device)
@@ -2814,6 +2959,7 @@ class TestVmapJvpInplaceView(TestCase):
def push_jvp(f):
def inner(x, xt, y, yt):
return jvp(f, (x, y), (xt, yt))
+
return inner
# with view, propagate from view to base
@@ -2851,6 +2997,7 @@ class TestVmapJvpInplaceView(TestCase):
def push_jvp(f):
def inner(x, xt, y, yt):
return jvp(f, (x, y), (xt, yt))
+
return inner
# Case 3: with view, propagate from base to view
@@ -2948,7 +3095,9 @@ class TestHelpers(TestCase):
@staticmethod
def backward(ctx, gy):
- wrapped = torch._functorch.autograd_function.CtxWithSavedTensors(ctx, (y,))
+ wrapped = torch._functorch.autograd_function.CtxWithSavedTensors(
+ ctx, (y,)
+ )
return gy
class B(torch.autograd.Function):
@@ -2960,14 +3109,16 @@ class TestHelpers(TestCase):
@staticmethod
def backward(ctx, gy):
- wrapped = torch._functorch.autograd_function.CtxWithSavedTensors(ctx, (y,))
+ wrapped = torch._functorch.autograd_function.CtxWithSavedTensors(
+ ctx, (y,)
+ )
return gy
out = A.apply(x)
- with self.assertRaisesRegex(RuntimeError, 'name collision'):
+ with self.assertRaisesRegex(RuntimeError, "name collision"):
out.backward()
out = B.apply(x)
- with self.assertRaisesRegex(RuntimeError, 'name collision'):
+ with self.assertRaisesRegex(RuntimeError, "name collision"):
out.backward()
def test_CtxWithSavedTensors_nesting(self, device):
@@ -3017,7 +3168,9 @@ class TestHelpers(TestCase):
def backward(ctx, gy):
# The override can be literally anything
override = (1, 2, 3)
- wrapped = torch._functorch.autograd_function.CtxWithSavedTensors(ctx, override)
+ wrapped = torch._functorch.autograd_function.CtxWithSavedTensors(
+ ctx, override
+ )
assert wrapped.saved_tensors == override
return gy
@@ -3038,13 +3191,15 @@ class TestHelpers(TestCase):
def backward(ctx, gz):
# The override can be literally anything
override = (1, 2, 3)
- wrapped = torch._functorch.autograd_function.CtxWithSavedTensors(ctx, override)
+ wrapped = torch._functorch.autograd_function.CtxWithSavedTensors(
+ ctx, override
+ )
assert wrapped.needs_input_grad[0] == ctx.needs_input_grad[0]
assert wrapped.needs_input_grad[1] == ctx.needs_input_grad[1]
- wrapped.foo = 'bar'
- assert wrapped.foo == 'bar'
- assert ctx.foo == 'bar'
+ wrapped.foo = "bar"
+ assert wrapped.foo == "bar"
+ assert ctx.foo == "bar"
return gz, gz
out = A.apply(x, y)
@@ -3105,9 +3260,10 @@ class TestComposability(TestCase):
torch.vmap(torch.sin)
# Some of these pass, some of these don't
- @parametrize('transform', [
- 'grad', 'jacrev', 'jacfwd', 'grad_and_value', 'hessian', 'functionalize'
- ])
+ @parametrize(
+ "transform",
+ ["grad", "jacrev", "jacfwd", "grad_and_value", "hessian", "functionalize"],
+ )
def test_deprecation_transforms(self, device, transform):
api = getattr(functorch, transform)
new_api = getattr(torch.func, transform)
@@ -3204,6 +3360,7 @@ class TestComposability(TestCase):
def test_make_fx_vmap(self, device):
def f(x):
return torch.sin(x)
+
inp = torch.randn(5, 3)
f = vmap(f)
fx_f = make_fx(f)(inp)
@@ -3213,6 +3370,7 @@ class TestComposability(TestCase):
def test_make_fx_jacrev(self, device):
def f(x):
return x.sin().sum()
+
inp = torch.randn(3)
f = jacrev(jacrev(f))
fx_f = make_fx(f)(inp)
@@ -3237,7 +3395,8 @@ class TestComposability(TestCase):
out = subprocess.check_output(
[sys.executable, "-W", "all", "-c", "import functorch"],
stderr=subprocess.STDOUT,
- cwd=os.path.dirname(os.path.realpath(__file__)),).decode("utf-8")
+ cwd=os.path.dirname(os.path.realpath(__file__)),
+ ).decode("utf-8")
self.assertEqual(out, "")
def test_requires_grad_inside_transform(self, device):
@@ -3315,17 +3474,29 @@ class TestComposability(TestCase):
def test_autograd_functional_jacfwd_inside_transform(self, device):
def f(x):
y = torch.autograd.functional.jacobian(
- lambda x: x.sin().sum(), x, strategy='forward-mode', vectorize=True)
+ lambda x: x.sin().sum(), x, strategy="forward-mode", vectorize=True
+ )
return y
B = 5
x = torch.randn(B, 3)
- with self.assertRaisesRegex(RuntimeError, "Batching rule not implemented for aten::_make_dual"):
+ with self.assertRaisesRegex(
+ RuntimeError, "Batching rule not implemented for aten::_make_dual"
+ ):
vmap(f)(x)
- @parametrize('transform', [
- 'vmap', 'grad', 'jacrev', 'jacfwd', 'grad_and_value', 'hessian', 'functionalize'
- ])
+ @parametrize(
+ "transform",
+ [
+ "vmap",
+ "grad",
+ "jacrev",
+ "jacfwd",
+ "grad_and_value",
+ "hessian",
+ "functionalize",
+ ],
+ )
def test_autograd_function_no_setup_context(self, device, transform):
class MySin(torch.autograd.Function):
@staticmethod
@@ -3335,18 +3506,27 @@ class TestComposability(TestCase):
@staticmethod
def backward(ctx, gy):
- x, = ctx.saved_tensors
+ (x,) = ctx.saved_tensors
return gy * x.cos()
x = torch.randn(3, device=device)
transform = getattr(functorch, transform)
- with self.assertRaisesRegex(RuntimeError, 'must override the setup_context'):
+ with self.assertRaisesRegex(RuntimeError, "must override the setup_context"):
transform(MySin.apply)(x)
# Some of these pass, some of these don't
- @parametrize('transform', [
- 'vmap', 'grad', 'jacrev', 'jacfwd', 'grad_and_value', 'hessian', 'functionalize'
- ])
+ @parametrize(
+ "transform",
+ [
+ "vmap",
+ "grad",
+ "jacrev",
+ "jacfwd",
+ "grad_and_value",
+ "hessian",
+ "functionalize",
+ ],
+ )
def test_transforms_dont_support_saved_tensor_hooks(self, device, transform):
def f(x):
return torch.sin(x).sum()
@@ -3357,7 +3537,7 @@ class TestComposability(TestCase):
x = torch.randn(3, device=device)
- if transform == 'functionalize':
+ if transform == "functionalize":
transform = functorch.experimental.functionalize
else:
transform = getattr(functorch, transform)
@@ -3413,7 +3593,7 @@ class TestComposability(TestCase):
with _ExcludeDispatchKeyGuard(DispatchKeySet(DispatchKey.Functionalize)):
gm = make_fx(functorch.functionalize(f))(x)
- self.assertTrue('sin_' not in gm.code)
+ self.assertTrue("sin_" not in gm.code)
self.assertEqual(gm(x), expected)
local_exclude_set = torch._C._dispatch_tls_local_exclude_set()
@@ -3448,7 +3628,7 @@ class TestComposability(TestCase):
@markDynamoStrictTest
class TestMakeFunctional(TestCase):
- @parametrize('disable_autograd_tracking', [True, False])
+ @parametrize("disable_autograd_tracking", [True, False])
def test_disable_autograd_tracking(self, disable_autograd_tracking):
class Foo(nn.Module):
def __init__(self):
@@ -3460,7 +3640,9 @@ class TestMakeFunctional(TestCase):
return x
mod = Foo()
- _, params = make_functional(mod, disable_autograd_tracking=disable_autograd_tracking)
+ _, params = make_functional(
+ mod, disable_autograd_tracking=disable_autograd_tracking
+ )
self.assertEqual(len(params), 2)
for param in params:
self.assertEqual(param.requires_grad, not disable_autograd_tracking)
@@ -3500,8 +3682,8 @@ class TestMakeFunctional(TestCase):
super().__init__()
self.bias = nn.Parameter(torch.randn(3))
self.linear = nn.Linear(3, 3)
- self.register_buffer('buffer', torch.randn(3))
- self.register_buffer('buffer_tied', self.buffer)
+ self.register_buffer("buffer", torch.randn(3))
+ self.register_buffer("buffer_tied", self.buffer)
def forward(self, x):
x = self.linear(x)
@@ -3525,13 +3707,13 @@ class TestMakeFunctional(TestCase):
expected = mod(x)
self.assertEqual(result, expected)
- @parametrize('disable_autograd_tracking', [True, False])
+ @parametrize("disable_autograd_tracking", [True, False])
def test_with_buffers_disable_autograd_tracking(self, disable_autograd_tracking):
class Foo(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(3, 3)
- self.register_buffer('buffer', torch.randn(3))
+ self.register_buffer("buffer", torch.randn(3))
def forward(self, x):
x = self.linear(x)
@@ -3539,19 +3721,21 @@ class TestMakeFunctional(TestCase):
return x
mod = Foo()
- _, params, buffers = make_functional_with_buffers(mod, disable_autograd_tracking=disable_autograd_tracking)
+ _, params, buffers = make_functional_with_buffers(
+ mod, disable_autograd_tracking=disable_autograd_tracking
+ )
self.assertEqual(len(params), 2)
self.assertEqual(len(buffers), 1)
for param in params:
self.assertEqual(param.requires_grad, not disable_autograd_tracking)
- @parametrize('detach_params', [True, False])
+ @parametrize("detach_params", [True, False])
def test_using_detach_functional_call(self, detach_params):
class Foo(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(3, 3)
- self.register_buffer('buffer', torch.randn(3))
+ self.register_buffer("buffer", torch.randn(3))
def forward(self, x):
x = self.linear(x)
@@ -3560,7 +3744,11 @@ class TestMakeFunctional(TestCase):
def params_dict(mod):
named_params = mod.named_parameters()
- return {k: v.detach() for k, v in named_params} if detach_params else dict(named_params)
+ return (
+ {k: v.detach() for k, v in named_params}
+ if detach_params
+ else dict(named_params)
+ )
mod = Foo()
x = torch.randn(3, 3)
@@ -3607,8 +3795,8 @@ class TestMakeFunctional(TestCase):
self.linear = nn.Linear(3, 3)
self.weight = self.linear.weight
self.bias = self.linear.bias
- self.register_buffer('buffer', torch.randn(3))
- self.register_buffer('buffer_tied', self.buffer)
+ self.register_buffer("buffer", torch.randn(3))
+ self.register_buffer("buffer_tied", self.buffer)
def forward(self, x):
x = self.linear(x)
@@ -3724,18 +3912,24 @@ class TestMakeFunctional(TestCase):
out_features = 2
models = []
- with self.assertRaisesRegex(RuntimeError, "stack_module_state:.* Expected at least one model"):
+ with self.assertRaisesRegex(
+ RuntimeError, "stack_module_state:.* Expected at least one model"
+ ):
_ = stack_module_state(models)
num_models = 3
models = [torch.nn.Linear(in_features, out_features) for i in range(num_models)]
models[1].eval()
- with self.assertRaisesRegex(RuntimeError, "stack_module_state:.* same training/eval mode."):
+ with self.assertRaisesRegex(
+ RuntimeError, "stack_module_state:.* same training/eval mode."
+ ):
_ = stack_module_state(models)
models = [torch.nn.Linear(in_features, out_features) for i in range(num_models)]
models[1] = torch.nn.Conv2d(3, 3, (3, 3))
- with self.assertRaisesRegex(RuntimeError, "stack_module_state:.* models to be of the same class"):
+ with self.assertRaisesRegex(
+ RuntimeError, "stack_module_state:.* models to be of the same class"
+ ):
_ = stack_module_state(models)
@parametrize("mechanism", ["make_functional", "functional_call"])
@@ -3784,6 +3978,7 @@ class TestMakeFunctional(TestCase):
self.assertEqual(old_state_linear_weight, new_state_linear_weight)
self.assertEqual(old_state_linear_bias, new_state_linear_bias)
+
@markDynamoStrictTest
class TestExamplesCorrectness(TestCase):
def _update_params(self, params, grads, alpha, mechanism):
@@ -3816,7 +4011,9 @@ class TestExamplesCorrectness(TestCase):
def mse_loss(x, y):
return torch.mean((x - y) ** 2)
- net, params = _get_weights_and_functional_call(ThreeLayerNet().to(device), mechanism)
+ net, params = _get_weights_and_functional_call(
+ ThreeLayerNet().to(device), mechanism
+ )
K = 20
num_tasks = 4
alpha = 0.1
@@ -3826,18 +4023,22 @@ class TestExamplesCorrectness(TestCase):
As = []
phases = []
for _ in range(outer_batch_size):
- As.append(np.random.uniform(low=0.1, high=.5))
- phases.append(np.random.uniform(low=0., high=np.pi))
+ As.append(np.random.uniform(low=0.1, high=0.5))
+ phases.append(np.random.uniform(low=0.0, high=np.pi))
def get_batch():
xs, ys = [], []
for A, phase in zip(As, phases):
- x = np.random.uniform(low=-5., high=5., size=(inner_batch_size, 1))
+ x = np.random.uniform(
+ low=-5.0, high=5.0, size=(inner_batch_size, 1)
+ )
y = A * np.sin(x + phase)
xs.append(x)
ys.append(y)
- return torch.tensor(xs, dtype=torch.float, device=device), \
- torch.tensor(ys, dtype=torch.float, device=device)
+ return torch.tensor(xs, dtype=torch.float, device=device), torch.tensor(
+ ys, dtype=torch.float, device=device
+ )
+
x1, y1 = get_batch()
x2, y2 = get_batch()
return x1, y1, x2, y2
@@ -3862,10 +4063,14 @@ class TestExamplesCorrectness(TestCase):
return mse_loss(v_f, y2)
task = sample_tasks(num_tasks, K)
- list_params = params if mechanism == "make_functional" else list(params.values())
+ list_params = (
+ params if mechanism == "make_functional" else list(params.values())
+ )
# Compute with vmap+grad
- inner_losses = vmap(partial(get_loss_for_task, True))(task[0], task[1], task[2], task[3])
+ inner_losses = vmap(partial(get_loss_for_task, True))(
+ task[0], task[1], task[2], task[3]
+ )
loss2 = sum(inner_losses) / len(inner_losses)
result_grads = torch.autograd.grad(loss2, list_params)
@@ -3892,23 +4097,30 @@ class TestExamplesCorrectness(TestCase):
        # The real example uses batch norm, but it's numerically unstable in the first
        # iteration (when near 0) and won't produce the same gradients, so group norm is used instead.
- net = nn.Sequential(
- nn.Conv2d(1, 64, 3),
- nn.GroupNorm(64, 64, affine=True),
- nn.ReLU(inplace=inplace_relu),
- nn.MaxPool2d(2, 2),
- nn.Conv2d(64, 64, 3),
- nn.GroupNorm(64, 64, affine=True),
- nn.ReLU(inplace=inplace_relu),
- nn.MaxPool2d(2, 2),
- nn.Conv2d(64, 64, 3),
- nn.GroupNorm(64, 64, affine=True),
- nn.ReLU(inplace=inplace_relu),
- nn.MaxPool2d(2, 2),
- nn.Flatten(),
- nn.Linear(64, n_way)).to(device).to(dtype)
-
- fnet, params, buffers = _get_weights_and_functional_call_with_buffers(net, mechanism)
+ net = (
+ nn.Sequential(
+ nn.Conv2d(1, 64, 3),
+ nn.GroupNorm(64, 64, affine=True),
+ nn.ReLU(inplace=inplace_relu),
+ nn.MaxPool2d(2, 2),
+ nn.Conv2d(64, 64, 3),
+ nn.GroupNorm(64, 64, affine=True),
+ nn.ReLU(inplace=inplace_relu),
+ nn.MaxPool2d(2, 2),
+ nn.Conv2d(64, 64, 3),
+ nn.GroupNorm(64, 64, affine=True),
+ nn.ReLU(inplace=inplace_relu),
+ nn.MaxPool2d(2, 2),
+ nn.Flatten(),
+ nn.Linear(64, n_way),
+ )
+ .to(device)
+ .to(dtype)
+ )
+
+ fnet, params, buffers = _get_weights_and_functional_call_with_buffers(
+ net, mechanism
+ )
net = (params, buffers, fnet)
def loss_for_task(net, n_inner_iter, use_transform, x_spt, y_spt, x_qry, y_qry):
@@ -3934,8 +4146,7 @@ class TestExamplesCorrectness(TestCase):
qry_logits = fnet(new_params, buffers, x_qry)
qry_loss = F.cross_entropy(qry_logits, y_qry)
- qry_acc = (qry_logits.argmax(
- dim=1) == y_qry).sum() / querysz
+ qry_acc = (qry_logits.argmax(dim=1) == y_qry).sum() / querysz
return qry_loss, qry_acc
@@ -3948,34 +4159,47 @@ class TestExamplesCorrectness(TestCase):
# compute with vmap + grad
compute_loss = partial(loss_for_task, net, n_inner_iter, True)
qry_losses, _ = vmap(compute_loss)(x_spt, y_spt, x_qry, y_qry)
- list_params = params if mechanism == "make_functional" else list(params.values())
+ list_params = (
+ params if mechanism == "make_functional" else list(params.values())
+ )
result_grads = torch.autograd.grad(qry_losses.sum(), list_params)
# compute without vmap + grad
compute_loss = partial(loss_for_task, net, n_inner_iter, False)
- losses = [compute_loss(x_spt[i], y_spt[i], x_qry[i], y_qry[i])[0]
- for i in range(num_tasks)]
+ losses = [
+ compute_loss(x_spt[i], y_spt[i], x_qry[i], y_qry[i])[0]
+ for i in range(num_tasks)
+ ]
expected_grads = torch.autograd.grad(sum(losses), list_params)
self.assertEqual(result_grads, expected_grads)
- @parametrize('mechanism', ["make_functional", "functional_call"])
- @parametrize('originally_track_running_stats', [True, False])
+ @parametrize("mechanism", ["make_functional", "functional_call"])
+ @parametrize("originally_track_running_stats", [True, False])
def test_update_batch_norm(self, device, originally_track_running_stats, mechanism):
dtype = torch.double
inplace_relu = False
classes = 5
num_batches = 2
- net = nn.Sequential(
- nn.Conv2d(64, 64, 3),
- nn.BatchNorm2d(64, affine=True, track_running_stats=originally_track_running_stats),
- nn.ReLU(inplace=inplace_relu),
- nn.Flatten(),
- nn.Linear(43264, classes)).to(device).to(dtype)
+ net = (
+ nn.Sequential(
+ nn.Conv2d(64, 64, 3),
+ nn.BatchNorm2d(
+ 64, affine=True, track_running_stats=originally_track_running_stats
+ ),
+ nn.ReLU(inplace=inplace_relu),
+ nn.Flatten(),
+ nn.Linear(43264, classes),
+ )
+ .to(device)
+ .to(dtype)
+ )
replace_all_batch_norm_modules_(net)
transformed_net = net
- fnet, params, buffers = _get_weights_and_functional_call_with_buffers(transformed_net, mechanism)
+ fnet, params, buffers = _get_weights_and_functional_call_with_buffers(
+ transformed_net, mechanism
+ )
criterion = nn.CrossEntropyLoss()
def compute_loss(x, y, params, buffers):
@@ -3986,10 +4210,14 @@ class TestExamplesCorrectness(TestCase):
y = torch.randint(0, classes, (num_batches, 1), device=device)
# compute some per sample grads with vmap + grad
- result_grads = vmap(grad(compute_loss, argnums=2), in_dims=(0, 0, None, None))(x, y, params, buffers)
+ result_grads = vmap(grad(compute_loss, argnums=2), in_dims=(0, 0, None, None))(
+ x, y, params, buffers
+ )
# compute some per sample grads without vmap + grad
- fnet, params, buffers = _get_weights_and_functional_call_with_buffers(transformed_net, mechanism)
+ fnet, params, buffers = _get_weights_and_functional_call_with_buffers(
+ transformed_net, mechanism
+ )
flat_params, spec = tree_flatten(params)
expected_grads = [
torch.autograd.grad(compute_loss(x[i], y[i], params, buffers), flat_params)
@@ -4000,29 +4228,29 @@ class TestExamplesCorrectness(TestCase):
self.assertEqual(result_grads, expected_grads)
- @parametrize('jac', ['jacfwd', 'jacrev'])
+ @parametrize("jac", ["jacfwd", "jacrev"])
def test_lennard_jones_batched_jac(self, device, jac):
sigma = 0.5
- epsilon = 4.
+ epsilon = 4.0
jac = getattr(functorch, jac)
def lennard_jones(r):
- return epsilon * ((sigma / r)**12 - (sigma / r)**6)
+ return epsilon * ((sigma / r) ** 12 - (sigma / r) ** 6)
def lennard_jones_force(r):
"""Get magnitude of LJ force"""
- return \
- -epsilon * ((-12 * sigma**12 / r**13) + (6 * sigma**6 / r**7))
+ return -epsilon * (
+ (-12 * sigma**12 / r**13) + (6 * sigma**6 / r**7)
+ )
r = torch.linspace(0.5, 2 * sigma, steps=100, requires_grad=True, device=device)
drs = torch.outer(r, torch.tensor([1.0, 0, 0], device=device))
norms = torch.norm(drs, dim=1).reshape(-1, 1)
- training_energies = \
- torch.stack(list(map(lennard_jones, norms))).reshape(-1, 1)
+ training_energies = torch.stack(list(map(lennard_jones, norms))).reshape(-1, 1)
training_forces = torch.stack(
- [force * dr
- for force, dr in zip(map(lennard_jones_force, norms), drs)])
+ [force * dr for force, dr in zip(map(lennard_jones_force, norms), drs)]
+ )
model = nn.Sequential(
nn.Linear(1, 16),
@@ -4033,7 +4261,7 @@ class TestExamplesCorrectness(TestCase):
nn.Tanh(),
nn.Linear(16, 16),
nn.Tanh(),
- nn.Linear(16, 1)
+ nn.Linear(16, 1),
).to(device)
def make_prediction(model, drs, use_functorch):
@@ -4047,15 +4275,18 @@ class TestExamplesCorrectness(TestCase):
forces = []
for r, dr in zip(norms, drs):
network_deriv = torch.autograd.functional.jacobian(
- model, r, create_graph=True)
+ model, r, create_graph=True
+ )
force = -network_deriv * dr / r
forces.append(force)
forces = torch.cat(forces)
return energies, forces
def loss_fn(energies, forces, predicted_energies, predicted_forces):
- return F.mse_loss(energies, predicted_energies) + \
- 0.01 * F.mse_loss(forces, predicted_forces) / 3
+ return (
+ F.mse_loss(energies, predicted_energies)
+ + 0.01 * F.mse_loss(forces, predicted_forces) / 3
+ )
energies, forces = make_prediction(model, drs, use_functorch=True)
loss = loss_fn(training_energies, training_forces, energies, forces)
@@ -4067,11 +4298,11 @@ class TestExamplesCorrectness(TestCase):
self.assertEqual(result, expected)
- @parametrize('mechanism', ["make_functional", "functional_call"])
+ @parametrize("mechanism", ["make_functional", "functional_call"])
def test_ensemble_regression(self, device, mechanism):
- def make_spirals(n_samples, noise_std=0., rotations=1.):
+ def make_spirals(n_samples, noise_std=0.0, rotations=1.0):
ts = torch.linspace(0, 1, n_samples)
- rs = ts ** 0.5
+ rs = ts**0.5
thetas = rs * rotations * 2 * math.pi
signs = torch.randint(0, 2, (n_samples,)) * 2 - 1
labels = (signs > 0).to(torch.long)
@@ -4101,7 +4332,9 @@ class TestExamplesCorrectness(TestCase):
loss_fn = nn.NLLLoss()
- func_model, weights = _get_weights_and_functional_call(MLPClassifier().to(device), mechanism)
+ func_model, weights = _get_weights_and_functional_call(
+ MLPClassifier().to(device), mechanism
+ )
def train_step_fn(use_transform, weights, batch, targets, lr=0.2):
def compute_loss(weights, batch, targets):
@@ -4110,7 +4343,9 @@ class TestExamplesCorrectness(TestCase):
return loss
if use_transform:
- grad_weights, loss = grad_and_value(compute_loss)(weights, batch, targets)
+ grad_weights, loss = grad_and_value(compute_loss)(
+ weights, batch, targets
+ )
else:
loss = compute_loss(weights, batch, targets)
flat_weights, spec = tree_flatten(weights)
@@ -4131,37 +4366,54 @@ class TestExamplesCorrectness(TestCase):
return stack_module_state(models)[0]
def slice_weights(batched_weights, index):
- return tree_map(lambda weight: weight[index].detach().requires_grad_(), batched_weights)
+ return tree_map(
+ lambda weight: weight[index].detach().requires_grad_(), batched_weights
+ )
batched_weights = init_fn(num_models=2)
- parallel_train_step_fn = vmap(partial(train_step_fn, True), in_dims=(0, None, None))
+ parallel_train_step_fn = vmap(
+ partial(train_step_fn, True), in_dims=(0, None, None)
+ )
- result_loss, result_weights = unpack(parallel_train_step_fn(batched_weights, points, labels))
+ result_loss, result_weights = unpack(
+ parallel_train_step_fn(batched_weights, points, labels)
+ )
- loss0, weights0 = unpack(train_step_fn(False, slice_weights(batched_weights, 0), points, labels))
- loss1, weights1 = unpack(train_step_fn(False, slice_weights(batched_weights, 1), points, labels))
+ loss0, weights0 = unpack(
+ train_step_fn(False, slice_weights(batched_weights, 0), points, labels)
+ )
+ loss1, weights1 = unpack(
+ train_step_fn(False, slice_weights(batched_weights, 1), points, labels)
+ )
expected_loss = torch.stack([loss0, loss1])
weights0, spec0 = tree_flatten(weights0)
weights1, spec1 = tree_flatten(weights1)
assert spec0 == spec1
- expected_weights = tuple(torch.stack([w0, w1]) for w0, w1 in zip(weights0, weights1))
+ expected_weights = tuple(
+ torch.stack([w0, w1]) for w0, w1 in zip(weights0, weights1)
+ )
expected_weights = tree_unflatten(expected_weights, spec0)
self.assertEqual(result_loss, expected_loss)
self.assertEqual(result_weights, expected_weights)
- @parametrize("dropout_layer", [
- subtest(nn.Dropout, 'Dropout'),
- subtest(nn.AlphaDropout, 'AlphaDropout'),
- subtest(nn.FeatureAlphaDropout, 'FeatureAlphaDropout'),
- ])
- @parametrize('mechanism', ["make_functional", "functional_call"])
+ @parametrize(
+ "dropout_layer",
+ [
+ subtest(nn.Dropout, "Dropout"),
+ subtest(nn.AlphaDropout, "AlphaDropout"),
+ subtest(nn.FeatureAlphaDropout, "FeatureAlphaDropout"),
+ ],
+ )
+ @parametrize("mechanism", ["make_functional", "functional_call"])
def test_find_learning_rate_ensembling(self, device, dropout_layer, mechanism):
# This example mimics what a user might do when trying to find the optimal learning rate. They would
# want to run a bunch of models with the same behavior (including the same dropout!) and have them
# each run with different learning rates. Specifically, this is an example of using same randomness with vmap
- points, labels = torch.randn(100, 2, 2, 2, 2, device=device), torch.randint(0, 2, (100,), device=device)
+ points, labels = torch.randn(100, 2, 2, 2, 2, device=device), torch.randint(
+ 0, 2, (100,), device=device
+ )
class MLPClassifier(nn.Module):
def __init__(self, hidden_dim=32, n_classes=2):
@@ -4184,7 +4436,9 @@ class TestExamplesCorrectness(TestCase):
loss_fn = nn.NLLLoss()
- func_model, weights = _get_weights_and_functional_call(MLPClassifier().to(device), mechanism)
+ func_model, weights = _get_weights_and_functional_call(
+ MLPClassifier().to(device), mechanism
+ )
def train_step_fn(weights, batch, targets, lr):
def compute_loss(weights, batch, targets):
@@ -4204,31 +4458,42 @@ class TestExamplesCorrectness(TestCase):
def init_fn(num_models):
og_model = MLPClassifier().to(device)
- models = tuple(copy.deepcopy(og_model) for _ in range(num_models)) # have same initialization
+ models = tuple(
+ copy.deepcopy(og_model) for _ in range(num_models)
+ ) # have same initialization
if mechanism == "make_functional":
return combine_state_for_ensemble(models)[1]
else:
return stack_module_state(models)[0]
batched_weights = init_fn(num_models=2)
- parallel_train_step_fn = vmap(train_step_fn, in_dims=(0, None, None, 0), randomness="same")
+ parallel_train_step_fn = vmap(
+ train_step_fn, in_dims=(0, None, None, 0), randomness="same"
+ )
lrs = torch.tensor([0.2, 0.4], device=device)
- result_loss, result_weights = unpack(parallel_train_step_fn(batched_weights, points, labels, lrs))
+ result_loss, result_weights = unpack(
+ parallel_train_step_fn(batched_weights, points, labels, lrs)
+ )
self.assertEqual(result_loss[0], result_loss[1])
- self.assertNotEqual(tuple(weight[0] for weight in result_weights),
- tuple(weight[1] for weight in result_weights))
+ self.assertNotEqual(
+ tuple(weight[0] for weight in result_weights),
+ tuple(weight[1] for weight in result_weights),
+ )
@with_tf32_off # https://github.com/pytorch/pytorch/issues/86798
@unittest.skipIf(not USE_TORCHVISION, "test requires torchvision")
- @parametrize('mechanism', ["make_functional", "functional_call"])
+ @parametrize("mechanism", ["make_functional", "functional_call"])
def test_resnet18_per_sample_grads(self, device, mechanism):
import torchvision.models as models
- model = models.__dict__['resnet18'](
+
+ model = models.__dict__["resnet18"](
pretrained=False, norm_layer=(lambda c: nn.GroupNorm(min(32, c), c))
).to(device)
- criterion = nn.CrossEntropyLoss(reduction='sum') # avoid cross batch reductions for for loop comparison
+ criterion = nn.CrossEntropyLoss(
+ reduction="sum"
+ ) # avoid cross batch reductions for for loop comparison
func_model, weights = _get_weights_and_functional_call(model, mechanism)
@@ -4243,34 +4508,40 @@ class TestExamplesCorrectness(TestCase):
images = torch.randn(batch_size, 3, 32, 32, device=device)
targets = torch.randint(0, 10, (batch_size,), device=device)
- result_grads = vmap(grad(compute_loss), in_dims=(None, 0, 0))(weights, images, targets)
+ result_grads = vmap(grad(compute_loss), in_dims=(None, 0, 0))(
+ weights, images, targets
+ )
flat_weights, spec = tree_flatten(weights)
expected_grads = [
- torch.autograd.grad(compute_loss(weights, images[i], targets[i]), flat_weights)
+ torch.autograd.grad(
+ compute_loss(weights, images[i], targets[i]), flat_weights
+ )
for i in range(batch_size)
]
expected_grads = [torch.stack(shards) for shards in zip(*expected_grads)]
expected_grads = tree_unflatten(expected_grads, spec)
- self.assertEqual(result_grads, expected_grads, atol=1e-3, rtol=1.)
+ self.assertEqual(result_grads, expected_grads, atol=1e-3, rtol=1.0)
+
def normalize_devices(fx_g):
for node in fx_g.graph.nodes:
args = list(node.args)
for idx, arg in enumerate(args):
if isinstance(arg, torch.device):
- args[idx] = 'cpu'
+ args[idx] = "cpu"
node.args = tuple(args)
new_kwargs = {}
for k, v in node.kwargs.items():
if isinstance(v, torch.device):
- v = 'cpu'
+ v = "cpu"
new_kwargs[k] = v
node.kwargs = new_kwargs
fx_g.recompile()
return fx_g
+
@markDynamoStrictTest
class TestFunctionalize(TestCase):
def _check_functionalize_correctness(self, f, inpt, *, skip_vmap=False):
@@ -4287,7 +4558,7 @@ class TestFunctionalize(TestCase):
# isn't being used with vmap
# That's because {view}_copy ops don't have batching rules yet
# (although we should probably fix that)
- actual_outputs_view_copy = functionalize(f, remove='mutations_and_views')(inpt3)
+ actual_outputs_view_copy = functionalize(f, remove="mutations_and_views")(inpt3)
# Check that outputs are the same
self.assertEqual(actual_outputs, expected_outputs)
self.assertEqual(actual_outputs_view_copy, expected_outputs)
@@ -4297,26 +4568,25 @@ class TestFunctionalize(TestCase):
self.assertEqual(inpt1, inpt3)
def test_simple_view(self, device):
-
def f(x: torch.Tensor) -> torch.Tensor:
tmp = torch.ones(2, device=device)
y = x.view(4, 2)
y.add_(tmp)
return x
+
self._check_functionalize_correctness(f, torch.zeros(4, 2, device=device))
def test_multioutput_view(self, device):
-
def f(x: torch.Tensor) -> torch.Tensor:
tmp = torch.ones(2, device=device)
y1, y2 = x.split(2)
y1_view = y1.diagonal()
y1_view.add_(tmp)
return x
+
self._check_functionalize_correctness(f, torch.zeros(4, 2, device=device))
def test_inplace_view(self, device):
-
def f(x: torch.Tensor) -> torch.Tensor:
tmp = torch.ones(4, device=device)
y = x + x
@@ -4324,11 +4594,13 @@ class TestFunctionalize(TestCase):
z = y2[0]
z.add_(tmp)
return y
- self._check_functionalize_correctness(f, torch.zeros(4, 2, device=device), skip_vmap=True)
+
+ self._check_functionalize_correctness(
+ f, torch.zeros(4, 2, device=device), skip_vmap=True
+ )
# See https://github.com/pytorch/functorch/issues/780
def test_linear(self, device):
-
def f(x, y, z) -> torch.Tensor:
return torch._C._nn.linear(x, y, z)
@@ -4341,7 +4613,6 @@ class TestFunctionalize(TestCase):
self.assertEqual(out_expected, out_actual)
def test_multioutput_inplace_slice_view(self, device):
-
def f(x: torch.Tensor) -> torch.Tensor:
tmp = torch.ones(2, 2, device=device)
y = x.view(8)
@@ -4352,13 +4623,15 @@ class TestFunctionalize(TestCase):
z2, z3 = z1.split(2)
z2.add_(tmp)
return x
+
# See Note [Fix vmap slice_scatter]
- self._check_functionalize_correctness(f, torch.zeros(4, 2, device=device), skip_vmap=True)
+ self._check_functionalize_correctness(
+ f, torch.zeros(4, 2, device=device), skip_vmap=True
+ )
# Ensure functionalize works with List[Optional[Tensor]] arguments.
# See the fix / discussion at https://github.com/pytorch/pytorch/pull/76085
def test_functionalize_opt_tensor_list(self, device):
-
def f(x: torch.Tensor, indices: torch.Tensor) -> torch.Tensor:
return x[indices]
@@ -4368,18 +4641,20 @@ class TestFunctionalize(TestCase):
out2 = functionalize(f)(inpta, inptb)
self.assertEqual(out1, out2)
out = make_fx(functionalize(f))(inpta, inptb)
- self.assertExpectedInline((out.code), """\
+ self.assertExpectedInline(
+ (out.code),
+ """\
def forward(self, x_1, indices_1) -> torch.Tensor:
index = torch.ops.aten.index.Tensor(x_1, [indices_1]); x_1 = indices_1 = None
return index
- """)
+ """,
+ )
# Ensure grad(functionalize(f)) works
def test_functionalize_grad(self, device):
-
def f(x: torch.Tensor) -> torch.Tensor:
tmp = torch.ones(2, device=device)
y = x + x
@@ -4394,9 +4669,8 @@ def forward(self, x_1, indices_1) -> torch.Tensor:
self.assertEqual(out1, out2)
self.assertEqual(inpt1, inpt2)
- @unittest.skipIf(IS_FBCODE, 'fails in fbcode')
+ @unittest.skipIf(IS_FBCODE, "fails in fbcode")
def test_vmap_functionalize_jvp(self, device):
-
def f(x: torch.Tensor) -> torch.Tensor:
y = x + x
z = y.view(-1)
@@ -4404,7 +4678,11 @@ def forward(self, x_1, indices_1) -> torch.Tensor:
return z
def jvp_wrapper(x, t):
- return jvp(f, (x,), (t,),)
+ return jvp(
+ f,
+ (x,),
+ (t,),
+ )
x = torch.randn(2, 3, device=device)
t = torch.randn(2, 3, device=device)
@@ -4416,7 +4694,6 @@ def forward(self, x_1, indices_1) -> torch.Tensor:
# TODO: move this test into test_fake_tensor.py
# once functionalize() can be used in core tests.
def test_functionalize_fake_tensors(self, device):
-
def f(x: torch.Tensor) -> torch.Tensor:
y = x.detach()
return y + y
@@ -4427,18 +4704,20 @@ def forward(self, x_1, indices_1) -> torch.Tensor:
self.assertEqual(x.size(), (2,))
def test_functionalize_fx_simple(self, device):
-
def f(x: torch.Tensor) -> torch.Tensor:
tmp = torch.ones(2, device=device)
y = x.view(4, 2)
y.add_(tmp)
return x
+
# There's a copy_ in the graph, because the input (x) was mutated.
# To preserve semantics, functionalize() needs to propagate the mutation.
- fn = make_fx(functionalize(f, remove='mutations_and_views'))
+ fn = make_fx(functionalize(f, remove="mutations_and_views"))
out = fn(torch.zeros(4, 2, device=device))
out = normalize_devices(out)
- self.assertExpectedInline((out.code), """\
+ self.assertExpectedInline(
+ (out.code),
+ """\
@@ -4450,26 +4729,29 @@ def forward(self, x_1) -> torch.Tensor:
view_copy_2 = torch.ops.aten.view_copy.default(view_copy_1, [4, 2])
copy_ = torch.ops.aten.copy_.default(x_1, view_copy_1); x_1 = None
return view_copy_1
- """)
+ """,
+ )
def test_functionalize_fx_transpose_simple(self, device):
-
def f(x: torch.Tensor) -> torch.Tensor:
return x.transpose(1, 0)
- fn = make_fx(functionalize(f, remove='mutations_and_views'))
+
+ fn = make_fx(functionalize(f, remove="mutations_and_views"))
out = fn(torch.zeros(4, 2, device=device))
out = normalize_devices(out)
- self.assertExpectedInline(out.code, """\
+ self.assertExpectedInline(
+ out.code,
+ """\
def forward(self, x_1) -> torch.Tensor:
transpose_copy = torch.ops.aten.transpose_copy.int(x_1, 1, 0); x_1 = None
return transpose_copy
- """)
+ """,
+ )
def test_functionalize_fx_out_op(self, device):
-
def f(inpt: torch.Tensor) -> torch.Tensor:
out = torch.empty((), dtype=torch.float32)
torch.add(inpt, inpt, out=out)
@@ -4477,10 +4759,12 @@ def forward(self, x_1) -> torch.Tensor:
out_view.add_(1)
return out
- fn = make_fx(functionalize(f, remove='mutations_and_views'))
+ fn = make_fx(functionalize(f, remove="mutations_and_views"))
out = fn(torch.arange(4, device=device, dtype=torch.float32))
out = normalize_devices(out)
- self.assertExpectedInline(out.code, """\
+ self.assertExpectedInline(
+ out.code,
+ """\
@@ -4493,10 +4777,10 @@ def forward(self, inpt_1) -> torch.Tensor:
view_copy_2 = torch.ops.aten.view_copy.default(add_1, [4]); add_1 = None
view_copy_3 = torch.ops.aten.view_copy.default(view_copy_2, [4])
return view_copy_2
- """)
+ """,
+ )
def test_functionalize_fx_multi_out_op(self, device):
-
def f(inpt: torch.Tensor) -> torch.Tensor:
mins = torch.empty(4, dtype=torch.float32)
maxs = torch.empty(2, 2, dtype=torch.float32)
@@ -4505,10 +4789,12 @@ def forward(self, inpt_1) -> torch.Tensor:
torch.aminmax(inpt_view, dim=0, out=(mins, maxs_view))
return (maxs, mins)
- fn = make_fx(functionalize(f, remove='mutations_and_views'))
+ fn = make_fx(functionalize(f, remove="mutations_and_views"))
out = fn(torch.arange(8, device=device, dtype=torch.float32))
out = normalize_devices(out)
- self.assertExpectedInline(out.code, """\
+ self.assertExpectedInline(
+ out.code,
+ """\
@@ -4523,10 +4809,10 @@ def forward(self, inpt_1) -> torch.Tensor:
view_copy_2 = torch.ops.aten.view_copy.default(getitem_1, [2, 2]); getitem_1 = None
view_copy_3 = torch.ops.aten.view_copy.default(view_copy_2, [4])
return (view_copy_2, getitem)
- """)
+ """,
+ )
def test_functionalize_fx_reapply_views_simple(self, device):
-
def f(x: torch.Tensor) -> torch.Tensor:
tmp = torch.ones(2, device=device)
y = x.view(4, 2)
@@ -4535,7 +4821,9 @@ def forward(self, inpt_1) -> torch.Tensor:
out = make_fx(functionalize(f))(torch.zeros(4, 2, device=device))
out = normalize_devices(out)
- self.assertExpectedInline(out.code, """\
+ self.assertExpectedInline(
+ out.code,
+ """\
@@ -4547,10 +4835,10 @@ def forward(self, x_1) -> torch.Tensor:
view_2 = torch.ops.aten.view.default(view_1, [4, 2])
copy_ = torch.ops.aten.copy_.default(x_1, view_1); x_1 = None
return view_1
- """)
+ """,
+ )
def test_functionalize_nonfunctional_output(self, device):
-
global_out = torch.ones(2, device=device)
def f() -> torch.Tensor:
@@ -4558,17 +4846,19 @@ def forward(self, x_1) -> torch.Tensor:
out = make_fx(functionalize(f))()
out = normalize_devices(out)
- self.assertExpectedInline(out.code, """\
+ self.assertExpectedInline(
+ out.code,
+ """\
def forward(self) -> torch.Tensor:
_tensor_constant0 = self._tensor_constant0
return _tensor_constant0
- """)
+ """,
+ )
def test_functionalize_optional_tensorlist1(self, device):
-
def f(a, b) -> torch.Tensor:
# at::index has OptionalTensorList arguments,
# test that here
@@ -4578,18 +4868,20 @@ def forward(self) -> torch.Tensor:
b = torch.ones(2, dtype=torch.long)
out = make_fx(functionalize(f))(a, b)
out = normalize_devices(out)
- self.assertExpectedInline(out.code, """\
+ self.assertExpectedInline(
+ out.code,
+ """\
def forward(self, a_1, b_1) -> torch.Tensor:
index = torch.ops.aten.index.Tensor(a_1, [b_1]); a_1 = b_1 = None
return index
- """)
+ """,
+ )
- @unittest.skipIf(IS_FBCODE, 'fails in fbcode')
+ @unittest.skipIf(IS_FBCODE, "fails in fbcode")
def test_functionalize_optional_tensorlist2(self, device):
-
def f(a, b) -> torch.Tensor:
# See https://github.com/pytorch/pytorch/pull/77846
return torch.ops.aten.index(a, b)
@@ -4597,7 +4889,9 @@ def forward(self, a_1, b_1) -> torch.Tensor:
a = torch.arange(4).reshape(2, 2)
b = torch.ones(2, dtype=torch.long)
out = make_fx(functionalize(f))(a, b)
- self.assertExpectedInline(out.code, """\
+ self.assertExpectedInline(
+ out.code,
+ """\
@@ -4607,7 +4901,8 @@ def forward(self, a_1, b_1) -> torch.Tensor:
getitem_1 = unbind[1]; unbind = None
index = torch.ops.aten.index.Tensor(a_1, [getitem, getitem_1]); a_1 = getitem = getitem_1 = None
return index
- """)
+ """,
+ )
def test_resize_program_inputs(self, device):
def f(x):
@@ -4617,7 +4912,9 @@ def forward(self, a_1, b_1) -> torch.Tensor:
fn = make_fx(functionalize(f))
out = fn(torch.zeros(0, device=device))
out = normalize_devices(out)
- self.assertExpectedInline((out.code), """\
+ self.assertExpectedInline(
+ (out.code),
+ """\
@@ -4627,7 +4924,8 @@ def forward(self, x_1):
resize_ = torch.ops.aten.resize_.default(x_1, [10]); x_1 = None
copy_ = torch.ops.aten.copy_.default(resize_, fill); resize_ = fill = None
return None
- """)
+ """,
+ )
def construct_sum_pyop():
@@ -4683,11 +4981,12 @@ def construct_sum_pyop():
return mysum
+
sum_pyop = construct_sum_pyop()
+
@markDynamoStrictTest
class TestHigherOrderOperatorInteraction(TestCase):
-
def test_basic_sum(self, device):
x = torch.randn(2, 3, 4, device=device)
result = sum_pyop(x, 1)
@@ -4734,8 +5033,8 @@ class TestHigherOrderOperatorInteraction(TestCase):
def test_no_grad_inside_grad(self, device):
def f(x):
with torch.no_grad():
- shift = sum_pyop(x ** 2, 0)
- return sum_pyop(x ** 2, 0) - shift
+ shift = sum_pyop(x**2, 0)
+ return sum_pyop(x**2, 0) - shift
x = torch.randn(3, device=device)
y = grad(f)(x)
@@ -4745,22 +5044,23 @@ class TestHigherOrderOperatorInteraction(TestCase):
x = torch.randn(3, device=device, requires_grad=True)
y = grad(f)(x)
- z, = torch.autograd.grad(y.sum(), x)
+ (z,) = torch.autograd.grad(y.sum(), x)
self.assertEqual(z, torch.full_like(x, 2))
def test_grad_name_wrapping(self, device):
-
def my_fn(x):
return x.sum()
+
grad_fn = grad(my_fn)
self.assertEqual(grad_fn.__name__, "my_fn")
def test_functional_call_multiple_dicts(self):
mod = nn.Linear(1, 1)
x = torch.randn((1, 1))
- params = ({'weight': torch.zeros(1, 1)}, {'bias': torch.ones(1)})
+ params = ({"weight": torch.zeros(1, 1)}, {"bias": torch.ones(1)})
functional_call(mod, params, x)
+
def traceable(f):
f = allow_in_graph(f)
@@ -4776,9 +5076,9 @@ class TestCompileTransforms(TestCase):
@skipIfRocm(msg="test leaks memory on ROCm")
# torch.compile is not supported on Windows
# Triton only supports GPU with SM70 or later.
- @expectedFailureIf((IS_ARM64 and not IS_MACOS) or
- IS_WINDOWS or
- (TEST_CUDA and not SM70OrLater))
+ @expectedFailureIf(
+ (IS_ARM64 and not IS_MACOS) or IS_WINDOWS or (TEST_CUDA and not SM70OrLater)
+ )
def test_compile_vmap_hessian(self, device):
# The model and inputs are a smaller version
# of code at benchmark repo:
@@ -4790,7 +5090,10 @@ class TestCompileTransforms(TestCase):
model = nn.Sequential(nn.Linear(D, D), nn.ReLU()).to(device)
- params_and_buffers = (dict(model.named_parameters()), dict(model.named_buffers()))
+ params_and_buffers = (
+ dict(model.named_parameters()),
+ dict(model.named_buffers()),
+ )
def predict(params_and_buffers, x):
out = torch.func.functional_call(model, params_and_buffers, x)
@@ -4818,17 +5121,18 @@ class TestCompileTransforms(TestCase):
return functorch.grad(torch.mul)(x, y)
actual = wrapper_fn(x, y)
- expected = torch.compile(wrapper_fn, backend='eager', fullgraph=True)(x, y)
- fn = torch.compile(wrapper_fn, backend='eager', fullgraph=True)
+ expected = torch.compile(wrapper_fn, backend="eager", fullgraph=True)(x, y)
+ fn = torch.compile(wrapper_fn, backend="eager", fullgraph=True)
self.assertEqual(actual, expected)
def wrapper_fn(x, y):
return functorch.grad(torch.mul, argnums=(0, 1))(x, y)
actual = wrapper_fn(x, y)
- expected = torch.compile(wrapper_fn, backend='eager', fullgraph=True)(x, y)
+ expected = torch.compile(wrapper_fn, backend="eager", fullgraph=True)(x, y)
self.assertEqual(actual, expected)
+
only_for = ("cpu", "cuda")
instantiate_device_type_tests(
TestGradTransform,
@@ -4909,5 +5213,5 @@ instantiate_device_type_tests(
only_for=only_for,
)
-if __name__ == '__main__':
+if __name__ == "__main__":
run_tests()
diff --git a/test/functorch/test_logging.py b/test/functorch/test_logging.py
index 042e8607cb..658750d323 100644
--- a/test/functorch/test_logging.py
+++ b/test/functorch/test_logging.py
@@ -1,25 +1,23 @@
# Owner(s): ["module: dynamo"]
+import logging
+
import torch
-from torch.testing._internal.common_utils import run_tests
-from torch.testing._internal.logging_utils import LoggingTestCase, make_logging_test
from torch._functorch.aot_autograd import aot_function
from torch._functorch.compilers import nop
-import logging
+from torch.testing._internal.common_utils import run_tests
+from torch.testing._internal.logging_utils import LoggingTestCase, make_logging_test
-class TestAOTLogging(LoggingTestCase):
+class TestAOTLogging(LoggingTestCase):
@make_logging_test(aot=logging.DEBUG)
def test_logging(self, records):
def f(x):
return torch.sin(x)
- compiled_f = aot_function(
- f,
- fw_compiler=nop,
- bw_compiler=nop
- )
+
+ compiled_f = aot_function(f, fw_compiler=nop, bw_compiler=nop)
compiled_f(torch.randn(3))
self.assertGreater(len(records), 0)
-if __name__ == '__main__':
+if __name__ == "__main__":
run_tests()
diff --git a/test/functorch/test_memory_efficient_fusion.py b/test/functorch/test_memory_efficient_fusion.py
index ff16252c2d..44cd5e5e5e 100644
--- a/test/functorch/test_memory_efficient_fusion.py
+++ b/test/functorch/test_memory_efficient_fusion.py
@@ -1,17 +1,18 @@
# Owner(s): ["module: functorch"]
+import inspect
+import random
+import unittest
+from typing import Callable
+
import torch
-import torch.nn as nn
import torch.fx as fx
+import torch.nn as nn
from functorch import make_fx
-from torch.nn import functional as F
from functorch.compile import memory_efficient_fusion
from torch._functorch.compile_utils import fx_graph_cse
-from torch.testing._internal.common_utils import TestCase, run_tests
-import inspect
-import random
-from typing import Callable
-import unittest
+from torch.nn import functional as F
+from torch.testing._internal.common_utils import run_tests, TestCase
HAS_CUDA = torch.cuda.is_available()
@@ -101,7 +102,10 @@ def run_and_compare_activation(self, fn, inps):
if isinstance(fn, nn.Module):
fn = fn.to(device=device, dtype=dtype)
- ref_args = [torch.randn(shape, device=device, dtype=dtype, requires_grad=True) for shape in inps]
+ ref_args = [
+ torch.randn(shape, device=device, dtype=dtype, requires_grad=True)
+ for shape in inps
+ ]
res_args = [i.clone().detach().requires_grad_(True) for i in ref_args]
ref = fn(*ref_args)
@@ -143,7 +147,7 @@ class TestMemoryEfficientOpAuthoring(TestCase):
mean = torch.mean(x, dim, keepdim=True)
centered = x - mean
var = torch.sum(centered * centered, dim, keepdim=True) / x.size(-1)
- rvar = 1. / torch.sqrt(var + eps)
+ rvar = 1.0 / torch.sqrt(var + eps)
normed = (x - mean) * rvar
return normed * weight + bias
@@ -165,13 +169,16 @@ class TestMemoryEfficientOpAuthoring(TestCase):
def forward(self, hidden_states):
# layer norm should always be calculated in float32
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
- hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+ hidden_states = hidden_states * torch.rsqrt(
+ variance + self.variance_epsilon
+ )
# convert into half-precision if necessary
if self.weight.dtype in [torch.float16, torch.bfloat16]:
hidden_states = hidden_states.to(self.weight.dtype)
return self.weight * hidden_states
+
bs = 256
seq = 256
hidden = 1024
@@ -200,31 +207,36 @@ def check(f, t, delta, check_val=True, graph_input=False):
old_num_nodes = len(fx_g.graph.nodes)
new_num_nodes = len(new_graph.nodes)
if delta == -1:
- assert old_num_nodes >= new_num_nodes, (
- f"number of nodes increased {old_num_nodes}, {new_num_nodes}")
+ assert (
+ old_num_nodes >= new_num_nodes
+ ), f"number of nodes increased {old_num_nodes}, {new_num_nodes}"
else:
- assert old_num_nodes == new_num_nodes + delta, (
- f"number of nodes not the same {old_num_nodes - delta}, {new_num_nodes}\n {fx_g.graph} \n {new_graph}")
+ assert (
+ old_num_nodes == new_num_nodes + delta
+ ), f"number of nodes not the same {old_num_nodes - delta}, {new_num_nodes}\n {fx_g.graph} \n {new_graph}"
# a second pass should not reduce more nodes
pass_2_graph = fx_graph_cse(new_graph)
pass_2_num_nodes = len(pass_2_graph.nodes)
- assert pass_2_num_nodes == new_num_nodes, (
- f"second pass graph has less node {pass_2_num_nodes}, {new_num_nodes}\n {new_graph} \n {pass_2_graph}")
+ assert (
+ pass_2_num_nodes == new_num_nodes
+ ), f"second pass graph has less node {pass_2_num_nodes}, {new_num_nodes}\n {new_graph} \n {pass_2_graph}"
# check correctness
if check_val:
true_result = fx_g(t)
our_result = new_g(t)
if true_result is None: # both return None
- assert our_result is None, f"true result is None, CSE result is {our_result}"
+ assert (
+ our_result is None
+ ), f"true result is None, CSE result is {our_result}"
else: # results returned are the same
- assert torch.all(true_result == our_result), (
- f"results are different {true_result}, {our_result}") # check results are the same
+ assert torch.all(
+ true_result == our_result
+ ), f"results are different {true_result}, {our_result}" # check results are the same
class NoChangeTestCase(TestCase):
-
def test_nochange(self):
def f(x):
a = x + 1
@@ -232,12 +244,14 @@ class NoChangeTestCase(TestCase):
a = x
d = x + a
return b + d
+
t = torch.randn(2, 2)
check(f, t, 0)
def test_empty(self):
def f(x):
pass
+
t = torch.randn(2, 2)
check(f, t, 0)
@@ -246,6 +260,7 @@ class NoChangeTestCase(TestCase):
a = torch.rand_like(x)
b = torch.rand_like(x)
return a + b
+
t = torch.randn(2, 2)
check(f, t, 0, check_val=False)
@@ -254,6 +269,7 @@ class NoChangeTestCase(TestCase):
a = torch.randn(4)
b = torch.randn(4)
return a + b
+
t = torch.randn(2, 2)
check(f, t, 0, check_val=False)
@@ -290,7 +306,6 @@ class NoChangeTestCase(TestCase):
class ReduceTestCase(TestCase):
-
def test_immutable_list_type(self):
def f(x):
a = x.sum(dim=1)
@@ -298,6 +313,7 @@ class ReduceTestCase(TestCase):
c = x.sum()
d = x.sum()
return a + b + c + d
+
t = torch.randn(2, 2)
check(f, t, 2)
@@ -308,6 +324,7 @@ class ReduceTestCase(TestCase):
c = x.sum(dim=1)
d = x.sum(dim=1)
return a + b + c + d
+
t = torch.randn(2, 2)
check(f, t, 2)
@@ -318,6 +335,7 @@ class ReduceTestCase(TestCase):
c = a + a
d = b + b
return c + d
+
t = torch.randn(2, 2)
check(f, t, 2)
@@ -328,6 +346,7 @@ class ReduceTestCase(TestCase):
c = a + a
d = b + b
return c + d
+
t = torch.randn(1)
check(f, t, 3)
@@ -338,6 +357,7 @@ class ReduceTestCase(TestCase):
c = x.sum(dim=1, keepdim=False)
d = x.sum(dim=1)
return a + b + c + d
+
t = torch.randn(2, 2)
check(f, t, 3)
@@ -348,6 +368,7 @@ class ReduceTestCase(TestCase):
c = x.sum(dim=1, keepdim=True)
d = x.sum(dim=1)
return a + b + c + d
+
t = torch.randn(2, 2)
check(f, t, 2)
@@ -358,6 +379,7 @@ class ReduceTestCase(TestCase):
c = x.sum()
d = x.sum()
return a + b + c + d
+
t = torch.randn(2, 2)
check(f, t, 3)
@@ -366,6 +388,7 @@ class ReduceTestCase(TestCase):
a = torch.cat((x, x))
b = torch.cat((x, x))
return a + b
+
t = torch.randn(2, 2)
check(f, t, 1)
@@ -374,6 +397,7 @@ class ReduceTestCase(TestCase):
a = torch.ones_like(x)
b = torch.ones_like(x)
return a + b
+
t = torch.randn(2, 2)
check(f, t, 1)
@@ -397,6 +421,5 @@ class RandomOpTestCase(TestCase):
check(fx_g, t, -1, graph_input=True)
-
if __name__ == "__main__":
run_tests()
diff --git a/test/functorch/test_minifier.py b/test/functorch/test_minifier.py
index 9e6f495bcd..c354cedc4e 100644
--- a/test/functorch/test_minifier.py
+++ b/test/functorch/test_minifier.py
@@ -1,10 +1,10 @@
# Owner(s): ["module: functorch"]
import torch
-from functorch.compile import minifier
-from torch._functorch.compile_utils import get_placeholders, get_outputs
from functorch import make_fx
-from torch.testing._internal.common_utils import TestCase, run_tests
+from functorch.compile import minifier
+from torch._functorch.compile_utils import get_outputs, get_placeholders
+from torch.testing._internal.common_utils import run_tests, TestCase
class TestMinifier(TestCase):
@@ -14,11 +14,12 @@ class TestMinifier(TestCase):
x = x + 3
x = x * y
return x + y
+
inps = [torch.randn(3), torch.randn(3)]
failing_f = make_fx(failing_f)(*inps)
def has_mul(fx_g, inps):
- return (torch.ops.aten.mul.Tensor in (i.target for i in fx_g.graph.nodes))
+ return torch.ops.aten.mul.Tensor in (i.target for i in fx_g.graph.nodes)
min_f, inps = minifier(failing_f, inps, has_mul)
self.assertEqual(len(min_f.graph.nodes), 4)
@@ -54,6 +55,7 @@ class TestMinifier(TestCase):
c = c.cos()
d = a * c
return (a, b, c, d)
+
inps = [torch.randn(3) for _ in range(3)]
def inputs_returned(fx_g, inps):
@@ -74,7 +76,7 @@ class TestMinifier(TestCase):
inps = [torch.randn(3), torch.randn(3)]
def has_add(fx_g, inps):
- return (torch.ops.aten.add.Tensor in (i.target for i in fx_g.graph.nodes))
+ return torch.ops.aten.add.Tensor in (i.target for i in fx_g.graph.nodes)
failing_f = make_fx(f)(*inps)
min_f, inps = minifier(failing_f, inps, has_add)
diff --git a/test/functorch/test_ops.py b/test/functorch/test_ops.py
index 5269fc47f3..18f8bf6ee8 100644
--- a/test/functorch/test_ops.py
+++ b/test/functorch/test_ops.py
@@ -6,50 +6,61 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
+import functools
import itertools
import unittest
-from torch.testing._internal.common_utils import unMarkDynamoStrictTest
-from torch.testing._internal.common_utils import TestCase, run_tests, is_iterable_of_tensors, IS_MACOS, \
- IS_X86, parametrize, TEST_WITH_ASAN, TEST_WITH_ROCM, noncontiguous_like
-from torch.testing._internal.common_utils import skipIfRocm, runOnRocm
import torch
-from torch import Tensor
-import functools
-from torch.testing._internal.common_cuda import with_tf32_off
-from torch.testing._internal.common_device_type import instantiate_device_type_tests
-from torch.testing._internal.common_device_type import ops
-from torch.testing._internal.common_device_type import \
- toleranceOverride, tol
-from functorch_additional_op_db import additional_op_db
-from torch.testing._internal.common_methods_invocations import op_db
+import torch.autograd.forward_ad as fwAD
from common_utils import (
- get_fallback_and_vmap_exhaustive,
- generate_vmap_inputs,
- decorate,
- xfail,
- skip,
- skipOps,
- tol1,
- tol2,
- opsToleranceOverride,
check_vmap_fallback,
+ decorate,
+ expectedFailureIf,
+ generate_vmap_inputs,
+ get_fallback_and_vmap_exhaustive,
is_batch_norm_training,
is_valid_inplace_sample_input,
loop,
loop2,
- expectedFailureIf,
+ opsToleranceOverride,
+ skip,
+ skipOps,
+ tol1,
+ tol2,
+ xfail,
)
-from torch.testing._internal.autograd_function_db import (
- autograd_function_db
+from functorch import grad, jacfwd, jacrev, vjp, vmap
+from functorch_additional_op_db import additional_op_db
+from torch import Tensor
+from torch._functorch.eager_transforms import _as_tuple, jvp
+from torch.testing._internal.autograd_function_db import autograd_function_db
+from torch.testing._internal.common_cuda import with_tf32_off
+from torch.testing._internal.common_device_type import (
+ instantiate_device_type_tests,
+ ops,
+ tol,
+ toleranceOverride,
+)
+from torch.testing._internal.common_methods_invocations import op_db
+
+from torch.testing._internal.common_utils import (
+ is_iterable_of_tensors,
+ IS_MACOS,
+ IS_X86,
+ noncontiguous_like,
+ parametrize,
+ run_tests,
+ runOnRocm,
+ skipIfRocm,
+ TEST_WITH_ASAN,
+ TEST_WITH_ROCM,
+ TestCase,
+ unMarkDynamoStrictTest,
)
from torch.testing._internal.opinfo.core import SampleInput
-from torch.utils._pytree import tree_flatten, tree_unflatten, tree_map
from torch.utils import _pytree as pytree
-from functorch import grad, vjp, vmap, jacrev, jacfwd
-import torch.autograd.forward_ad as fwAD
-from torch._functorch.eager_transforms import _as_tuple, jvp
+from torch.utils._pytree import tree_flatten, tree_map, tree_unflatten
aten = torch.ops.aten
@@ -102,6 +113,7 @@ def diff_arg(arg, requires_grad=True):
return arg.requires_grad
else:
return arg.is_floating_point() or arg.is_complex()
+
if is_iterable_of_tensors(arg):
if all(is_differentiable_arg(a) for a in arg):
return True
@@ -115,9 +127,15 @@ def diff_arg(arg, requires_grad=True):
# - f' takes only positional arguments
# - All arguments to f' are floating-point Tensors
# - All outputs of f' are floating-point Tensors
-def normalize_op_input_output2(f, args, kwargs, output_process_fn_grad=None, requires_grad=True):
+def normalize_op_input_output2(
+ f, args, kwargs, output_process_fn_grad=None, requires_grad=True
+):
flat_args, args_spec = tree_flatten(args)
- diff_argnums = tuple(i for i, arg in enumerate(flat_args) if diff_arg(arg, requires_grad=requires_grad))
+ diff_argnums = tuple(
+ i
+ for i, arg in enumerate(flat_args)
+ if diff_arg(arg, requires_grad=requires_grad)
+ )
assert len(diff_argnums) > 0
primals = tuple(flat_args[i] for i in diff_argnums)
@@ -134,15 +152,21 @@ def normalize_op_input_output2(f, args, kwargs, output_process_fn_grad=None, req
result = tuple(r for r in result if torch.is_floating_point(r))
assert len(result) > 0
return result
+
return wrapped, primals
# TODO: consolidate with normalize_op_input_output2
-def normalize_op_input_output3(f, args, kwargs, sample_args, output_process_fn_grad=None):
+def normalize_op_input_output3(
+ f, args, kwargs, sample_args, output_process_fn_grad=None
+):
flat_args, args_spec = tree_flatten(args)
flat_sample_args = pytree.tree_leaves(sample_args)
- diff_argnums = tuple(i for i, (arg, sample) in enumerate(zip(flat_args, flat_sample_args))
- if diff_arg(sample, requires_grad=True))
+ diff_argnums = tuple(
+ i
+ for i, (arg, sample) in enumerate(zip(flat_args, flat_sample_args))
+ if diff_arg(sample, requires_grad=True)
+ )
assert len(diff_argnums) > 0
primals = tuple(flat_args[i] for i in diff_argnums)
@@ -159,13 +183,18 @@ def normalize_op_input_output3(f, args, kwargs, sample_args, output_process_fn_g
result = tuple(r for r in result if torch.is_floating_point(r))
assert len(result) > 0
return result
+
return wrapped, primals
def normalize_op_input_output(f, sample, requires_grad=True):
args = tuple([sample.input] + list(sample.args))
return normalize_op_input_output2(
- f, args, sample.kwargs, sample.output_process_fn_grad, requires_grad=requires_grad
+ f,
+ args,
+ sample.kwargs,
+ sample.output_process_fn_grad,
+ requires_grad=requires_grad,
)
@@ -209,14 +238,14 @@ def get_vjp_fn_and_args_with_cotangents(f, sample, cotangents):
@functools.wraps(f)
def wrapped(*args):
assert len(args) == len(flat_args) + len(flat_cotangents)
- actual_args = args[:len(flat_args)]
- cotangents = args[len(flat_args):]
+ actual_args = args[: len(flat_args)]
+ cotangents = args[len(flat_args) :]
actual_args = tree_unflatten(actual_args, args_spec)
cotangents = tree_unflatten(cotangents, cotangents_spec)
- fn, primals = normalize_op_input_output3(f, actual_args, kwargs,
- flat_args,
- sample.output_process_fn_grad)
+ fn, primals = normalize_op_input_output3(
+ f, actual_args, kwargs, flat_args, sample.output_process_fn_grad
+ )
_, vjp_fn = vjp(fn, *primals)
return vjp_fn(cotangents)
@@ -238,7 +267,8 @@ def get_vjpfull_variant2(f, args, kwargs):
def _get_vjpfull_variant(fn, primals):
result = fn(*primals)
cotangents = _as_tuple(
- tree_map(lambda x: torch.randn_like(x, requires_grad=True), result))
+ tree_map(lambda x: torch.randn_like(x, requires_grad=True), result)
+ )
num_primals = len(primals)
args = (*primals, *cotangents)
@@ -259,8 +289,7 @@ def get_jvp_variant(f, sample):
# We want this higher-order variant of jvp, so that it can
# be used to wrap vmap
fn, primals = normalize_op_input_output(f, sample, requires_grad=False)
- tangents = _as_tuple(
- tree_map(lambda x: torch.randn_like(x), primals))
+ tangents = _as_tuple(tree_map(lambda x: torch.randn_like(x), primals))
@functools.wraps(f)
def wrapped(*args):
@@ -277,12 +306,13 @@ def get_jvp_variant(f, sample):
return wrapped, tangents
-def get_jvp_variant_primals_tangents2(f, args, kwargs, output_process_fn_grad=None,
- requires_grad=False):
- fn, primals = normalize_op_input_output2(f, args, kwargs, output_process_fn_grad,
- requires_grad)
- tangents = _as_tuple(
- tree_map(lambda x: torch.randn_like(x), primals))
+def get_jvp_variant_primals_tangents2(
+ f, args, kwargs, output_process_fn_grad=None, requires_grad=False
+):
+ fn, primals = normalize_op_input_output2(
+ f, args, kwargs, output_process_fn_grad, requires_grad
+ )
+ tangents = _as_tuple(tree_map(lambda x: torch.randn_like(x), primals))
return _get_jvp_variant(fn, primals, tangents)
@@ -290,16 +320,15 @@ def get_jvp_variant_primals_tangents(f, sample):
# We want this higher-order variant of jvp, so that it can
# be used to wrap vmap
fn, primals = normalize_op_input_output(f, sample, requires_grad=False)
- tangents = _as_tuple(
- tree_map(lambda x: torch.randn_like(x), primals))
+ tangents = _as_tuple(tree_map(lambda x: torch.randn_like(x), primals))
return _get_jvp_variant(fn, primals, tangents)
def _get_jvp_variant(fn, primals, tangents):
@functools.wraps(fn)
def wrapped(*args):
- primals_in = args[:len(primals)]
- tangents_in = args[len(primals):]
+ primals_in = args[: len(primals)]
+ tangents_in = args[len(primals) :]
primals_out, tangents_out = jvp(fn, primals_in, tangents_in)
if isinstance(primals_out, torch.Tensor):
@@ -319,57 +348,57 @@ def is_inplace(op, variant):
vjp_fail = {
- xfail('tensor_split'), # data_ptr composite compliance
- decorate('nn.functional.batch_norm', decorator=skipIfRocm),
- decorate('nn.functional.instance_norm', decorator=skipIfRocm),
+ xfail("tensor_split"), # data_ptr composite compliance
+ decorate("nn.functional.batch_norm", decorator=skipIfRocm),
+ decorate("nn.functional.instance_norm", decorator=skipIfRocm),
# https://github.com/pytorch/pytorch/issues/96560
- decorate('nn.functional.scaled_dot_product_attention', decorator=skipIfRocm),
+ decorate("nn.functional.scaled_dot_product_attention", decorator=skipIfRocm),
}
aliasing_ops = {
- 'T',
- 'broadcast_to',
- 'conj',
- 'contiguous',
- 'diagonal', # linalg.diagonal is an alias
- 'expand',
- 'flatten',
- 'imag',
- 'mH', # adjoint is an alias
- 'mT',
- 'movedim', # moveaxis is an alias
- 'narrow',
- 'permute',
- 'positive',
+ "T",
+ "broadcast_to",
+ "conj",
+ "contiguous",
+ "diagonal", # linalg.diagonal is an alias
+ "expand",
+ "flatten",
+ "imag",
+ "mH", # adjoint is an alias
+ "mT",
+ "movedim", # moveaxis is an alias
+ "narrow",
+ "permute",
+ "positive",
# 'ravel', is composite implicit autograd and may call clone
- 'real',
- 'reshape',
- 'resolve_conj',
- 'resolve_neg',
- 'select',
- 'squeeze',
- 'transpose', # swapdims and swapaxes are aliases
- 'unflatten',
- 'unfold',
- 'unsqueeze',
- 'view',
- 'view_as',
- 'view_as_complex',
- 'view_as_real',
+ "real",
+ "reshape",
+ "resolve_conj",
+ "resolve_neg",
+ "select",
+ "squeeze",
+ "transpose", # swapdims and swapaxes are aliases
+ "unflatten",
+ "unfold",
+ "unsqueeze",
+ "view",
+ "view_as",
+ "view_as_complex",
+ "view_as_real",
}
aliasing_ops_list_return = {
- 'chunks',
- 'dsplit',
- 'hsplit',
- 'split',
- 'unbind',
- 'vsplit',
+ "chunks",
+ "dsplit",
+ "hsplit",
+ "split",
+ "unbind",
+ "vsplit",
# 'tensor_split' not composite compliant, see vjp_fail
}
skip_noncontig = {
- '_batch_norm_with_update',
+ "_batch_norm_with_update",
}
@@ -378,54 +407,80 @@ skip_noncontig = {
class TestOperators(TestCase):
@with_tf32_off # https://github.com/pytorch/pytorch/issues/86798
@ops(op_db + additional_op_db + autograd_function_db, allowed_dtypes=(torch.float,))
- @skipOps('TestOperators', 'test_grad', vjp_fail.union({
- xfail('chalf', '', device_type='cpu'), # RuntimeError: "sum_cpu" not implemented for 'ComplexHalf'
- xfail('sparse.sampled_addmm', ''), # RuntimeError: Sparse CSR tensors do not have strides
- xfail('sparse.mm', 'reduce'), # RuntimeError: Sparse CSR tensors do not have strides
-
- # Non-contiguous Bugs
- #
- # AssertionError: Tensor-likes are not close!
- xfail('_softmax_backward_data', device_type='cpu'),
- xfail('as_strided'),
- xfail('as_strided', 'partial_views'),
-
- # RuntimeError: !self.requires_grad() || self.is_contiguous()
- xfail('as_strided_scatter'),
-
- # RuntimeError: Tensor must have a last dimension with stride 1
- xfail('view_as_complex'),
- # query: last dimension must be contiguous
- # Fused attention kernels require last dim to be contiguous
- xfail('nn.functional.scaled_dot_product_attention'),
- xfail("torch.ops.aten._flash_attention_forward"),
- xfail("torch.ops.aten._efficient_attention_forward"),
-
- # RuntimeError: Expected contiguous tensor, but got
- # non-contiguous tensor for argument #2 'grad_output'
- decorate(
- '_batch_norm_with_update',
- decorator=expectedFailureIf(TEST_WITH_ROCM),
- device_type='cuda',
- )
- }))
- @opsToleranceOverride('TestOperators', 'test_grad', (
- tol1('nn.functional.binary_cross_entropy_with_logits',
- {torch.float32: tol(atol=1e-04, rtol=1e-04)}),
- tol1('masked.cumprod',
- {torch.float32: tol(atol=1e-05, rtol=1e-05)}),
- tol1('svd_lowrank',
- {torch.float32: tol(atol=3e-04, rtol=3e-04)}, device_type='cuda'),
- tol1('linalg.tensorsolve',
- {torch.float32: tol(atol=3e-04, rtol=3e-04)}, device_type='cuda'),
- tol1('nn.functional.multi_head_attention_forward',
- {torch.float32: tol(atol=8e-04, rtol=1e-03)}),
- tol1('__rmatmul__',
- {torch.float32: tol(atol=3e-04, rtol=3e-04)}, device_type='cuda'),
- tol1('matmul',
- {torch.float32: tol(atol=3e-04, rtol=3e-04)}, device_type='cuda'),
-
- ))
+ @skipOps(
+ "TestOperators",
+ "test_grad",
+ vjp_fail.union(
+ {
+ xfail(
+ "chalf", "", device_type="cpu"
+ ), # RuntimeError: "sum_cpu" not implemented for 'ComplexHalf'
+ xfail(
+ "sparse.sampled_addmm", ""
+ ), # RuntimeError: Sparse CSR tensors do not have strides
+ xfail(
+ "sparse.mm", "reduce"
+ ), # RuntimeError: Sparse CSR tensors do not have strides
+ # Non-contiguous Bugs
+ #
+ # AssertionError: Tensor-likes are not close!
+ xfail("_softmax_backward_data", device_type="cpu"),
+ xfail("as_strided"),
+ xfail("as_strided", "partial_views"),
+ # RuntimeError: !self.requires_grad() || self.is_contiguous()
+ xfail("as_strided_scatter"),
+ # RuntimeError: Tensor must have a last dimension with stride 1
+ xfail("view_as_complex"),
+ # query: last dimension must be contiguous
+ # Fused attention kernels require last dim to be contiguous
+ xfail("nn.functional.scaled_dot_product_attention"),
+ xfail("torch.ops.aten._flash_attention_forward"),
+ xfail("torch.ops.aten._efficient_attention_forward"),
+ # RuntimeError: Expected contiguous tensor, but got
+ # non-contiguous tensor for argument #2 'grad_output'
+ decorate(
+ "_batch_norm_with_update",
+ decorator=expectedFailureIf(TEST_WITH_ROCM),
+ device_type="cuda",
+ ),
+ }
+ ),
+ )
+ @opsToleranceOverride(
+ "TestOperators",
+ "test_grad",
+ (
+ tol1(
+ "nn.functional.binary_cross_entropy_with_logits",
+ {torch.float32: tol(atol=1e-04, rtol=1e-04)},
+ ),
+ tol1("masked.cumprod", {torch.float32: tol(atol=1e-05, rtol=1e-05)}),
+ tol1(
+ "svd_lowrank",
+ {torch.float32: tol(atol=3e-04, rtol=3e-04)},
+ device_type="cuda",
+ ),
+ tol1(
+ "linalg.tensorsolve",
+ {torch.float32: tol(atol=3e-04, rtol=3e-04)},
+ device_type="cuda",
+ ),
+ tol1(
+ "nn.functional.multi_head_attention_forward",
+ {torch.float32: tol(atol=8e-04, rtol=1e-03)},
+ ),
+ tol1(
+ "__rmatmul__",
+ {torch.float32: tol(atol=3e-04, rtol=3e-04)},
+ device_type="cuda",
+ ),
+ tol1(
+ "matmul",
+ {torch.float32: tol(atol=3e-04, rtol=3e-04)},
+ device_type="cuda",
+ ),
+ ),
+ )
def test_grad(self, device, dtype, op):
if op.name in vjp_fail:
self.skipTest("Skipped; Expected failures")
@@ -475,71 +530,102 @@ class TestOperators(TestCase):
self.assertEqual(result, expected)
if op.name not in skip_noncontig:
- result_noncontig = grad(wrapped_fn, diff_argnums)(*noncontig_args, **noncontig_kwargs)
+ result_noncontig = grad(wrapped_fn, diff_argnums)(
+ *noncontig_args, **noncontig_kwargs
+ )
self.assertEqual(result_noncontig, expected)
@with_tf32_off # https://github.com/pytorch/pytorch/issues/86798
@ops(op_db + additional_op_db + autograd_function_db, allowed_dtypes=(torch.float,))
- @skipOps('TestOperators', 'test_jvp', set({
- # Composite ops that do bad things. Need to be fixed in PyTorch core.
- # RuntimeError: Cannot access data pointer of Tensor that doesn't have storage
- xfail('tensor_split'),
-
- # BUG: silent incorrectness: runs and produces numerical differences
- skip('nn.functional.max_unpool1d'), # fails everywhere except on mac
- skip('nn.functional.max_unpool2d'), # fails everywhere except on windows
- skip('nn.functional.max_unpool3d'), # fails everywhere except on mac
- xfail("native_batch_norm"), # TODO: fails comparing None to tensor of 0s for saved_mean/var tangents
- xfail("_native_batch_norm_legit"), # TODO: fails comparing None to tensor of 0s for saved_mean/var tangents
- xfail("_batch_norm_with_update"), # TODO: fails comparing None to tensor of 0s for saved_mean/var tangents
-
- xfail('nn.functional.scaled_dot_product_attention'),
- xfail('torch.ops.aten._flash_attention_forward'),
- xfail('torch.ops.aten._efficient_attention_forward'),
-
- xfail('nn.functional.rrelu'), # in-place test errors out with no formula implemented
- xfail('NumpyExpMarkDirtyAutogradFunction'), # TODO: https://github.com/pytorch/pytorch/issues/91280
-
- # https://github.com/pytorch/pytorch/issues/96560
- # ROCm: NotImplementedError
- decorate('nn.functional.batch_norm', decorator=skipIfRocm),
- # ROCm: NotImplementedError
- decorate('nn.functional.instance_norm', decorator=skipIfRocm),
-
- # --- Non-Contiguous Failures! ---
- # This is expected to fail as the operator
- # expects last dim to have stride=1
- xfail('view_as_complex'),
- # BUG
- # AssertionError: Tensor-likes are not close!
- xfail('as_strided'),
- xfail('as_strided', 'partial_views'),
- xfail('as_strided_scatter'),
- decorate('linalg.det', 'singular',
- decorator=expectedFailureIf(IS_MACOS and IS_X86)),
- }))
- @opsToleranceOverride('TestOperators', 'test_jvp', (
- tol1('nn.functional.conv_transpose3d',
- {torch.float32: tol(atol=1e-04, rtol=1.3e-06)}, device_type='cuda'),
- tol1('linalg.tensorsolve',
- {torch.float32: tol(atol=1e-04, rtol=1.3e-05)}, device_type='cuda'),
- tol1('nn.functional.binary_cross_entropy_with_logits',
- {torch.float32: tol(atol=4e-04, rtol=4e-04)}),
- tol1('nn.functional.batch_norm',
- {torch.float32: tol(atol=4e-05, rtol=5e-05)}),
- tol1('nn.functional.conv2d',
- {torch.float32: tol(atol=4e-05, rtol=5e-05)}),
- tol1('pca_lowrank',
- {torch.float32: tol(atol=5e-05, rtol=5e-05)}),
- tol1('nn.functional.multi_head_attention_forward',
- {torch.float32: tol(atol=6e-05, rtol=2e-05)}),
- ))
+ @skipOps(
+ "TestOperators",
+ "test_jvp",
+ set(
+ {
+ # Composite ops that do bad things. Need to be fixed in PyTorch core.
+ # RuntimeError: Cannot access data pointer of Tensor that doesn't have storage
+ xfail("tensor_split"),
+ # BUG: silent incorrectness: runs and produces numerical differences
+ skip("nn.functional.max_unpool1d"), # fails everywhere except on mac
+ skip(
+ "nn.functional.max_unpool2d"
+ ), # fails everywhere except on windows
+ skip("nn.functional.max_unpool3d"), # fails everywhere except on mac
+ xfail(
+ "native_batch_norm"
+ ), # TODO: fails comparing None to tensor of 0s for saved_mean/var tangents
+ xfail(
+ "_native_batch_norm_legit"
+ ), # TODO: fails comparing None to tensor of 0s for saved_mean/var tangents
+ xfail(
+ "_batch_norm_with_update"
+ ), # TODO: fails comparing None to tensor of 0s for saved_mean/var tangents
+ xfail("nn.functional.scaled_dot_product_attention"),
+ xfail("torch.ops.aten._flash_attention_forward"),
+ xfail("torch.ops.aten._efficient_attention_forward"),
+ xfail(
+ "nn.functional.rrelu"
+ ), # in-place test errors out with no formula implemented
+ xfail(
+ "NumpyExpMarkDirtyAutogradFunction"
+ ), # TODO: https://github.com/pytorch/pytorch/issues/91280
+ # https://github.com/pytorch/pytorch/issues/96560
+ # ROCm: NotImplementedError
+ decorate("nn.functional.batch_norm", decorator=skipIfRocm),
+ # ROCm: NotImplementedError
+ decorate("nn.functional.instance_norm", decorator=skipIfRocm),
+ # --- Non-Contiguous Failures! ---
+ # This is expected to fail as the operator
+ # expects last dim to have stride=1
+ xfail("view_as_complex"),
+ # BUG
+ # AssertionError: Tensor-likes are not close!
+ xfail("as_strided"),
+ xfail("as_strided", "partial_views"),
+ xfail("as_strided_scatter"),
+ decorate(
+ "linalg.det",
+ "singular",
+ decorator=expectedFailureIf(IS_MACOS and IS_X86),
+ ),
+ }
+ ),
+ )
+ @opsToleranceOverride(
+ "TestOperators",
+ "test_jvp",
+ (
+ tol1(
+ "nn.functional.conv_transpose3d",
+ {torch.float32: tol(atol=1e-04, rtol=1.3e-06)},
+ device_type="cuda",
+ ),
+ tol1(
+ "linalg.tensorsolve",
+ {torch.float32: tol(atol=1e-04, rtol=1.3e-05)},
+ device_type="cuda",
+ ),
+ tol1(
+ "nn.functional.binary_cross_entropy_with_logits",
+ {torch.float32: tol(atol=4e-04, rtol=4e-04)},
+ ),
+ tol1(
+ "nn.functional.batch_norm", {torch.float32: tol(atol=4e-05, rtol=5e-05)}
+ ),
+ tol1("nn.functional.conv2d", {torch.float32: tol(atol=4e-05, rtol=5e-05)}),
+ tol1("pca_lowrank", {torch.float32: tol(atol=5e-05, rtol=5e-05)}),
+ tol1(
+ "nn.functional.multi_head_attention_forward",
+ {torch.float32: tol(atol=6e-05, rtol=2e-05)},
+ ),
+ ),
+ )
def test_jvp(self, device, dtype, op):
# TODO: get rid of vjp_decomp when we add decomposition support to
# PyTorch's forward-mode ad. Currently the decomposition support only
# works for functorch.jvp
VJP_DECOMP = {
- 'nn.functional.logsigmoid',
+ "nn.functional.logsigmoid",
}
if op.name in VJP_DECOMP:
fixme_ref_jvp_local = simulate_jvp
@@ -557,26 +643,40 @@ class TestOperators(TestCase):
for sample in samples:
if outplace_variant:
- self.jvp_opinfo_test(outplace_variant, sample,
- sample.output_process_fn_grad,
- clone_inputs=False,
- fixme_ref_jvp_local=fixme_ref_jvp_local,
- test_noncontig=op.name not in skip_noncontig)
+ self.jvp_opinfo_test(
+ outplace_variant,
+ sample,
+ sample.output_process_fn_grad,
+ clone_inputs=False,
+ fixme_ref_jvp_local=fixme_ref_jvp_local,
+ test_noncontig=op.name not in skip_noncontig,
+ )
if is_valid_inplace_sample_input(sample, op, inplace_variant):
- self.jvp_opinfo_test(inplace_variant, sample,
- sample.output_process_fn_grad,
- clone_inputs=True,
- fixme_ref_jvp_local=fixme_ref_jvp_local,
- test_noncontig=op.name not in skip_noncontig)
-
- def jvp_opinfo_test(self, fn, sample, output_process_fn,
- clone_inputs, fixme_ref_jvp_local, test_noncontig):
+ self.jvp_opinfo_test(
+ inplace_variant,
+ sample,
+ sample.output_process_fn_grad,
+ clone_inputs=True,
+ fixme_ref_jvp_local=fixme_ref_jvp_local,
+ test_noncontig=op.name not in skip_noncontig,
+ )
+
+ def jvp_opinfo_test(
+ self,
+ fn,
+ sample,
+ output_process_fn,
+ clone_inputs,
+ fixme_ref_jvp_local,
+ test_noncontig,
+ ):
# NB: we used requires_grad=True to determine where the primals are,
# but don't need that information otherwise
args = (sample.input,) + sample.args
kwargs = sample.kwargs
contig_fn, primals = normalize_op_input_output2(
- fn, args, kwargs, output_process_fn, requires_grad=True)
+ fn, args, kwargs, output_process_fn, requires_grad=True
+ )
orig_primals = tree_map(lambda x: x.detach(), primals)
orig_tangents = tree_map(lambda x: torch.randn_like(x), primals)
@@ -588,8 +688,9 @@ class TestOperators(TestCase):
return orig_primals, orig_tangents
primals, tangents = maybe_clone_inputs()
- expected_primal_outs, expected_tangent_outs = \
- fixme_ref_jvp_local(contig_fn, primals, tangents)
+ expected_primal_outs, expected_tangent_outs = fixme_ref_jvp_local(
+ contig_fn, primals, tangents
+ )
primals, tangents = maybe_clone_inputs()
primal_outs, tangent_outs = jvp(contig_fn, primals, tangents)
@@ -602,61 +703,78 @@ class TestOperators(TestCase):
noncontig_args = (noncontig_sample.input,) + noncontig_sample.args
noncontig_kwargs = sample.kwargs
noncontig_fn, primals = normalize_op_input_output2(
- fn, noncontig_args, noncontig_kwargs,
- output_process_fn, requires_grad=True)
+ fn,
+ noncontig_args,
+ noncontig_kwargs,
+ output_process_fn,
+ requires_grad=True,
+ )
noncontig_primals = tree_map(lambda x: x.detach(), primals)
- noncontig_tangents = tree_map(lambda x: noncontiguous_like(x), orig_tangents)
- noncontig_primal_outs, noncontig_tangent_outs = jvp(noncontig_fn,
- noncontig_primals,
- noncontig_tangents)
+ noncontig_tangents = tree_map(
+ lambda x: noncontiguous_like(x), orig_tangents
+ )
+ noncontig_primal_outs, noncontig_tangent_outs = jvp(
+ noncontig_fn, noncontig_primals, noncontig_tangents
+ )
self.assertEqual(noncontig_primal_outs, expected_primal_outs)
self.assertEqual(noncontig_tangent_outs, expected_tangent_outs)
@with_tf32_off # https://github.com/pytorch/pytorch/issues/86798
@ops(op_db + additional_op_db + autograd_function_db, allowed_dtypes=(torch.float,))
- @skipOps('TestOperators', 'test_vjp', vjp_fail.union({
- xfail('sparse.sampled_addmm', ''),
- xfail('sparse.mm', 'reduce'),
-
- # ---- Non-Contiguous Failures ----
- # This is expected to fail as the operator
- # expects last dim to have stride=1
- xfail('view_as_complex'),
- # RuntimeError: query: last dimension must be contiguous
- # The fused attention kernels require the last dim to be contiguous
- xfail('nn.functional.scaled_dot_product_attention'),
- xfail('torch.ops.aten._flash_attention_forward'),
- xfail('torch.ops.aten._efficient_attention_forward'),
- # BUG
- # AssertionError: Tensor-likes are not close!
- xfail('as_strided'),
- xfail('as_strided_scatter'),
- xfail('_softmax_backward_data', device_type='cpu'),
- xfail('as_strided', 'partial_views'),
- }))
- @opsToleranceOverride('TestOperators', 'test_vjp', (
- tol1('nn.functional.conv_transpose3d',
- {torch.float32: tol(atol=5e-05, rtol=9e-05)}, device_type='cuda'),
- tol1('nn.functional.binary_cross_entropy_with_logits',
- {torch.float32: tol(atol=1e-04, rtol=1e-04)}),
- tol1('nn.functional.multi_head_attention_forward',
- {torch.float32: tol(atol=2e-03, rtol=2e-04)}),
- tol1('__rmatmul__',
- {torch.float32: tol(atol=1e-05, rtol=1e-05)}),
- tol1('matmul',
- {torch.float32: tol(atol=1e-05, rtol=1e-05)}),
- tol2('linalg.pinv', 'hermitian',
- {torch.float32: tol(atol=1e-05, rtol=1e-05)}),
- tol1('linalg.tensorsolve',
- {torch.float32: tol(atol=1e-05, rtol=1e-05)}),
- tol1('linalg.multi_dot',
- {torch.float32: tol(atol=1e-04, rtol=1e-04)}),
- tol1('svd_lowrank',
- {torch.float32: tol(atol=1e-04, rtol=1e-04)}),
- tol1('pca_lowrank',
- {torch.float32: tol(atol=1e-04, rtol=1e-04)}),
- ))
+ @skipOps(
+ "TestOperators",
+ "test_vjp",
+ vjp_fail.union(
+ {
+ xfail("sparse.sampled_addmm", ""),
+ xfail("sparse.mm", "reduce"),
+ # ---- Non-Contiguous Failures ----
+ # This is expected to fail as the operator
+ # expects last dim to have stride=1
+ xfail("view_as_complex"),
+ # RuntimeError: query: last dimension must be contiguous
+ # The fused attention kernels require the last dim to be contiguous
+ xfail("nn.functional.scaled_dot_product_attention"),
+ xfail("torch.ops.aten._flash_attention_forward"),
+ xfail("torch.ops.aten._efficient_attention_forward"),
+ # BUG
+ # AssertionError: Tensor-likes are not close!
+ xfail("as_strided"),
+ xfail("as_strided_scatter"),
+ xfail("_softmax_backward_data", device_type="cpu"),
+ xfail("as_strided", "partial_views"),
+ }
+ ),
+ )
+ @opsToleranceOverride(
+ "TestOperators",
+ "test_vjp",
+ (
+ tol1(
+ "nn.functional.conv_transpose3d",
+ {torch.float32: tol(atol=5e-05, rtol=9e-05)},
+ device_type="cuda",
+ ),
+ tol1(
+ "nn.functional.binary_cross_entropy_with_logits",
+ {torch.float32: tol(atol=1e-04, rtol=1e-04)},
+ ),
+ tol1(
+ "nn.functional.multi_head_attention_forward",
+ {torch.float32: tol(atol=2e-03, rtol=2e-04)},
+ ),
+ tol1("__rmatmul__", {torch.float32: tol(atol=1e-05, rtol=1e-05)}),
+ tol1("matmul", {torch.float32: tol(atol=1e-05, rtol=1e-05)}),
+ tol2(
+ "linalg.pinv", "hermitian", {torch.float32: tol(atol=1e-05, rtol=1e-05)}
+ ),
+ tol1("linalg.tensorsolve", {torch.float32: tol(atol=1e-05, rtol=1e-05)}),
+ tol1("linalg.multi_dot", {torch.float32: tol(atol=1e-04, rtol=1e-04)}),
+ tol1("svd_lowrank", {torch.float32: tol(atol=1e-04, rtol=1e-04)}),
+ tol1("pca_lowrank", {torch.float32: tol(atol=1e-04, rtol=1e-04)}),
+ ),
+ )
def test_vjp(self, device, dtype, op):
if not op.supports_autograd:
self.skipTest("Skipped! Autograd not supported.")
@@ -666,7 +784,9 @@ class TestOperators(TestCase):
def _test(_op, inplace=False):
for sample in samples:
- if inplace and not is_valid_inplace_sample_input(sample, op, op.inplace_variant):
+ if inplace and not is_valid_inplace_sample_input(
+ sample, op, op.inplace_variant
+ ):
continue
fn, primals = normalize_op_input_output(_op, sample)
result = fn(*primals)
@@ -682,8 +802,12 @@ class TestOperators(TestCase):
self.assertEqual(result_vjps, expected_vjps)
if op.name not in skip_noncontig:
- noncontig_fn, noncontig_primals = normalize_op_input_output(_op, sample.noncontiguous())
- noncontig_cotangents = tree_map(lambda x: noncontiguous_like(x), cotangents)
+ noncontig_fn, noncontig_primals = normalize_op_input_output(
+ _op, sample.noncontiguous()
+ )
+ noncontig_cotangents = tree_map(
+ lambda x: noncontiguous_like(x), cotangents
+ )
out_noncontig, vjp_fn = vjp(noncontig_fn, *noncontig_primals)
self.assertEqual(out_noncontig, result)
noncontig_result_vjps = vjp_fn(noncontig_cotangents)
@@ -693,42 +817,56 @@ class TestOperators(TestCase):
for a_op in op.aliases:
_test(a_op)
if op.inplace_variant:
+
def f(inp, *args, **kwargs):
return op.inplace_variant(inp.clone(), *args, **kwargs)
+
_test(f, inplace=True)
@ops(op_db + additional_op_db + autograd_function_db, allowed_dtypes=(torch.float,))
- @skipOps('TestOperators', 'test_vjpvjp', vjp_fail.union({
- skip('nn.functional.max_unpool1d'), # silent incorrectness; Flaky
- skip('nn.functional.max_unpool2d'), # silent incorrectness; Flaky
- xfail('nn.functional.ctc_loss'), # Not Implemented
- xfail('native_layer_norm', ''), # Expected a proper Tensor but got None for argument #1 'other'
- xfail('sparse.sampled_addmm', ''), # sparse tensors have no strides
- xfail('sparse.mm', 'reduce'), # sparse tensors have no strides
- skip('nn.functional.scaled_dot_product_attention'),
- xfail('torch.ops.aten._flash_attention_forward'),
- xfail('torch.ops.aten._efficient_attention_forward'),
- # AssertionError: Tensor-likes are not close!
- # Mismatched elements: 1 / 15 (6.7%)
- # Greatest absolute difference: 24.0 at index (2, 4) (up to 1e-05 allowed)
- # Greatest relative difference: 1.7933241714393998e-06 at index (2, 4) (up to 1.3e-06 allowed)
- # The failure occurred for item [0]
- xfail('masked.prod')
- }))
- @opsToleranceOverride('TestOperators', 'test_vjpvjp', (
- tol1('nn.functional.conv_transpose3d',
- {torch.float32: tol(atol=5e-05, rtol=9e-05)}, device_type='cuda'),
- tol1('prod',
- {torch.float32: tol(atol=2e-05, rtol=1e-04)}),
- tol1('masked.cumprod',
- {torch.float32: tol(atol=5e-04, rtol=5e-04)}),
- tol1('cumprod',
- {torch.float32: tol(atol=5e-04, rtol=5e-04)}),
- tol1('linalg.vander',
- {torch.float32: tol(atol=5e-04, rtol=5e-04)}),
- tol2('linalg.det', 'singular',
- {torch.float32: tol(atol=2e-05, rtol=2e-05)}),
- ))
+ @skipOps(
+ "TestOperators",
+ "test_vjpvjp",
+ vjp_fail.union(
+ {
+ skip("nn.functional.max_unpool1d"), # silent incorrectness; Flaky
+ skip("nn.functional.max_unpool2d"), # silent incorrectness; Flaky
+ xfail("nn.functional.ctc_loss"), # Not Implemented
+ xfail(
+ "native_layer_norm", ""
+ ), # Expected a proper Tensor but got None for argument #1 'other'
+ xfail("sparse.sampled_addmm", ""), # sparse tensors have no strides
+ xfail("sparse.mm", "reduce"), # sparse tensors have no strides
+ skip("nn.functional.scaled_dot_product_attention"),
+ xfail("torch.ops.aten._flash_attention_forward"),
+ xfail("torch.ops.aten._efficient_attention_forward"),
+ # AssertionError: Tensor-likes are not close!
+ # Mismatched elements: 1 / 15 (6.7%)
+ # Greatest absolute difference: 24.0 at index (2, 4) (up to 1e-05 allowed)
+ # Greatest relative difference: 1.7933241714393998e-06 at index (2, 4) (up to 1.3e-06 allowed)
+ # The failure occurred for item [0]
+ xfail("masked.prod"),
+ }
+ ),
+ )
+ @opsToleranceOverride(
+ "TestOperators",
+ "test_vjpvjp",
+ (
+ tol1(
+ "nn.functional.conv_transpose3d",
+ {torch.float32: tol(atol=5e-05, rtol=9e-05)},
+ device_type="cuda",
+ ),
+ tol1("prod", {torch.float32: tol(atol=2e-05, rtol=1e-04)}),
+ tol1("masked.cumprod", {torch.float32: tol(atol=5e-04, rtol=5e-04)}),
+ tol1("cumprod", {torch.float32: tol(atol=5e-04, rtol=5e-04)}),
+ tol1("linalg.vander", {torch.float32: tol(atol=5e-04, rtol=5e-04)}),
+ tol2(
+ "linalg.det", "singular", {torch.float32: tol(atol=2e-05, rtol=2e-05)}
+ ),
+ ),
+ )
def test_vjpvjp(self, device, dtype, op):
if not op.supports_autograd:
self.skipTest("Skipped! Autograd not supported.")
@@ -741,7 +879,9 @@ class TestOperators(TestCase):
def test(_op, inplace=False):
for sample in samples:
- if inplace and not is_valid_inplace_sample_input(sample, op, op.inplace_variant):
+ if inplace and not is_valid_inplace_sample_input(
+ sample, op, op.inplace_variant
+ ):
continue
fn, args = get_vjpfull_variant(_op, sample)
result = fn(*args)
@@ -761,109 +901,147 @@ class TestOperators(TestCase):
test(op)
if op.inplace_variant:
+
def fn(inp, *args, **kwargs):
return op.inplace_variant(inp.clone(), *args, **kwargs)
+
test(fn, inplace=True)
@with_tf32_off # https://github.com/pytorch/pytorch/issues/86798
- @skipOps('TestOperators', 'test_vmapvjpvjp', vjp_fail.union({
- skip("atleast_1d"), # Takes too long
- skip("atleast_2d"), # Takes too long
- skip("atleast_3d"), # Takes too long
- skip("ormqr"), # Takes too long
- xfail("as_strided"), # incorrect output
- xfail("as_strided", "partial_views"), # incorrect output
- xfail("as_strided_scatter"), # incorrect output
- skip("bernoulli"), # calls random op
- xfail("bfloat16"), # rank 4 tensor for channels_last
- xfail("cdouble"), # rank 4 tensor for channels_last
- xfail("cfloat"), # rank 4 tensor for channels_last
- xfail("chalf"), # rank 4 tensor for channels_last
- xfail("double"), # rank 4 tensor for channels_last
- xfail("float"), # rank 4 tensor for channels_last
- xfail("half"), # rank 4 tensor for channels_last
- xfail("NumpyCubeNotComposableAutogradFunction"), # Not composable autograd.Function
- # It looks like you're either (1) calling .item() on a Tensor or
- # (2) attempting to use a Tensor in some data-dependent control flow or
- # (3) encountering this error in PyTorch internals.
- xfail("index_reduce"),
- decorate("linalg.householder_product", decorator=runOnRocm), # works on ROCm
- xfail("nanquantile", device_type='cpu'), # vmap not implemented for at::equal.
- xfail("native_layer_norm"), # vmap: inplace into a regular tensor
- # got a batched tensor as input while the running_mean or running_var,
- # which will be updated in place, were not batched.
- xfail("nn.functional.batch_norm"),
- xfail("nn.functional.binary_cross_entropy"), # vmap: inplace into a regular tensor
- xfail("nn.functional.ctc_loss"), # derivative not implemented for _ctc_loss_backward
- # flaky on ROCM needs investigation
- decorate('nn.functional.conv_transpose2d', decorator=skipIfRocm),
- skip("nn.functional.dropout"), # calls random op
- skip("nn.functional.dropout2d"), # calls random op
- skip("nn.functional.dropout3d"), # calls random op
- skip("nn.functional.alpha_dropout"), # calls random op
- skip("nn.functional.feature_alpha_dropout", "with_train"), # calls random op
- skip("nn.functional.fractional_max_pool2d"), # calls random op
- skip("nn.functional.fractional_max_pool3d"), # calls random op
- xfail('nn.functional.scaled_dot_product_attention'), # randomness
- xfail('torch.ops.aten._efficient_attention_forward'), # outputs ints
- xfail('nn.functional.multi_head_attention_forward'), # randomness
- # It looks like you're either (1) calling .item() on a Tensor or
- # (2) attempting to use a Tensor in some data-dependent control flow or
- # (3) encountering this error in PyTorch internals.
- xfail("nn.functional.gaussian_nll_loss"),
- # got a batched tensor as input while the running_mean or running_var,
- # which will be updated in place, were not batched.
- xfail("nn.functional.instance_norm"),
- xfail("nn.functional.layer_norm"), # vmap: inplace into a regular tensor
- # RuntimeError: NYI: querying is_contiguous inside of vmap
- # for memory_format other than torch.contiguous_formats
- xfail("nn.functional.max_pool2d"),
- # RuntimeError: NYI: Tensor.clone(memory_format) inside vmap is only
- # supported with memory_format torch.preserve_format or
- # torch.contiguous_format (got ChannelsLast)
- xfail("nn.functional.max_unpool2d"),
- # RuntimeError: NYI: Tensor.clone(memory_format) inside vmap is only
- # supported with memory_format torch.preserve_format
- # or torch.contiguous_format (got ChannelsLast)
- xfail("nn.functional.max_unpool2d", "grad"),
- xfail("nn.functional.rrelu"), # RuntimeError: vmap: we do not yet support aten::rrelu_with_noise.
- xfail("normal"), # calls random op
- xfail("normal", "number_mean"), # calls random op
- xfail("pca_lowrank"), # calls random op
- # https://github.com/pytorch/pytorch/issues/96560
- decorate('linalg.pinv', 'hermitian', decorator=skipIfRocm),
- xfail("quantile", device_type='cpu'), # Batching rule not implemented for `at::equal`
- xfail("scatter_reduce", "prod"), # vmap (looks like you are calling item/data-dependent)
- xfail("sparse.sampled_addmm"), # RuntimeError: Sparse CSR tensors do not have strides
- xfail("sparse.mm", "reduce"), # RuntimeError: Sparse CSR tensors do not have strides
- xfail("svd_lowrank"), # calls random op
- xfail("to"), # rank 4 tensor for channels_last
- xfail("view_as_complex"), # RuntimeError: Tensor must have a last dimension with stride 1
- # got a batched tensor as input while the running_mean or running_var,
- # which will be updated in place, were not batched.
- xfail("nn.functional.batch_norm", 'without_cudnn'),
- # view doesn't work on sparse
- xfail("to_sparse"),
- xfail("native_batch_norm"),
- xfail("_native_batch_norm_legit"),
- # TODO: implement batching rule
- xfail("_batch_norm_with_update"),
- }))
+ @skipOps(
+ "TestOperators",
+ "test_vmapvjpvjp",
+ vjp_fail.union(
+ {
+ skip("atleast_1d"), # Takes too long
+ skip("atleast_2d"), # Takes too long
+ skip("atleast_3d"), # Takes too long
+ skip("ormqr"), # Takes too long
+ xfail("as_strided"), # incorrect output
+ xfail("as_strided", "partial_views"), # incorrect output
+ xfail("as_strided_scatter"), # incorrect output
+ skip("bernoulli"), # calls random op
+ xfail("bfloat16"), # rank 4 tensor for channels_last
+ xfail("cdouble"), # rank 4 tensor for channels_last
+ xfail("cfloat"), # rank 4 tensor for channels_last
+ xfail("chalf"), # rank 4 tensor for channels_last
+ xfail("double"), # rank 4 tensor for channels_last
+ xfail("float"), # rank 4 tensor for channels_last
+ xfail("half"), # rank 4 tensor for channels_last
+ xfail(
+ "NumpyCubeNotComposableAutogradFunction"
+ ), # Not composable autograd.Function
+ # It looks like you're either (1) calling .item() on a Tensor or
+ # (2) attempting to use a Tensor in some data-dependent control flow or
+ # (3) encountering this error in PyTorch internals.
+ xfail("index_reduce"),
+ decorate(
+ "linalg.householder_product", decorator=runOnRocm
+ ), # works on ROCm
+ xfail(
+ "nanquantile", device_type="cpu"
+ ), # vmap not implemented for at::equal.
+ xfail("native_layer_norm"), # vmap: inplace into a regular tensor
+ # got a batched tensor as input while the running_mean or running_var,
+ # which will be updated in place, were not batched.
+ xfail("nn.functional.batch_norm"),
+ xfail(
+ "nn.functional.binary_cross_entropy"
+ ), # vmap: inplace into a regular tensor
+ xfail(
+ "nn.functional.ctc_loss"
+ ), # derivative not implemented for _ctc_loss_backward
+ # flaky on ROCM needs investigation
+ decorate("nn.functional.conv_transpose2d", decorator=skipIfRocm),
+ skip("nn.functional.dropout"), # calls random op
+ skip("nn.functional.dropout2d"), # calls random op
+ skip("nn.functional.dropout3d"), # calls random op
+ skip("nn.functional.alpha_dropout"), # calls random op
+ skip(
+ "nn.functional.feature_alpha_dropout", "with_train"
+ ), # calls random op
+ skip("nn.functional.fractional_max_pool2d"), # calls random op
+ skip("nn.functional.fractional_max_pool3d"), # calls random op
+ xfail("nn.functional.scaled_dot_product_attention"), # randomness
+ xfail("torch.ops.aten._efficient_attention_forward"), # outputs ints
+ xfail("nn.functional.multi_head_attention_forward"), # randomness
+ # It looks like you're either (1) calling .item() on a Tensor or
+ # (2) attempting to use a Tensor in some data-dependent control flow or
+ # (3) encountering this error in PyTorch internals.
+ xfail("nn.functional.gaussian_nll_loss"),
+ # got a batched tensor as input while the running_mean or running_var,
+ # which will be updated in place, were not batched.
+ xfail("nn.functional.instance_norm"),
+ xfail(
+ "nn.functional.layer_norm"
+ ), # vmap: inplace into a regular tensor
+ # RuntimeError: NYI: querying is_contiguous inside of vmap
+ # for memory_format other than torch.contiguous_formats
+ xfail("nn.functional.max_pool2d"),
+ # RuntimeError: NYI: Tensor.clone(memory_format) inside vmap is only
+ # supported with memory_format torch.preserve_format or
+ # torch.contiguous_format (got ChannelsLast)
+ xfail("nn.functional.max_unpool2d"),
+ # RuntimeError: NYI: Tensor.clone(memory_format) inside vmap is only
+ # supported with memory_format torch.preserve_format
+ # or torch.contiguous_format (got ChannelsLast)
+ xfail("nn.functional.max_unpool2d", "grad"),
+ xfail(
+ "nn.functional.rrelu"
+ ), # RuntimeError: vmap: we do not yet support aten::rrelu_with_noise.
+ xfail("normal"), # calls random op
+ xfail("normal", "number_mean"), # calls random op
+ xfail("pca_lowrank"), # calls random op
+ # https://github.com/pytorch/pytorch/issues/96560
+ decorate("linalg.pinv", "hermitian", decorator=skipIfRocm),
+ xfail(
+ "quantile", device_type="cpu"
+ ), # Batching rule not implemented for `at::equal`
+ xfail(
+ "scatter_reduce", "prod"
+ ), # vmap (looks like you are calling item/data-dependent)
+ xfail(
+ "sparse.sampled_addmm"
+ ), # RuntimeError: Sparse CSR tensors do not have strides
+ xfail(
+ "sparse.mm", "reduce"
+ ), # RuntimeError: Sparse CSR tensors do not have strides
+ xfail("svd_lowrank"), # calls random op
+ xfail("to"), # rank 4 tensor for channels_last
+ xfail(
+ "view_as_complex"
+ ), # RuntimeError: Tensor must have a last dimension with stride 1
+ # got a batched tensor as input while the running_mean or running_var,
+ # which will be updated in place, were not batched.
+ xfail("nn.functional.batch_norm", "without_cudnn"),
+ # view doesn't work on sparse
+ xfail("to_sparse"),
+ xfail("native_batch_norm"),
+ xfail("_native_batch_norm_legit"),
+ # TODO: implement batching rule
+ xfail("_batch_norm_with_update"),
+ }
+ ),
+ )
@ops(op_db + additional_op_db + autograd_function_db, allowed_dtypes=(torch.float,))
@toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
- @opsToleranceOverride('TestOperators', 'test_vmapvjpvjp', (
- tol1('linalg.svd',
- {torch.float32: tol(atol=1e-03, rtol=5e-04)}),
- tol1('linalg.lu_factor',
- {torch.float32: tol(atol=2e-03, rtol=2e-02)}),
- tol1('svd',
- {torch.float32: tol(atol=1e-03, rtol=5e-04)}),
- tol1('matrix_exp',
- {torch.float32: tol(atol=1e-03, rtol=5e-04)}),
- ))
- @skipOps('TestOperators', 'test_vmapvjpvjp', {
- xfail('as_strided', 'partial_views'),
- })
+ @opsToleranceOverride(
+ "TestOperators",
+ "test_vmapvjpvjp",
+ (
+ tol1("linalg.svd", {torch.float32: tol(atol=1e-03, rtol=5e-04)}),
+ tol1("linalg.lu_factor", {torch.float32: tol(atol=2e-03, rtol=2e-02)}),
+ tol1("svd", {torch.float32: tol(atol=1e-03, rtol=5e-04)}),
+ tol1("matrix_exp", {torch.float32: tol(atol=1e-03, rtol=5e-04)}),
+ ),
+ )
+ @skipOps(
+ "TestOperators",
+ "test_vmapvjpvjp",
+ {
+ xfail("as_strided", "partial_views"),
+ },
+ )
def test_vmapvjpvjp(self, device, dtype, op):
# Since, we test `vjpvjp` independently,
# for this test, we just verify that vmap
@@ -902,93 +1080,121 @@ class TestOperators(TestCase):
is_batch_norm_and_training = is_batch_norm_training(op.name, sample.kwargs)
generator = get_fallback_and_vmap_exhaustive(
- vjp_of_vjp, args_and_cotangents, {}, is_batch_norm_and_training=is_batch_norm_and_training)
+ vjp_of_vjp,
+ args_and_cotangents,
+ {},
+ is_batch_norm_and_training=is_batch_norm_and_training,
+ )
for loop_out, batched_out in generator:
self.assertEqual(loop_out, batched_out)
- vmapvjp_fail = vjp_fail.union({
- # -------------------- ALLOWED FAILURES --------------------------------
- # The following are not bugs and are expected behavior
- xfail('masked_select'), # Not possible due to dynamic shapes
- skip('bernoulli'), # randomness
- skip('normal', ''), # randomness
- skip('normal', 'number_mean'), # randomness
- skip('nn.functional.rrelu'), # randomness
- skip('nn.functional.feature_alpha_dropout', 'with_train'), # randomness
- skip('nn.functional.feature_alpha_dropout', 'without_train'), # randomness
- skip('nn.functional.dropout'), # randomness
- skip('nn.functional.dropout2d'), # randomness
- skip('nn.functional.dropout3d', ''), # randomness
- skip('nn.functional.alpha_dropout'), # randomness
- skip('nn.functional.scaled_dot_product_attention'), # randomness
- xfail('torch.ops.aten._efficient_attention_forward'), # outputs ints
- skip('nn.functional.multi_head_attention_forward'), # randomness
- xfail('index_put', ''), # not possible due to dynamic shapes; we support a subset
- xfail('nn.functional.fractional_max_pool2d'), # random
- xfail('nn.functional.fractional_max_pool3d'), # random
- xfail('pca_lowrank', ''), # randomness
- xfail('svd_lowrank', ''), # randomness
- xfail('to_sparse', ''), # non-dense output
- skip('to'), # RuntimeError: required rank 4 tensor to use channels_last format
- xfail('as_strided', 'partial_views'),
- xfail("NumpyCubeNotComposableAutogradFunction"), # Not composable autograd.Function
- # ----------------------------------------------------------------------
-
- # ---------------------------- BUGS ------------------------------------
- # All of the following are bugs and need to be fixed
- skip('linalg.svdvals'), # really annoying thing where it passes correctness check but not has_batch_rule
- skip("native_batch_norm"),
- skip("_native_batch_norm_legit"),
- # TODO: implement batching rule
- skip("_batch_norm_with_update"),
- xfail('__getitem__', ''), # dynamic error
- xfail('nanquantile', device_type='cpu'), # checks q via a .item() call
- xfail('nn.functional.gaussian_nll_loss'), # checks var for if any value < 0
- xfail('narrow'), # .item() call
- xfail('quantile', device_type='cpu'), # checks q via a .item() call
- xfail('view_as_complex'), # Tensor must have a last dimension with stride 1
-
- # required rank 4 tensor to use channels_last format
- xfail('bfloat16'),
- xfail('double'),
- xfail('float'),
- xfail('half'),
- xfail('cdouble', ''),
- xfail('cfloat', ''),
- xfail('chalf', ''),
-
- xfail('scatter_reduce', 'prod'), # item call
-
- # Batching rule not implemented for aten::_use_cudnn_ctc_loss.Tensor
- xfail('nn.functional.ctc_loss', device_type='cuda'),
- # NYI: querying is_contiguous inside of vmap for memory_format other than torch.contiguous_format
- xfail('nn.functional.max_unpool2d'),
- xfail('nn.functional.max_unpool2d', 'grad'),
-
- xfail('sparse.sampled_addmm', ''),
- xfail('sparse.mm', 'reduce'),
- xfail('as_strided_scatter', ''), # calls as_strided
- xfail('index_reduce', ''), # .item() call
- # ---------------------------------------------------------------------
- })
+ vmapvjp_fail = vjp_fail.union(
+ {
+ # -------------------- ALLOWED FAILURES --------------------------------
+ # The following are not bugs and are expected behavior
+ xfail("masked_select"), # Not possible due to dynamic shapes
+ skip("bernoulli"), # randomness
+ skip("normal", ""), # randomness
+ skip("normal", "number_mean"), # randomness
+ skip("nn.functional.rrelu"), # randomness
+ skip("nn.functional.feature_alpha_dropout", "with_train"), # randomness
+ skip("nn.functional.feature_alpha_dropout", "without_train"), # randomness
+ skip("nn.functional.dropout"), # randomness
+ skip("nn.functional.dropout2d"), # randomness
+ skip("nn.functional.dropout3d", ""), # randomness
+ skip("nn.functional.alpha_dropout"), # randomness
+ skip("nn.functional.scaled_dot_product_attention"), # randomness
+ xfail("torch.ops.aten._efficient_attention_forward"), # outputs ints
+ skip("nn.functional.multi_head_attention_forward"), # randomness
+ xfail(
+ "index_put", ""
+ ), # not possible due to dynamic shapes; we support a subset
+ xfail("nn.functional.fractional_max_pool2d"), # random
+ xfail("nn.functional.fractional_max_pool3d"), # random
+ xfail("pca_lowrank", ""), # randomness
+ xfail("svd_lowrank", ""), # randomness
+ xfail("to_sparse", ""), # non-dense output
+ skip(
+ "to"
+ ), # RuntimeError: required rank 4 tensor to use channels_last format
+ xfail("as_strided", "partial_views"),
+ xfail(
+ "NumpyCubeNotComposableAutogradFunction"
+ ), # Not composable autograd.Function
+ # ----------------------------------------------------------------------
+ # ---------------------------- BUGS ------------------------------------
+ # All of the following are bugs and need to be fixed
+ skip(
+ "linalg.svdvals"
+ ), # really annoying thing where it passes correctness check but not has_batch_rule
+ skip("native_batch_norm"),
+ skip("_native_batch_norm_legit"),
+ # TODO: implement batching rule
+ skip("_batch_norm_with_update"),
+ xfail("__getitem__", ""), # dynamic error
+ xfail("nanquantile", device_type="cpu"), # checks q via a .item() call
+ xfail("nn.functional.gaussian_nll_loss"), # checks var for if any value < 0
+ xfail("narrow"), # .item() call
+ xfail("quantile", device_type="cpu"), # checks q via a .item() call
+ xfail("view_as_complex"), # Tensor must have a last dimension with stride 1
+ # required rank 4 tensor to use channels_last format
+ xfail("bfloat16"),
+ xfail("double"),
+ xfail("float"),
+ xfail("half"),
+ xfail("cdouble", ""),
+ xfail("cfloat", ""),
+ xfail("chalf", ""),
+ xfail("scatter_reduce", "prod"), # item call
+ # Batching rule not implemented for aten::_use_cudnn_ctc_loss.Tensor
+ xfail("nn.functional.ctc_loss", device_type="cuda"),
+ # NYI: querying is_contiguous inside of vmap for memory_format other than torch.contiguous_format
+ xfail("nn.functional.max_unpool2d"),
+ xfail("nn.functional.max_unpool2d", "grad"),
+ xfail("sparse.sampled_addmm", ""),
+ xfail("sparse.mm", "reduce"),
+ xfail("as_strided_scatter", ""), # calls as_strided
+ xfail("index_reduce", ""), # .item() call
+ # ---------------------------------------------------------------------
+ }
+ )
@with_tf32_off # https://github.com/pytorch/pytorch/issues/86798
@ops(op_db + additional_op_db + autograd_function_db, allowed_dtypes=(torch.float,))
@toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
- @opsToleranceOverride('TestOperators', 'test_vmapvjp', (
- tol1('linalg.svd',
- {torch.float32: tol(atol=5e-04, rtol=1e-04)}, device_type="cuda"),
- tol1('svd',
- {torch.float32: tol(atol=5e-04, rtol=1e-04)}, device_type="cuda"),
- tol1('linalg.householder_product',
- {torch.float32: tol(atol=1e-04, rtol=1e-04)}),
- tol1('matrix_exp',
- {torch.float32: tol(atol=5e-04, rtol=1e-04)}, device_type="cuda"),
- ))
- @skipOps('TestOperators', 'test_vmapvjp', vmapvjp_fail.union({
- xfail('as_strided'),
- xfail('as_strided', 'partial_views'),
- }))
+ @opsToleranceOverride(
+ "TestOperators",
+ "test_vmapvjp",
+ (
+ tol1(
+ "linalg.svd",
+ {torch.float32: tol(atol=5e-04, rtol=1e-04)},
+ device_type="cuda",
+ ),
+ tol1(
+ "svd", {torch.float32: tol(atol=5e-04, rtol=1e-04)}, device_type="cuda"
+ ),
+ tol1(
+ "linalg.householder_product",
+ {torch.float32: tol(atol=1e-04, rtol=1e-04)},
+ ),
+ tol1(
+ "matrix_exp",
+ {torch.float32: tol(atol=5e-04, rtol=1e-04)},
+ device_type="cuda",
+ ),
+ ),
+ )
+ @skipOps(
+ "TestOperators",
+ "test_vmapvjp",
+ vmapvjp_fail.union(
+ {
+ xfail("as_strided"),
+ xfail("as_strided", "partial_views"),
+ }
+ ),
+ )
def test_vmapvjp(self, device, dtype, op):
if not op.supports_autograd:
self.skipTest("Skipped! Autograd not supported.")
@@ -1005,87 +1211,114 @@ class TestOperators(TestCase):
fn, args = get_vjp_fn_and_args_with_cotangents(op, sample, cotangents)
is_batch_norm_and_training = is_batch_norm_training(op.name, sample.kwargs)
generator = get_fallback_and_vmap_exhaustive(
- fn, args, {}, is_batch_norm_and_training=is_batch_norm_and_training)
+ fn, args, {}, is_batch_norm_and_training=is_batch_norm_and_training
+ )
for loop_out, batched_out in generator:
self.assertEqual(loop_out, batched_out)
vmapjvpall_fail = {
# -------------------- ALLOWED FAILURES --------------------------------
# The following are expected (not a bug)
- skip('bernoulli', ''), # randomness
- skip('nn.functional.dropout'), # randomness
- skip('nn.functional.rrelu'), # randomness
- skip('nn.functional.dropout2d', ''),
- skip('nn.functional.dropout3d', ''),
- skip('nn.functional.scaled_dot_product_attention'), # randomness
- xfail('torch.ops.aten._efficient_attention_forward'), # outputs ints
- skip('nn.functional.multi_head_attention_forward'), # randomness
- skip('nn.functional.alpha_dropout'), # randomness
- skip('nn.functional.feature_alpha_dropout', 'without_train'),
- skip('nn.functional.feature_alpha_dropout', 'with_train'),
- xfail('nn.functional.fractional_max_pool2d'), # Cannot access data pointer of Tensor that doesn't have storage
- xfail('nn.functional.fractional_max_pool3d'), # Cannot access data pointer of Tensor that doesn't have storage
+ skip("bernoulli", ""), # randomness
+ skip("nn.functional.dropout"), # randomness
+ skip("nn.functional.rrelu"), # randomness
+ skip("nn.functional.dropout2d", ""),
+ skip("nn.functional.dropout3d", ""),
+ skip("nn.functional.scaled_dot_product_attention"), # randomness
+ xfail("torch.ops.aten._efficient_attention_forward"), # outputs ints
+ skip("nn.functional.multi_head_attention_forward"), # randomness
+ skip("nn.functional.alpha_dropout"), # randomness
+ skip("nn.functional.feature_alpha_dropout", "without_train"),
+ skip("nn.functional.feature_alpha_dropout", "with_train"),
+ xfail(
+ "nn.functional.fractional_max_pool2d"
+ ), # Cannot access data pointer of Tensor that doesn't have storage
+ xfail(
+ "nn.functional.fractional_max_pool3d"
+ ), # Cannot access data pointer of Tensor that doesn't have storage
# Not actually a problem: embedding with max_norm mutates the weight
# and causes different runs to produce different results.
# skip because this is flaky depending on what the max_norm is!
- skip('nn.functional.embedding', ''),
- skip('to'), # RuntimeError: required rank 4 tensor to use channels_last format
- xfail('NumpyExpMarkDirtyAutogradFunction'), # vmap: inplace into a regular tensor
+ skip("nn.functional.embedding", ""),
+ skip("to"), # RuntimeError: required rank 4 tensor to use channels_last format
+ xfail(
+ "NumpyExpMarkDirtyAutogradFunction"
+ ), # vmap: inplace into a regular tensor
# ----------------------------------------------------------------------
-
# ---------------------------- BUGS ------------------------------------
# The following are bugs that we should fix
- xfail('masked.mean'), # silent incorrectness (nan difference)
- xfail('as_strided', 'partial_views'), # Tensor-likes are not close!
-
- xfail('nn.functional.soft_margin_loss', ''), # soft_margin_loss_backward does not support forward-ad
- xfail('tensor_split'), # data_ptr composite compliance
- xfail('quantile'), # at::equal batching rule (cpu), also, in-place vmap (cuda)
- skip('as_strided'), # Test runner cannot handle this
+ xfail("masked.mean"), # silent incorrectness (nan difference)
+ xfail("as_strided", "partial_views"), # Tensor-likes are not close!
+ xfail(
+ "nn.functional.soft_margin_loss", ""
+ ), # soft_margin_loss_backward does not support forward-ad
+ xfail("tensor_split"), # data_ptr composite compliance
+ xfail("quantile"), # at::equal batching rule (cpu), also, in-place vmap (cuda)
+ skip("as_strided"), # Test runner cannot handle this
# requires special handling, and does not yet have a batching rule. Feel free to file a github issue!
- xfail('as_strided_scatter'),
- xfail('nn.functional.gaussian_nll_loss'), # .item or data-dependent control flow
- xfail('scatter'), # forward-mode AD does not support at::scatter
- xfail('nanquantile'), # at::equal batching rule (cpu), also, in-place vmap (cuda)
- xfail('view_as_complex'), # Tensor must have a last dimension with stride 1
-
- skip('pca_lowrank', ''), # randomness
- skip('svd_lowrank', ''), # randomness
-
- xfail('double'), # required rank 4 tensor to use channels_last format
- xfail('cdouble'), # required rank 4 tensor to use channels_last format
-
+ xfail("as_strided_scatter"),
+ xfail(
+ "nn.functional.gaussian_nll_loss"
+ ), # .item or data-dependent control flow
+ xfail("scatter"), # forward-mode AD does not support at::scatter
+ xfail(
+ "nanquantile"
+ ), # at::equal batching rule (cpu), also, in-place vmap (cuda)
+ xfail("view_as_complex"), # Tensor must have a last dimension with stride 1
+ skip("pca_lowrank", ""), # randomness
+ skip("svd_lowrank", ""), # randomness
+ xfail("double"), # required rank 4 tensor to use channels_last format
+ xfail("cdouble"), # required rank 4 tensor to use channels_last format
# potential silent incorrectness
- skip('nn.functional.max_unpool1d'), # Flaky, seems to sometimes hit max_unpool2d
- skip('nn.functional.max_unpool2d'), # fails everywhere except on mac
- skip('nn.functional.max_unpool3d'), # fails everywhere except on mac
-
+ skip(
+ "nn.functional.max_unpool1d"
+ ), # Flaky, seems to sometimes hit max_unpool2d
+ skip("nn.functional.max_unpool2d"), # fails everywhere except on mac
+ skip("nn.functional.max_unpool3d"), # fails everywhere except on mac
# erroring because running_mean and running_var aren't differentiable
- xfail('nn.functional.batch_norm'),
- xfail('nn.functional.batch_norm', 'without_cudnn'),
+ xfail("nn.functional.batch_norm"),
+ xfail("nn.functional.batch_norm", "without_cudnn"),
xfail("native_batch_norm"),
xfail("_native_batch_norm_legit"),
# TODO: implement batching rule
xfail("_batch_norm_with_update"),
-
# https://github.com/pytorch/pytorch/issues/96560
# ROCm: NotImplementedError
- decorate('nn.functional.instance_norm', decorator=skipIfRocm),
+ decorate("nn.functional.instance_norm", decorator=skipIfRocm),
# ----------------------------------------------------------------------
}
@with_tf32_off # https://github.com/pytorch/pytorch/issues/86798
@ops(op_db + additional_op_db + autograd_function_db, allowed_dtypes=(torch.float,))
@toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
- @opsToleranceOverride('TestOperators', 'test_vmapjvpall', (
- tol1('nn.functional.conv_transpose3d',
- {torch.float32: tol(atol=2e-04, rtol=9e-3)}, device_type='cuda'),
- tol1('linalg.householder_product',
- {torch.float32: tol(atol=2e-04, rtol=9e-3)}),
- ))
- @skipOps('TestOperators', 'test_vmapjvpall', vmapjvpall_fail.union({
- decorate('linalg.det', 'singular', decorator=expectedFailureIf(IS_MACOS and IS_X86)),
- }))
+ @opsToleranceOverride(
+ "TestOperators",
+ "test_vmapjvpall",
+ (
+ tol1(
+ "nn.functional.conv_transpose3d",
+ {torch.float32: tol(atol=2e-04, rtol=9e-3)},
+ device_type="cuda",
+ ),
+ tol1(
+ "linalg.householder_product",
+ {torch.float32: tol(atol=2e-04, rtol=9e-3)},
+ ),
+ ),
+ )
+ @skipOps(
+ "TestOperators",
+ "test_vmapjvpall",
+ vmapjvpall_fail.union(
+ {
+ decorate(
+ "linalg.det",
+ "singular",
+ decorator=expectedFailureIf(IS_MACOS and IS_X86),
+ ),
+ }
+ ),
+ )
# This is technically a superset of test_vmapjvp. We should either delete test_vmapjvp
# or figure out if we can split vmapjvpall. It's useful to keep test_vmapjvp intact
# because that corresponds to "batched forward-mode AD" testing in PyTorch core
@@ -1108,46 +1341,65 @@ class TestOperators(TestCase):
fn, args = get_jvp_variant_primals_tangents(op, sample)
is_batch_norm_and_training = is_batch_norm_training(op.name, kwarg_values)
generator = get_fallback_and_vmap_exhaustive(
- fn, args, {}, is_batch_norm_and_training=is_batch_norm_and_training)
+ fn, args, {}, is_batch_norm_and_training=is_batch_norm_and_training
+ )
for loop_out, batched_out in generator:
self.assertEqual(loop_out, batched_out)
@ops(op_db + additional_op_db + autograd_function_db, allowed_dtypes=(torch.float,))
- @skipOps('TestOperators', 'test_vmapjvpall_has_batch_rule', vmapjvpall_fail.union({
- skip('to'), # RuntimeError: required rank 4 tensor to use channels_last format
- xfail('cdouble'), # RuntimeError: required rank 4 tensor to use channels_last format
- xfail('cumprod'),
- xfail('masked_fill'),
- xfail('fill'),
- skip('masked.mean'), # ???
- xfail('masked_scatter'),
- xfail('put'),
- xfail('take'),
- xfail('nn.functional.feature_alpha_dropout', 'without_train'),
- xfail('nn.functional.dropout2d', ''),
- xfail('pca_lowrank', ''),
- xfail('svd_lowrank', ''),
- xfail('nn.functional.feature_alpha_dropout', 'with_train'),
- xfail('special.log_ndtr', ''),
- xfail('fft.ihfft2'), # conj_physical fallback
- xfail('fft.ihfftn'), # conj_physical fallback
- xfail('nn.functional.max_unpool3d', 'grad'),
- xfail('nn.functional.max_unpool2d', 'grad'),
- xfail('nn.functional.soft_margin_loss', ''),
- xfail('nn.functional.max_unpool1d', 'grad'),
- xfail('nn.functional.embedding', ''),
- xfail('scatter_reduce', "sum"), # aten::scatter_reduce.two hit the vmap fallback
- xfail('scatter_reduce', "mean"), # aten::scatter_reduce.two hit the vmap fallback
- xfail('scatter_reduce', "amin"), # aten::scatter_reduce.two hit the vmap fallback
- xfail('scatter_reduce', "amax"), # aten::scatter_reduce.two hit the vmap fallback
- xfail('nn.functional.glu'),
- xfail('nn.functional.bilinear'), # trilinear doesn't have batching rule
- xfail('linalg.lu', ''),
- xfail('nn.functional.dropout3d', ''),
- xfail('as_strided_scatter', ''),
- xfail('masked.cumprod', ''),
- xfail("renorm"), # hit vmap fallback, which is disabled
- }))
+ @skipOps(
+ "TestOperators",
+ "test_vmapjvpall_has_batch_rule",
+ vmapjvpall_fail.union(
+ {
+ skip(
+ "to"
+ ), # RuntimeError: required rank 4 tensor to use channels_last format
+ xfail(
+ "cdouble"
+ ), # RuntimeError: required rank 4 tensor to use channels_last format
+ xfail("cumprod"),
+ xfail("masked_fill"),
+ xfail("fill"),
+ skip("masked.mean"), # ???
+ xfail("masked_scatter"),
+ xfail("put"),
+ xfail("take"),
+ xfail("nn.functional.feature_alpha_dropout", "without_train"),
+ xfail("nn.functional.dropout2d", ""),
+ xfail("pca_lowrank", ""),
+ xfail("svd_lowrank", ""),
+ xfail("nn.functional.feature_alpha_dropout", "with_train"),
+ xfail("special.log_ndtr", ""),
+ xfail("fft.ihfft2"), # conj_physical fallback
+ xfail("fft.ihfftn"), # conj_physical fallback
+ xfail("nn.functional.max_unpool3d", "grad"),
+ xfail("nn.functional.max_unpool2d", "grad"),
+ xfail("nn.functional.soft_margin_loss", ""),
+ xfail("nn.functional.max_unpool1d", "grad"),
+ xfail("nn.functional.embedding", ""),
+ xfail(
+ "scatter_reduce", "sum"
+ ), # aten::scatter_reduce.two hit the vmap fallback
+ xfail(
+ "scatter_reduce", "mean"
+ ), # aten::scatter_reduce.two hit the vmap fallback
+ xfail(
+ "scatter_reduce", "amin"
+ ), # aten::scatter_reduce.two hit the vmap fallback
+ xfail(
+ "scatter_reduce", "amax"
+ ), # aten::scatter_reduce.two hit the vmap fallback
+ xfail("nn.functional.glu"),
+ xfail("nn.functional.bilinear"), # trilinear doesn't have batching rule
+ xfail("linalg.lu", ""),
+ xfail("nn.functional.dropout3d", ""),
+ xfail("as_strided_scatter", ""),
+ xfail("masked.cumprod", ""),
+ xfail("renorm"), # hit vmap fallback, which is disabled
+ }
+ ),
+ )
@toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
def test_vmapjvpall_has_batch_rule(self, device, dtype, op):
if is_inplace(op, op.get_op()):
@@ -1167,85 +1419,112 @@ class TestOperators(TestCase):
kwarg_values = sample.kwargs
args = tuple(arg_values) + tuple(kwarg_values)
fn, args = get_jvp_variant_primals_tangents(op, sample)
- is_batch_norm_and_training = is_batch_norm_training(op.name, kwarg_values)
+ is_batch_norm_and_training = is_batch_norm_training(
+ op.name, kwarg_values
+ )
for loop_out, batched_out in get_fallback_and_vmap_exhaustive(
- fn, args, {}, is_batch_norm_and_training=is_batch_norm_and_training, compute_loop_out=False):
+ fn,
+ args,
+ {},
+ is_batch_norm_and_training=is_batch_norm_and_training,
+ compute_loop_out=False,
+ ):
pass
+
check_vmap_fallback(self, test, op, dry_run=False)
@ops(op_db + additional_op_db + autograd_function_db, allowed_dtypes=(torch.float,))
@toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
- @skipOps('TestOperators', 'test_vmapvjp_has_batch_rule', vmapvjp_fail.union({
- skip('to'), # RuntimeError: required rank 4 tensor to use channels_last format
- xfail('view_as_complex'),
- xfail('cummax'),
- xfail('cummin'),
- xfail('fill'),
- xfail('narrow'), # Batching rule not implemented for `narrow.Tensor` (and view op)
- xfail('special.log_ndtr'),
- xfail('linalg.householder_product'),
- xfail('masked_fill'),
- xfail('masked_scatter'),
- xfail('masked_select'),
- xfail('nanquantile'),
- xfail('ormqr'),
- xfail('put'),
- xfail('scatter_reduce', "sum"), # aten::scatter_reduce.two hit the vmap fallback
- xfail('scatter_reduce', "mean"), # aten::scatter_reduce.two hit the vmap fallback
- xfail('scatter_reduce', "amin"), # aten::scatter_reduce.two hit the vmap fallback
- xfail('scatter_reduce', "amax"), # aten::scatter_reduce.two hit the vmap fallback
- xfail('quantile'),
- xfail('renorm'),
- xfail('take'),
- xfail('tensor_split'),
- xfail('to_sparse'),
- xfail('unfold'),
- xfail('unfold_copy'),
- xfail('nn.functional.dropout'),
- xfail('fft.ihfft2'),
- xfail('fft.ihfftn'),
- xfail('nn.functional.gaussian_nll_loss'),
- xfail('nn.functional.bilinear'),
- xfail('nn.functional.fractional_max_pool3d'),
- xfail('nn.functional.ctc_loss'),
- xfail('nn.functional.rrelu'),
- xfail('nn.functional.embedding_bag'),
- xfail('nn.functional.fractional_max_pool2d'),
- xfail('nn.functional.feature_alpha_dropout', 'with_train'),
- xfail('pca_lowrank', ''),
- xfail('nn.functional.dropout2d', ''),
- xfail('nn.functional.feature_alpha_dropout', 'without_train'),
- xfail('svd_lowrank', ''),
-
- xfail('nn.functional.max_unpool2d', ''),
- xfail('nn.functional.multi_margin_loss', ''),
- xfail('nn.functional.multilabel_margin_loss', ''),
- xfail('nn.functional.pdist', ''),
- xfail('scatter_reduce', 'prod'),
- xfail('nn.functional.max_unpool1d', ''),
- xfail('nn.functional.max_unpool3d', ''),
- xfail('nn.functional.max_unpool3d', 'grad'),
- xfail('nn.functional.soft_margin_loss', ''),
- xfail('nn.functional.max_unpool1d', 'grad'),
- xfail('nn.functional.max_unpool2d', 'grad'),
- xfail('linalg.lu', ''),
- xfail('cdouble', ''),
- xfail('cfloat', ''),
- xfail('chalf', ''),
- xfail('index_reduce', ''),
- xfail('nn.functional.dropout3d', ''),
- xfail('as_strided_scatter', ''),
- xfail('_segment_reduce', 'offsets'),
- xfail('_segment_reduce', 'lengths'),
- xfail('sparse.sampled_addmm', ''),
- xfail('sparse.mm', 'reduce'),
- xfail("native_batch_norm"),
- xfail("_native_batch_norm_legit"),
- # TODO: implement batching rule
- xfail("_batch_norm_with_update"),
- xfail("native_dropout_backward"),
- xfail("index_fill"), # aten::_unique hit the vmap fallback which is currently disabled
- }))
+ @skipOps(
+ "TestOperators",
+ "test_vmapvjp_has_batch_rule",
+ vmapvjp_fail.union(
+ {
+ skip(
+ "to"
+ ), # RuntimeError: required rank 4 tensor to use channels_last format
+ xfail("view_as_complex"),
+ xfail("cummax"),
+ xfail("cummin"),
+ xfail("fill"),
+ xfail(
+ "narrow"
+ ), # Batching rule not implemented for `narrow.Tensor` (and view op)
+ xfail("special.log_ndtr"),
+ xfail("linalg.householder_product"),
+ xfail("masked_fill"),
+ xfail("masked_scatter"),
+ xfail("masked_select"),
+ xfail("nanquantile"),
+ xfail("ormqr"),
+ xfail("put"),
+ xfail(
+ "scatter_reduce", "sum"
+ ), # aten::scatter_reduce.two hit the vmap fallback
+ xfail(
+ "scatter_reduce", "mean"
+ ), # aten::scatter_reduce.two hit the vmap fallback
+ xfail(
+ "scatter_reduce", "amin"
+ ), # aten::scatter_reduce.two hit the vmap fallback
+ xfail(
+ "scatter_reduce", "amax"
+ ), # aten::scatter_reduce.two hit the vmap fallback
+ xfail("quantile"),
+ xfail("renorm"),
+ xfail("take"),
+ xfail("tensor_split"),
+ xfail("to_sparse"),
+ xfail("unfold"),
+ xfail("unfold_copy"),
+ xfail("nn.functional.dropout"),
+ xfail("fft.ihfft2"),
+ xfail("fft.ihfftn"),
+ xfail("nn.functional.gaussian_nll_loss"),
+ xfail("nn.functional.bilinear"),
+ xfail("nn.functional.fractional_max_pool3d"),
+ xfail("nn.functional.ctc_loss"),
+ xfail("nn.functional.rrelu"),
+ xfail("nn.functional.embedding_bag"),
+ xfail("nn.functional.fractional_max_pool2d"),
+ xfail("nn.functional.feature_alpha_dropout", "with_train"),
+ xfail("pca_lowrank", ""),
+ xfail("nn.functional.dropout2d", ""),
+ xfail("nn.functional.feature_alpha_dropout", "without_train"),
+ xfail("svd_lowrank", ""),
+ xfail("nn.functional.max_unpool2d", ""),
+ xfail("nn.functional.multi_margin_loss", ""),
+ xfail("nn.functional.multilabel_margin_loss", ""),
+ xfail("nn.functional.pdist", ""),
+ xfail("scatter_reduce", "prod"),
+ xfail("nn.functional.max_unpool1d", ""),
+ xfail("nn.functional.max_unpool3d", ""),
+ xfail("nn.functional.max_unpool3d", "grad"),
+ xfail("nn.functional.soft_margin_loss", ""),
+ xfail("nn.functional.max_unpool1d", "grad"),
+ xfail("nn.functional.max_unpool2d", "grad"),
+ xfail("linalg.lu", ""),
+ xfail("cdouble", ""),
+ xfail("cfloat", ""),
+ xfail("chalf", ""),
+ xfail("index_reduce", ""),
+ xfail("nn.functional.dropout3d", ""),
+ xfail("as_strided_scatter", ""),
+ xfail("_segment_reduce", "offsets"),
+ xfail("_segment_reduce", "lengths"),
+ xfail("sparse.sampled_addmm", ""),
+ xfail("sparse.mm", "reduce"),
+ xfail("native_batch_norm"),
+ xfail("_native_batch_norm_legit"),
+ # TODO: implement batching rule
+ xfail("_batch_norm_with_update"),
+ xfail("native_dropout_backward"),
+ xfail(
+ "index_fill"
+ ), # aten::_unique hit the vmap fallback which is currently disabled
+ }
+ ),
+ )
def test_vmapvjp_has_batch_rule(self, device, dtype, op):
if not op.supports_autograd:
self.skipTest("Skipped! Autograd not supported.")
@@ -1262,73 +1541,106 @@ class TestOperators(TestCase):
for sample in samples:
cotangents = get_sample_cotangents(op, sample)
fn, args = get_vjp_fn_and_args_with_cotangents(op, sample, cotangents)
- is_batch_norm_and_training = is_batch_norm_training(op.name, sample.kwargs)
+ is_batch_norm_and_training = is_batch_norm_training(
+ op.name, sample.kwargs
+ )
for loop_out, batched_out in get_fallback_and_vmap_exhaustive(
- fn, args, {}, is_batch_norm_and_training=is_batch_norm_and_training, compute_loop_out=False):
+ fn,
+ args,
+ {},
+ is_batch_norm_and_training=is_batch_norm_and_training,
+ compute_loop_out=False,
+ ):
pass
for a_op in op.aliases:
- fn, args = get_vjp_fn_and_args_with_cotangents(a_op, sample, cotangents)
+ fn, args = get_vjp_fn_and_args_with_cotangents(
+ a_op, sample, cotangents
+ )
for loop_out, batched_out in get_fallback_and_vmap_exhaustive(
- fn, args, {}, is_batch_norm_and_training=is_batch_norm_and_training, compute_loop_out=False):
+ fn,
+ args,
+ {},
+ is_batch_norm_and_training=is_batch_norm_and_training,
+ compute_loop_out=False,
+ ):
pass
check_vmap_fallback(self, test, op, dry_run=False)
@ops(op_db + additional_op_db + autograd_function_db, allowed_dtypes=(torch.float,))
- @skipOps('TestOperators', 'test_vjpvmap', vjp_fail.union({
- skip('bernoulli', ''), # vjpvmap testing can't handle randomness
- skip('normal', ''), # vjpvmap testing can't handle randomness
- skip('normal', 'number_mean'), # vjpvmap testing can't handle randomness
- skip('nn.functional.rrelu'), # randomness
- skip('nn.functional.feature_alpha_dropout', 'with_train'), # randomness
- skip('nn.functional.feature_alpha_dropout', 'without_train'), # randomness
- skip('nn.functional.scaled_dot_product_attention'),
- xfail('torch.ops.aten._efficient_attention_forward'), # outputs ints
- skip('nn.functional.multi_head_attention_forward'), # randomness
- skip('nn.functional.alpha_dropout'), # randomness
- skip('to'), # RuntimeError: required rank 4 tensor to use channels_last format
- skip('to_sparse', ''), # non-dense output
- skip('ormqr', ''), # takes too long
- xfail("NumpyCubeNotComposableAutogradFunction"), # Not composable autograd.Function
-
- # fallback path doesn't work
- # All of the following are bugs and need to be fixed
- xfail('__getitem__', ''),
- xfail('index_put', ''),
- xfail('view_as_complex'),
- xfail('nn.functional.gaussian_nll_loss'),
- xfail('masked_select'),
- xfail('narrow'), # Batching rule not implemented for `narrow.Tensor` (and view op)
- skip('nn.functional.fractional_max_pool3d'), # generator works on cpu, fails on cuda
- skip('nn.functional.fractional_max_pool2d'), # generator works on cpu, fails on cuda
- xfail('column_stack', ''),
- xfail('nn.functional.dropout2d', ''),
- xfail('svd_lowrank', ''),
- xfail('pca_lowrank', ''),
- xfail('clamp'),
- # something weird happening with channels_last
- xfail('bfloat16'),
- xfail('double'),
- xfail('float'),
- xfail('half'),
- xfail('cdouble'),
- xfail('cfloat'),
- xfail('nn.functional.dropout3d', ''),
- xfail('as_strided_scatter', ''),
- xfail('sparse.sampled_addmm', ''),
- xfail('sparse.mm', 'reduce'),
- xfail("native_batch_norm"),
- xfail("_native_batch_norm_legit"),
- # TODO: implement batching rule
- xfail("_batch_norm_with_update"),
- xfail('as_strided', 'partial_views'),
- }))
+ @skipOps(
+ "TestOperators",
+ "test_vjpvmap",
+ vjp_fail.union(
+ {
+ skip("bernoulli", ""), # vjpvmap testing can't handle randomness
+ skip("normal", ""), # vjpvmap testing can't handle randomness
+ skip(
+ "normal", "number_mean"
+ ), # vjpvmap testing can't handle randomness
+ skip("nn.functional.rrelu"), # randomness
+ skip("nn.functional.feature_alpha_dropout", "with_train"), # randomness
+ skip(
+ "nn.functional.feature_alpha_dropout", "without_train"
+ ), # randomness
+ skip("nn.functional.scaled_dot_product_attention"),
+ xfail("torch.ops.aten._efficient_attention_forward"), # outputs ints
+ skip("nn.functional.multi_head_attention_forward"), # randomness
+ skip("nn.functional.alpha_dropout"), # randomness
+ skip(
+ "to"
+ ), # RuntimeError: required rank 4 tensor to use channels_last format
+ skip("to_sparse", ""), # non-dense output
+ skip("ormqr", ""), # takes too long
+ xfail(
+ "NumpyCubeNotComposableAutogradFunction"
+ ), # Not composable autograd.Function
+ # fallback path doesn't work
+ # All of the following are bugs and need to be fixed
+ xfail("__getitem__", ""),
+ xfail("index_put", ""),
+ xfail("view_as_complex"),
+ xfail("nn.functional.gaussian_nll_loss"),
+ xfail("masked_select"),
+ xfail(
+ "narrow"
+ ), # Batching rule not implemented for `narrow.Tensor` (and view op)
+ skip(
+ "nn.functional.fractional_max_pool3d"
+ ), # generator works on cpu, fails on cuda
+ skip(
+ "nn.functional.fractional_max_pool2d"
+ ), # generator works on cpu, fails on cuda
+ xfail("column_stack", ""),
+ xfail("nn.functional.dropout2d", ""),
+ xfail("svd_lowrank", ""),
+ xfail("pca_lowrank", ""),
+ xfail("clamp"),
+ # something weird happening with channels_last
+ xfail("bfloat16"),
+ xfail("double"),
+ xfail("float"),
+ xfail("half"),
+ xfail("cdouble"),
+ xfail("cfloat"),
+ xfail("nn.functional.dropout3d", ""),
+ xfail("as_strided_scatter", ""),
+ xfail("sparse.sampled_addmm", ""),
+ xfail("sparse.mm", "reduce"),
+ xfail("native_batch_norm"),
+ xfail("_native_batch_norm_legit"),
+ # TODO: implement batching rule
+ xfail("_batch_norm_with_update"),
+ xfail("as_strided", "partial_views"),
+ }
+ ),
+ )
def test_vjpvmap(self, device, dtype, op):
# NB: there is no vjpvmap_has_batch_rule test because that is almost
# certainly redundant with the vmap_has_batch_rule test in test_vmap.py
# one-off skip
- if op.name == 'nn.functional.dropout':
+ if op.name == "nn.functional.dropout":
self.skipTest("Skipped!")
if not op.supports_autograd:
@@ -1342,21 +1654,28 @@ class TestOperators(TestCase):
return
samples = op.sample_inputs(device, dtype, requires_grad=True)
- batch_norm_fns = ("nn.functional.batch_norm", "nn.functional.instance_norm") # instance norm calls batch norm
+ batch_norm_fns = (
+ "nn.functional.batch_norm",
+ "nn.functional.instance_norm",
+ ) # instance norm calls batch norm
is_batch_norm = op.name in batch_norm_fns
for sample in samples:
args = [sample.input] + list(sample.args)
kwargs = sample.kwargs
- is_batch_norm_and_training = is_batch_norm and is_batch_norm_training(op.name, kwargs)
- generator = generate_vmap_inputs(args, kwargs,
- is_batch_norm_and_training=is_batch_norm_and_training)
+ is_batch_norm_and_training = is_batch_norm and is_batch_norm_training(
+ op.name, kwargs
+ )
+ generator = generate_vmap_inputs(
+ args, kwargs, is_batch_norm_and_training=is_batch_norm_and_training
+ )
for batched_args, in_dims, kwargs in generator:
vmapped_op = vmap(op, in_dims)
- fn, primals = normalize_op_input_output2(vmapped_op, batched_args, kwargs,
- sample.output_process_fn_grad)
+ fn, primals = normalize_op_input_output2(
+ vmapped_op, batched_args, kwargs, sample.output_process_fn_grad
+ )
result = fn(*primals)
cotangents = tree_map(lambda x: torch.randn_like(x), result)
@@ -1368,7 +1687,9 @@ class TestOperators(TestCase):
self.assertEqual(result_vjps, expected_vjps)
- def _compare_jacobians_of_vjp(self, fn, cotangents_and_primals, argnums=None, atol_rtol=None):
+ def _compare_jacobians_of_vjp(
+ self, fn, cotangents_and_primals, argnums=None, atol_rtol=None
+ ):
if argnums is None:
argnums = tuple(range(len(cotangents_and_primals)))
@@ -1390,50 +1711,85 @@ class TestOperators(TestCase):
self.assertEqual(jacobian_jvp, jacobian_vjp)
@ops(op_db + additional_op_db + autograd_function_db, allowed_dtypes=(torch.float,))
- @skipOps('TestOperators', 'test_jvpvjp', vjp_fail.union({
- xfail('to_sparse', ''), # NYI
- # RuntimeError: Trying to set a forward gradient that has a different size than that of the original Tensor,
- # this is not supported. Tensor is of size [5, 2, 3] while the given forward gradient is of size [1, 2, 3].
- xfail('normal', ''),
- xfail('cdist', ''), # NYI: forward-AD for _cdist_forward
- xfail('cholesky', ''), # NYI: forward-AD for cholesky
- xfail('nn.functional.embedding_bag', ''), # NYI: forward-AD for _embedding_bag
- xfail('nn.functional.grid_sample', ''), # NYI: forward AD for grid_sampler_2d
- xfail('grid_sampler_2d', ''), # NYI: forward AD for grid_sampler_2d
- xfail('nn.functional.hardsigmoid', ''), # NYI: forward AD for hardsigmoid_backward
- xfail('nn.functional.huber_loss', ''), # NYI: forward AD for huber_loss_backward
- xfail('NumpyCubeNotComposableAutogradFunction'), # not composable
- xfail('ormqr', ''), # NYI: forward AD for ormqr
- xfail('nn.functional.multilabel_margin_loss', ''), # NYI: multilabel_margin_loss_forward
- xfail('nn.functional.soft_margin_loss', ''), # NYI: forward-AD for soft_margin_loss_backward
- xfail('nn.functional.ctc_loss', ''), # NYI: forward-AD for _ctc_loss
- xfail('nn.functional.pdist', ''), # NYI: forward-AD with _pdist_forward
- skip('nn.functional.scaled_dot_product_attention'),
- xfail('torch.ops.aten._efficient_attention_forward'), # outputs ints
- xfail('nn.functional.multi_margin_loss', ''), # NYI: forward AD with multi_margin_loss
- skip('linalg.householder_product', '', device_type='cuda'), # flaky, I'm not sure why
- xfail('sparse.sampled_addmm', ''), # Sparse tensors have no strides
- xfail('_segment_reduce', 'offsets'), # NYI: forward-AD for _segment_reduce
- xfail('sparse.mm', 'reduce'), # Sparse tensors have no strides
- xfail('index_reduce', ''), # NYI: forward-AD for index_reduce
- xfail('_segment_reduce', 'lengths'), # NYI: forward-AD for _segment_reduce
- xfail('native_dropout_backward'), # NYI
-
- }))
- @opsToleranceOverride('TestOperators', 'test_jvpvjp', (
- tol1('masked.prod',
- {torch.float32: tol(atol=1e-04, rtol=1.3e-05)}),
- tol1('masked.cumprod',
- {torch.float32: tol(atol=1e-04, rtol=5e-04)}),
- tol1('cumprod',
- {torch.float32: tol(atol=1e-04, rtol=1.3e-05)}, device_type='cuda'),
- tol1('linalg.vander',
- {torch.float32: tol(atol=1e-04, rtol=1.3e-05)}, device_type='cuda'),
- tol1('nn.functional.group_norm',
- {torch.float32: tol(atol=1e-03, rtol=1e-03)}),
- tol2('linalg.pinv', 'hermitian',
- {torch.float32: tol(atol=5e-03, rtol=5e-03)}),
- ))
+ @skipOps(
+ "TestOperators",
+ "test_jvpvjp",
+ vjp_fail.union(
+ {
+ xfail("to_sparse", ""), # NYI
+ # RuntimeError: Trying to set a forward gradient that has a different size than that of the original Tensor,
+ # this is not supported. Tensor is of size [5, 2, 3] while the given forward gradient is of size [1, 2, 3].
+ xfail("normal", ""),
+ xfail("cdist", ""), # NYI: forward-AD for _cdist_forward
+ xfail("cholesky", ""), # NYI: forward-AD for cholesky
+ xfail(
+ "nn.functional.embedding_bag", ""
+ ), # NYI: forward-AD for _embedding_bag
+ xfail(
+ "nn.functional.grid_sample", ""
+ ), # NYI: forward AD for grid_sampler_2d
+ xfail("grid_sampler_2d", ""), # NYI: forward AD for grid_sampler_2d
+ xfail(
+ "nn.functional.hardsigmoid", ""
+ ), # NYI: forward AD for hardsigmoid_backward
+ xfail(
+ "nn.functional.huber_loss", ""
+ ), # NYI: forward AD for huber_loss_backward
+ xfail("NumpyCubeNotComposableAutogradFunction"), # not composable
+ xfail("ormqr", ""), # NYI: forward AD for ormqr
+ xfail(
+ "nn.functional.multilabel_margin_loss", ""
+ ), # NYI: multilabel_margin_loss_forward
+ xfail(
+ "nn.functional.soft_margin_loss", ""
+ ), # NYI: forward-AD for soft_margin_loss_backward
+ xfail("nn.functional.ctc_loss", ""), # NYI: forward-AD for _ctc_loss
+ xfail("nn.functional.pdist", ""), # NYI: forward-AD with _pdist_forward
+ skip("nn.functional.scaled_dot_product_attention"),
+ xfail("torch.ops.aten._efficient_attention_forward"), # outputs ints
+ xfail(
+ "nn.functional.multi_margin_loss", ""
+ ), # NYI: forward AD with multi_margin_loss
+ skip(
+ "linalg.householder_product", "", device_type="cuda"
+ ), # flaky, I'm not sure why
+ xfail("sparse.sampled_addmm", ""), # Sparse tensors have no strides
+ xfail(
+ "_segment_reduce", "offsets"
+ ), # NYI: forward-AD for _segment_reduce
+ xfail("sparse.mm", "reduce"), # Sparse tensors have no strides
+ xfail("index_reduce", ""), # NYI: forward-AD for index_reduce
+ xfail(
+ "_segment_reduce", "lengths"
+ ), # NYI: forward-AD for _segment_reduce
+ xfail("native_dropout_backward"), # NYI
+ }
+ ),
+ )
+ @opsToleranceOverride(
+ "TestOperators",
+ "test_jvpvjp",
+ (
+ tol1("masked.prod", {torch.float32: tol(atol=1e-04, rtol=1.3e-05)}),
+ tol1("masked.cumprod", {torch.float32: tol(atol=1e-04, rtol=5e-04)}),
+ tol1(
+ "cumprod",
+ {torch.float32: tol(atol=1e-04, rtol=1.3e-05)},
+ device_type="cuda",
+ ),
+ tol1(
+ "linalg.vander",
+ {torch.float32: tol(atol=1e-04, rtol=1.3e-05)},
+ device_type="cuda",
+ ),
+ tol1(
+ "nn.functional.group_norm", {torch.float32: tol(atol=1e-03, rtol=1e-03)}
+ ),
+ tol2(
+ "linalg.pinv", "hermitian", {torch.float32: tol(atol=5e-03, rtol=5e-03)}
+ ),
+ ),
+ )
def test_jvpvjp(self, device, dtype, op):
if not op.supports_autograd:
self.skipTest("Skipped! Autograd not supported.")
@@ -1458,7 +1814,9 @@ class TestOperators(TestCase):
_, vjp_fn = vjp(fn, *primals)
return vjp_fn(cotangents)
- result = jvp(push_vjp, (primals, cotangents), (primals_tangents, cotangents_tangents))
+ result = jvp(
+ push_vjp, (primals, cotangents), (primals_tangents, cotangents_tangents)
+ )
self.assertEqual(len(result), 2)
def tree_map2(fn, first, second):
@@ -1473,131 +1831,203 @@ class TestOperators(TestCase):
primal_duals = tree_map2(fwAD.make_dual, primals, primals_tangents)
_, vjp_fn = ref_vjp(fn, *primal_duals)
- cotangent_duals = tree_map2(fwAD.make_dual, cotangents, cotangents_tangents)
+ cotangent_duals = tree_map2(
+ fwAD.make_dual, cotangents, cotangents_tangents
+ )
result = vjp_fn(cotangent_duals)
flat_result, spec = tree_flatten(result)
- primals_out, tangents_out = zip(*[fwAD.unpack_dual(r) for r in flat_result])
- tangents_out = [t if t is not None else torch.zeros_like(p)
- for p, t in zip(primals_out, tangents_out)]
- expected = (tree_unflatten(primals_out, spec), tree_unflatten(tangents_out, spec))
+ primals_out, tangents_out = zip(
+ *[fwAD.unpack_dual(r) for r in flat_result]
+ )
+ tangents_out = [
+ t if t is not None else torch.zeros_like(p)
+ for p, t in zip(primals_out, tangents_out)
+ ]
+ expected = (
+ tree_unflatten(primals_out, spec),
+ tree_unflatten(tangents_out, spec),
+ )
return expected
- expected = reference(primals, cotangents, primals_tangents, cotangents_tangents)
+ expected = reference(
+ primals, cotangents, primals_tangents, cotangents_tangents
+ )
self.assertEqual(result, expected)
@with_tf32_off # https://github.com/pytorch/pytorch/issues/86798
- @skipOps('TestOperators', 'test_vmapjvpvjp', vjp_fail.union({
- # Following operators take too long, hence skipped
- skip('atleast_1d'),
- skip('atleast_2d'),
- skip('atleast_3d'),
- skip('meshgrid', 'list_of_tensors'),
- skip('meshgrid', 'variadic_tensors'),
- skip('broadcast_tensors'),
- skip('linalg.lstsq'),
- skip('nn.functional.bilinear'),
- skip('native_layer_norm'),
- skip('ormqr'),
-
- # Not actually a problem
- xfail('NumpyCubeNotComposableAutogradFunction'), # not composable
- xfail('NumpyExpMarkDirtyAutogradFunction'), # vmap: inplace into a regular tensor
-
- # Potential bugs/errors
- xfail('as_strided'), # AssertionError: Tensor-likes are not close!
- xfail('as_strided', 'partial_views'), # AssertionError: Tensor-likes are not close!
- xfail('as_strided_scatter'), # AssertionError: Tensor-likes are not close!
- xfail('bernoulli'), # calls random op
- xfail('bfloat16'), # required rank 4 tensor to use channels_last format
- xfail('cdist'), # Forward AD not implemented and no decomposition
- xfail('cdouble'), # required rank 4 tensor to use channels_last format
- xfail('cfloat'), # required rank 4 tensor to use channels_last format
- xfail('chalf'), # required rank 4 tensor to use channels_last format
- xfail('cholesky'), # Forward AD not implemented and no decomposition
- xfail('ormqr'), # Forward AD not implemented and no decomposition
- xfail('double'), # required rank 4 tensor to use channels_last format
- xfail('float'), # required rank 4 tensor to use channels_last format
- xfail('half'), # required rank 4 tensor to use channels_last format
- xfail('index_reduce'), # Forward AD not implemented and no decomposition
- xfail('mvlgamma', 'mvlgamma_p_1'), # vmap: inplace into a regular tensor
- xfail('mvlgamma', 'mvlgamma_p_3'), # vmap: inplace into a regular tensor
- xfail('mvlgamma', 'mvlgamma_p_5'), # vmap: inplace into a regular tensor
- xfail('nanquantile'), # Batching rule not implemented for aten::equal
- # RuntimeError: Batch norm got a batched tensor as input while the
- # running_mean or running_var, which will be updated in place,
- # were not batched.
- xfail('nn.functional.batch_norm'),
- xfail('nn.functional.batch_norm', 'without_cudnn'),
- xfail("nn.functional.ctc_loss"), # ForwardAD not implemented and no decomposition
- xfail('nn.functional.dropout2d'), # calls random op
- xfail('nn.functional.dropout3d'), # calls random op
- xfail('nn.functional.dropout'), # calls random op
- xfail('nn.functional.scaled_dot_product_attention'), # randomness
- xfail('torch.ops.aten._efficient_attention_forward'), # outputs ints
- xfail('nn.functional.multi_head_attention_forward'), # randomness
- xfail('nn.functional.embedding_bag'), # Forward AD not implemented and no decomposition
- xfail('nn.functional.alpha_dropout'), # calls randomn op
- xfail('nn.functional.feature_alpha_dropout', 'with_train'), # calls random op
- xfail('nn.functional.fractional_max_pool2d'), # calls random op
- xfail('nn.functional.fractional_max_pool3d'), # calls random op
- xfail('nn.functional.gaussian_nll_loss'), # data depenedant flow
- xfail('nn.functional.grid_sample'), # Forward AD not implemented and no decomposition
- xfail('grid_sampler_2d'), # Forward AD not implemented and no decomposition
- xfail('nn.functional.hardsigmoid'), # Forward AD not implemented and no decomposition
- xfail('nn.functional.hinge_embedding_loss'), # vmap: inplace into a regular tensor
- xfail('nn.functional.huber_loss'), # Forward AD not implemented and no decomposition
- # RuntimeError: Batch norm got a batched tensor as input while the
- # running_mean or running_var, which will be updated in place,
- # were not batched.
- xfail('nn.functional.instance_norm'),
- # NYI: Tensor.clone(memory_format) inside vmap is only supported with
- # memory_format torch.preserve_format or torch.contiguous_format (got ChannelsLast)
- xfail('nn.functional.max_unpool2d'),
- xfail('nn.functional.max_unpool2d', 'grad'),
- xfail('nn.functional.multi_margin_loss'), # Forward AD not implemented and no decomposition
- xfail('nn.functional.multilabel_margin_loss'), # Forward AD not implemented and no decomposition
- xfail('nn.functional.pdist'), # Forward AD not implemented and no decomposition
- xfail('nn.functional.rrelu'), # vmap: we do not yet support aten::rrelu_with_noise.
- xfail('nn.functional.soft_margin_loss'), # Forward AD not implemented and no decomposition
- xfail('normal'), # calls random op
- xfail('normal', 'number_mean'), # calls random op
- xfail('pca_lowrank'), # calls random op
- xfail('quantile'), # Batching rule not implemented for aten::equal
- xfail('scatter_reduce', 'prod'), # Forward AD not implemented and no decomposition
- xfail('_segment_reduce', 'lengths'), # Forward AD not implemented and no decomposition
- xfail('_segment_reduce', 'offsets'), # Forward AD not implemented and no decomposition
- xfail('sparse.sampled_addmm'), # RuntimeError: Sparse CSR tensors do not have strides
- xfail('sparse.mm', 'reduce'), # RuntimeError: Sparse CSR tensors do not have strides
- xfail('svd_lowrank'), # calls random op
- xfail('to'), # RuntimeError: required rank 4 tensor to use channels_last format
- xfail('to_sparse'), # Forward AD not implemented and no decomposition
- xfail('view_as_complex'), # RuntimeError: Tensor must have a last dimension with stride 1
- # RuntimeError: Batch norm got a batched tensor as
- # input while the running_mean or running_var, which will be updated in
- # place, were not batched.
- xfail("native_batch_norm"),
- xfail("_native_batch_norm_legit"),
- # TODO: implement batching rule
- xfail("_batch_norm_with_update"),
- xfail('native_dropout_backward'),
- }))
+ @skipOps(
+ "TestOperators",
+ "test_vmapjvpvjp",
+ vjp_fail.union(
+ {
+ # Following operators take too long, hence skipped
+ skip("atleast_1d"),
+ skip("atleast_2d"),
+ skip("atleast_3d"),
+ skip("meshgrid", "list_of_tensors"),
+ skip("meshgrid", "variadic_tensors"),
+ skip("broadcast_tensors"),
+ skip("linalg.lstsq"),
+ skip("nn.functional.bilinear"),
+ skip("native_layer_norm"),
+ skip("ormqr"),
+ # Not actually a problem
+ xfail("NumpyCubeNotComposableAutogradFunction"), # not composable
+ xfail(
+ "NumpyExpMarkDirtyAutogradFunction"
+ ), # vmap: inplace into a regular tensor
+ # Potential bugs/errors
+ xfail("as_strided"), # AssertionError: Tensor-likes are not close!
+ xfail(
+ "as_strided", "partial_views"
+ ), # AssertionError: Tensor-likes are not close!
+ xfail(
+ "as_strided_scatter"
+ ), # AssertionError: Tensor-likes are not close!
+ xfail("bernoulli"), # calls random op
+ xfail("bfloat16"), # required rank 4 tensor to use channels_last format
+ xfail("cdist"), # Forward AD not implemented and no decomposition
+ xfail("cdouble"), # required rank 4 tensor to use channels_last format
+ xfail("cfloat"), # required rank 4 tensor to use channels_last format
+ xfail("chalf"), # required rank 4 tensor to use channels_last format
+ xfail("cholesky"), # Forward AD not implemented and no decomposition
+ xfail("ormqr"), # Forward AD not implemented and no decomposition
+ xfail("double"), # required rank 4 tensor to use channels_last format
+ xfail("float"), # required rank 4 tensor to use channels_last format
+ xfail("half"), # required rank 4 tensor to use channels_last format
+ xfail(
+ "index_reduce"
+ ), # Forward AD not implemented and no decomposition
+ xfail(
+ "mvlgamma", "mvlgamma_p_1"
+ ), # vmap: inplace into a regular tensor
+ xfail(
+ "mvlgamma", "mvlgamma_p_3"
+ ), # vmap: inplace into a regular tensor
+ xfail(
+ "mvlgamma", "mvlgamma_p_5"
+ ), # vmap: inplace into a regular tensor
+ xfail("nanquantile"), # Batching rule not implemented for aten::equal
+ # RuntimeError: Batch norm got a batched tensor as input while the
+ # running_mean or running_var, which will be updated in place,
+ # were not batched.
+ xfail("nn.functional.batch_norm"),
+ xfail("nn.functional.batch_norm", "without_cudnn"),
+ xfail(
+ "nn.functional.ctc_loss"
+ ), # ForwardAD not implemented and no decomposition
+ xfail("nn.functional.dropout2d"), # calls random op
+ xfail("nn.functional.dropout3d"), # calls random op
+ xfail("nn.functional.dropout"), # calls random op
+ xfail("nn.functional.scaled_dot_product_attention"), # randomness
+ xfail("torch.ops.aten._efficient_attention_forward"), # outputs ints
+ xfail("nn.functional.multi_head_attention_forward"), # randomness
+ xfail(
+ "nn.functional.embedding_bag"
+ ), # Forward AD not implemented and no decomposition
+ xfail("nn.functional.alpha_dropout"), # calls randomn op
+ xfail(
+ "nn.functional.feature_alpha_dropout", "with_train"
+ ), # calls random op
+ xfail("nn.functional.fractional_max_pool2d"), # calls random op
+ xfail("nn.functional.fractional_max_pool3d"), # calls random op
+ xfail("nn.functional.gaussian_nll_loss"), # data depenedant flow
+ xfail(
+ "nn.functional.grid_sample"
+ ), # Forward AD not implemented and no decomposition
+ xfail(
+ "grid_sampler_2d"
+ ), # Forward AD not implemented and no decomposition
+ xfail(
+ "nn.functional.hardsigmoid"
+ ), # Forward AD not implemented and no decomposition
+ xfail(
+ "nn.functional.hinge_embedding_loss"
+ ), # vmap: inplace into a regular tensor
+ xfail(
+ "nn.functional.huber_loss"
+ ), # Forward AD not implemented and no decomposition
+ # RuntimeError: Batch norm got a batched tensor as input while the
+ # running_mean or running_var, which will be updated in place,
+ # were not batched.
+ xfail("nn.functional.instance_norm"),
+ # NYI: Tensor.clone(memory_format) inside vmap is only supported with
+ # memory_format torch.preserve_format or torch.contiguous_format (got ChannelsLast)
+ xfail("nn.functional.max_unpool2d"),
+ xfail("nn.functional.max_unpool2d", "grad"),
+ xfail(
+ "nn.functional.multi_margin_loss"
+ ), # Forward AD not implemented and no decomposition
+ xfail(
+ "nn.functional.multilabel_margin_loss"
+ ), # Forward AD not implemented and no decomposition
+ xfail(
+ "nn.functional.pdist"
+ ), # Forward AD not implemented and no decomposition
+ xfail(
+ "nn.functional.rrelu"
+ ), # vmap: we do not yet support aten::rrelu_with_noise.
+ xfail(
+ "nn.functional.soft_margin_loss"
+ ), # Forward AD not implemented and no decomposition
+ xfail("normal"), # calls random op
+ xfail("normal", "number_mean"), # calls random op
+ xfail("pca_lowrank"), # calls random op
+ xfail("quantile"), # Batching rule not implemented for aten::equal
+ xfail(
+ "scatter_reduce", "prod"
+ ), # Forward AD not implemented and no decomposition
+ xfail(
+ "_segment_reduce", "lengths"
+ ), # Forward AD not implemented and no decomposition
+ xfail(
+ "_segment_reduce", "offsets"
+ ), # Forward AD not implemented and no decomposition
+ xfail(
+ "sparse.sampled_addmm"
+ ), # RuntimeError: Sparse CSR tensors do not have strides
+ xfail(
+ "sparse.mm", "reduce"
+ ), # RuntimeError: Sparse CSR tensors do not have strides
+ xfail("svd_lowrank"), # calls random op
+ xfail(
+ "to"
+ ), # RuntimeError: required rank 4 tensor to use channels_last format
+ xfail("to_sparse"), # Forward AD not implemented and no decomposition
+ xfail(
+ "view_as_complex"
+ ), # RuntimeError: Tensor must have a last dimension with stride 1
+ # RuntimeError: Batch norm got a batched tensor as
+ # input while the running_mean or running_var, which will be updated in
+ # place, were not batched.
+ xfail("native_batch_norm"),
+ xfail("_native_batch_norm_legit"),
+ # TODO: implement batching rule
+ xfail("_batch_norm_with_update"),
+ xfail("native_dropout_backward"),
+ }
+ ),
+ )
@ops(op_db + additional_op_db + autograd_function_db, allowed_dtypes=(torch.float,))
@toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
- @opsToleranceOverride('TestOperators', 'test_vmapjvpvjp', (
- tol1('linalg.svd',
- {torch.float32: tol(atol=5e-04, rtol=5e-04)}),
- tol1('linalg.householder_product',
- {torch.float32: tol(atol=5e-03, rtol=5e-03)}),
- tol1('linalg.multi_dot',
- {torch.float32: tol(atol=5e-04, rtol=5e-04)}),
- tol2('linalg.pinv', 'hermitian',
- {torch.float32: tol(atol=5e-04, rtol=5e-04)}),
- tol1('svd',
- {torch.float32: tol(atol=5e-04, rtol=5e-04)}),
- tol1('matrix_exp',
- {torch.float32: tol(atol=5e-04, rtol=5e-04)}),
- ))
+ @opsToleranceOverride(
+ "TestOperators",
+ "test_vmapjvpvjp",
+ (
+ tol1("linalg.svd", {torch.float32: tol(atol=5e-04, rtol=5e-04)}),
+ tol1(
+ "linalg.householder_product",
+ {torch.float32: tol(atol=5e-03, rtol=5e-03)},
+ ),
+ tol1("linalg.multi_dot", {torch.float32: tol(atol=5e-04, rtol=5e-04)}),
+ tol2(
+ "linalg.pinv", "hermitian", {torch.float32: tol(atol=5e-04, rtol=5e-04)}
+ ),
+ tol1("svd", {torch.float32: tol(atol=5e-04, rtol=5e-04)}),
+ tol1("matrix_exp", {torch.float32: tol(atol=5e-04, rtol=5e-04)}),
+ ),
+ )
def test_vmapjvpvjp(self, device, dtype, op):
# Since we test `jvpvjp` separately,
# in this we just check that vmap of `jvpvjp`
@@ -1625,7 +2055,9 @@ class TestOperators(TestCase):
_, vjp_fn = vjp(fn, *primals)
return vjp_fn(cotangents)
- args, spec = tree_flatten(((primals, cotangents), (primals_tangents, cotangents_tangents)))
+ args, spec = tree_flatten(
+ ((primals, cotangents), (primals_tangents, cotangents_tangents))
+ )
def jvp_of_vjp(*args):
(primals, tangents) = tree_unflatten(args, spec)
@@ -1637,7 +2069,11 @@ class TestOperators(TestCase):
is_batch_norm_and_training = is_batch_norm_training(op, sample.kwargs)
generator = get_fallback_and_vmap_exhaustive(
- jvp_of_vjp, args, {}, is_batch_norm_and_training=is_batch_norm_and_training)
+ jvp_of_vjp,
+ args,
+ {},
+ is_batch_norm_and_training=is_batch_norm_and_training,
+ )
for loop_out, batched_out in generator:
self.assertEqual(loop_out, batched_out)
@@ -1645,9 +2081,9 @@ class TestOperators(TestCase):
if shape is None:
return (None,)
return (
- torch.full(shape, -1000., device=device),
+ torch.full(shape, -1000.0, device=device),
torch.zeros(shape, device=device),
- torch.full(shape, 1000., device=device),
+ torch.full(shape, 1000.0, device=device),
)
def _arg_and_kwarg_options(self, args_options, kwargs_options):
@@ -1662,18 +2098,29 @@ class TestOperators(TestCase):
((N, C, d1, d2, d3), (N, d1, d2, d3), (C,)),
((N, C, d1, d2, d3), (N, d1, d2, d3), None),
)
- kwargs_options = ({'ignore_index': 0, 'reduction': 'mean'}, {'reduction': 'sum'}, {'reduction': 'none'}, {})
+ kwargs_options = (
+ {"ignore_index": 0, "reduction": "mean"},
+ {"reduction": "sum"},
+ {"reduction": "none"},
+ {},
+ )
for input_shape, target_shape, weight_shape in shapes:
input_options = self._make_extremal_inputs(input_shape, device)
- for input, kwargs in self._arg_and_kwarg_options((input_options,), kwargs_options):
+ for input, kwargs in self._arg_and_kwarg_options(
+ (input_options,), kwargs_options
+ ):
if weight_shape is None:
weight = None
else:
weight = torch.randn(weight_shape, device=device)
target = torch.randint(0, C, target_shape, device=device)
- target[0] = 1 # since we're ignoring index 0, at least one element must be non-zero
+ target[
+ 0
+ ] = 1 # since we're ignoring index 0, at least one element must be non-zero
- fn = functools.partial(torch.nn.functional.nll_loss, target=target, weight=weight, **kwargs)
+ fn = functools.partial(
+ torch.nn.functional.nll_loss, target=target, weight=weight, **kwargs
+ )
result = fn(input)
cotangents = torch.randn_like(result, device=device)
self._compare_jacobians_of_vjp(fn, (cotangents, input))
@@ -1681,48 +2128,64 @@ class TestOperators(TestCase):
def test_extremal_numerics_l1_loss(self, device):
N, C, H, W = 3, 4, 5, 6
shapes = ((N, C), (N, C, H), (N, C, H, W))
- kwargs_options = ({'reduction': 'sum'}, {'reduction': 'none'}, {})
+ kwargs_options = ({"reduction": "sum"}, {"reduction": "none"}, {})
for shape in shapes:
input_options = self._make_extremal_inputs(shape, device)
target_options = self._make_extremal_inputs(shape, device)
- for input, target, kwargs in self._arg_and_kwarg_options((input_options, target_options), kwargs_options):
+ for input, target, kwargs in self._arg_and_kwarg_options(
+ (input_options, target_options), kwargs_options
+ ):
result = torch.nn.functional.l1_loss(input, target)
cotangents = torch.randn_like(result, device=device)
- self._compare_jacobians_of_vjp(torch.nn.functional.l1_loss, (cotangents, input, target))
+ self._compare_jacobians_of_vjp(
+ torch.nn.functional.l1_loss, (cotangents, input, target)
+ )
def test_extremal_numerics_mse_loss(self, device):
N, C, H, W = 3, 4, 5, 6
shapes = ((N, C), (N, C, H), (N, C, H, W))
- kwargs_options = ({'reduction': 'sum'}, {'reduction': 'none'}, {})
+ kwargs_options = ({"reduction": "sum"}, {"reduction": "none"}, {})
for shape in shapes:
input_options = self._make_extremal_inputs(shape, device)
target_options = self._make_extremal_inputs(shape, device)
- for input, target, kwargs in self._arg_and_kwarg_options((input_options, target_options), kwargs_options):
+ for input, target, kwargs in self._arg_and_kwarg_options(
+ (input_options, target_options), kwargs_options
+ ):
result = torch.nn.functional.mse_loss(input, target)
cotangents = torch.randn_like(result, device=device)
- self._compare_jacobians_of_vjp(torch.nn.functional.mse_loss, (cotangents, input, target))
+ self._compare_jacobians_of_vjp(
+ torch.nn.functional.mse_loss, (cotangents, input, target)
+ )
def test_extremal_numerics_softmax(self, device):
N, C, H, W = 3, 4, 5, 6
shapes = ((N, C), (N, C, H), (N, C, H, W))
- kwargs_options = ({'dim': 1}, {})
+ kwargs_options = ({"dim": 1}, {})
for shape in shapes:
input_options = self._make_extremal_inputs(shape, device)
- for input, kwargs in self._arg_and_kwarg_options((input_options,), kwargs_options):
+ for input, kwargs in self._arg_and_kwarg_options(
+ (input_options,), kwargs_options
+ ):
result = torch.nn.functional.softmax(input)
cotangents = torch.randn_like(result, device=device)
- self._compare_jacobians_of_vjp(torch.nn.functional.softmax, (cotangents, input))
+ self._compare_jacobians_of_vjp(
+ torch.nn.functional.softmax, (cotangents, input)
+ )
def test_extremal_numerics_log_softmax(self, device):
N, C, H, W = 3, 4, 5, 6
shapes = ((N, C), (N, C, H), (N, C, H, W))
- kwargs_options = ({'dim': 1}, {})
+ kwargs_options = ({"dim": 1}, {})
for shape in shapes:
input_options = self._make_extremal_inputs(shape, device)
- for input, kwargs in self._arg_and_kwarg_options((input_options,), kwargs_options):
+ for input, kwargs in self._arg_and_kwarg_options(
+ (input_options,), kwargs_options
+ ):
result = torch.nn.functional.log_softmax(input)
cotangents = torch.randn_like(result, device=device)
- self._compare_jacobians_of_vjp(torch.nn.functional.log_softmax, (cotangents, input))
+ self._compare_jacobians_of_vjp(
+ torch.nn.functional.log_softmax, (cotangents, input)
+ )
def test_extremal_numerics_cross_entropy(self, device):
N, C = 3, 4
@@ -1743,11 +2206,13 @@ class TestOperators(TestCase):
)
for input_shape, target_shape, weight_shape in shapes:
input_options = self._make_extremal_inputs(input_shape, device)
- kwargs_options = [{'reduction': 'sum'}, {'reduction': 'none'}, {}]
+ kwargs_options = [{"reduction": "sum"}, {"reduction": "none"}, {}]
if input_shape != target_shape:
- kwargs_options.append({'ignore_index': 0, 'reduction': 'mean'})
+ kwargs_options.append({"ignore_index": 0, "reduction": "mean"})
- for input, kwargs in self._arg_and_kwarg_options((input_options,), kwargs_options):
+ for input, kwargs in self._arg_and_kwarg_options(
+ (input_options,), kwargs_options
+ ):
if weight_shape is None:
weight = None
else:
@@ -1756,29 +2221,47 @@ class TestOperators(TestCase):
if input_shape == target_shape:
target = torch.rand(target_shape, device=device)
elif len(target_shape) == 0:
- target = torch.tensor(1, device=device) # must be non-zero since ignore_index may be 0
+ target = torch.tensor(
+ 1, device=device
+ ) # must be non-zero since ignore_index may be 0
else:
target = torch.randint(0, C, target_shape, device=device)
- fn = functools.partial(torch.nn.functional.cross_entropy, target=target, weight=weight, **kwargs)
+ fn = functools.partial(
+ torch.nn.functional.cross_entropy,
+ target=target,
+ weight=weight,
+ **kwargs,
+ )
result = fn(input)
cotangents = torch.randn_like(result, device=device)
- self._compare_jacobians_of_vjp(fn, (cotangents, input), atol_rtol=(1e-4, 1e-5))
+ self._compare_jacobians_of_vjp(
+ fn, (cotangents, input), atol_rtol=(1e-4, 1e-5)
+ )
def test_extremal_numerics_binary_cross_entropy(self, device):
N, C, H, W = 3, 4, 5, 6
shapes = ((N, C), (N, C, H), (N, C, H, W))
for shape in shapes:
weight_options = self._make_extremal_inputs(shape, device)
- kwargs_options = [{'reduction': 'sum'}, {'reduction': 'none'}, {}]
+ kwargs_options = [{"reduction": "sum"}, {"reduction": "none"}, {}]
- for weight, kwargs in self._arg_and_kwarg_options((weight_options,), kwargs_options):
+ for weight, kwargs in self._arg_and_kwarg_options(
+ (weight_options,), kwargs_options
+ ):
input = torch.rand(shape, device=device)
target = torch.rand(shape, device=device)
- fn = functools.partial(torch.nn.functional.binary_cross_entropy, target=target, weight=weight, **kwargs)
+ fn = functools.partial(
+ torch.nn.functional.binary_cross_entropy,
+ target=target,
+ weight=weight,
+ **kwargs,
+ )
result = fn(input)
cotangents = torch.randn_like(result, device=device)
- self._compare_jacobians_of_vjp(fn, (cotangents, input), atol_rtol=(1e-4, 2e-5))
+ self._compare_jacobians_of_vjp(
+ fn, (cotangents, input), atol_rtol=(1e-4, 2e-5)
+ )
def test_extremal_numerics_layer_norm(self, device):
N, C, H, W = 3, 4, 5, 6
@@ -1789,65 +2272,100 @@ class TestOperators(TestCase):
weight_options = self._make_extremal_inputs(normalized_shape, device)
bias_options = self._make_extremal_inputs(normalized_shape, device)
- for input, bias, weight in self._arg_and_kwarg_options((input_options, bias_options, weight_options), ()):
+ for input, bias, weight in self._arg_and_kwarg_options(
+ (input_options, bias_options, weight_options), ()
+ ):
+
def fn(input, weight, bias):
- return torch.nn.functional.layer_norm(input, normalized_shape, weight=weight, bias=bias)
+ return torch.nn.functional.layer_norm(
+ input, normalized_shape, weight=weight, bias=bias
+ )
+
result = fn(input, weight, bias)
cotangents = torch.randn_like(result, device=device)
self._compare_jacobians_of_vjp(fn, (cotangents, input, weight, bias))
@with_tf32_off # https://github.com/pytorch/pytorch/issues/86798
- @ops(op_db + additional_op_db + autograd_function_db, allowed_dtypes=(torch.float32, torch.double))
- @skipOps('TestOperators', 'test_vmap_autograd_grad', {
- # The size of tensor a (4) must match the size of tensor b (10) at non-singleton dimension 0
- xfail('masked_select'),
- xfail('nn.functional.max_unpool2d', 'grad'), # contiguous call
- xfail('nn.functional.max_unpool2d'), # contiguous call
- xfail('to_sparse'), # dispatch key issue
- xfail('torch.ops.aten._efficient_attention_forward'), # outputs ints
-
- # https://github.com/pytorch/pytorch/issues/96560
- decorate('xlogy', decorator=skipIfRocm),
-
- # numerical inconsistencies, look like bugs
- skip('matrix_exp', dtypes=(torch.float32,), device_type='cuda'), # fails on linux, passes on windows
- skip('ldexp', dtypes=(torch.float32,), device_type='cpu'), # fails on all but mac
- skip('__rmatmul__'), # flaky needs investigation
- skip('matmul'), # flaky needs investigation
- skip('nn.functional.conv_transpose3d'), # flaky needs investigation
- skip('nn.functional.conv_transpose2d'), # flaky needs investigation
- skip('nn.functional.conv_transpose1d'), # flaky needs investigation
- skip('nn.functional.layer_norm', dtypes=(torch.float32,), device_type='cpu'), # fails on windows
- skip('linalg.lu_factor', dtypes=(torch.float32,), device_type='cuda'), # fails on all but windows
- skip('linalg.lu_factor_ex', dtypes=(torch.float32,), device_type='cuda'), # fails on all but windows
- skip('linalg.multi_dot', '', device_type='cpu'),
- skip('sparse.sampled_addmm', ''),
- skip('sparse.mm', 'reduce'),
- skip('native_layer_norm', '', device_type='cpu'),
-
- # RuntimeError: Expected contiguous tensor, but got
- # non-contiguous tensor for argument #2 'grad_output'
- decorate(
- '_batch_norm_with_update',
- decorator=expectedFailureIf(TEST_WITH_ROCM),
- device_type='cuda',
- )
- })
- @opsToleranceOverride('TestOperators', 'test_vmap_autograd_grad', (
- tol1('linalg.householder_product',
- {torch.float32: tol(atol=5e-04, rtol=9e-03)}, device_type='cuda'),
- tol1('linalg.householder_product',
- {torch.float32: tol(atol=1e-04, rtol=1e-04)}, device_type='cpu'),
- tol1('linalg.multi_dot',
- {torch.float32: tol(atol=2e-04, rtol=1e-04)}, device_type='cuda'),
- tol2('linalg.pinv', 'hermitian',
- {torch.float32: tol(atol=5e-06, rtol=5e-06)}),
- tol1('nn.functional.conv3d',
- {torch.float32: tol(atol=5e-04, rtol=9e-03)}),
- ))
+ @ops(
+ op_db + additional_op_db + autograd_function_db,
+ allowed_dtypes=(torch.float32, torch.double),
+ )
+ @skipOps(
+ "TestOperators",
+ "test_vmap_autograd_grad",
+ {
+ # The size of tensor a (4) must match the size of tensor b (10) at non-singleton dimension 0
+ xfail("masked_select"),
+ xfail("nn.functional.max_unpool2d", "grad"), # contiguous call
+ xfail("nn.functional.max_unpool2d"), # contiguous call
+ xfail("to_sparse"), # dispatch key issue
+ xfail("torch.ops.aten._efficient_attention_forward"), # outputs ints
+ # https://github.com/pytorch/pytorch/issues/96560
+ decorate("xlogy", decorator=skipIfRocm),
+ # numerical inconsistencies, look like bugs
+ skip(
+ "matrix_exp", dtypes=(torch.float32,), device_type="cuda"
+ ), # fails on linux, passes on windows
+ skip(
+ "ldexp", dtypes=(torch.float32,), device_type="cpu"
+ ), # fails on all but mac
+ skip("__rmatmul__"), # flaky needs investigation
+ skip("matmul"), # flaky needs investigation
+ skip("nn.functional.conv_transpose3d"), # flaky needs investigation
+ skip("nn.functional.conv_transpose2d"), # flaky needs investigation
+ skip("nn.functional.conv_transpose1d"), # flaky needs investigation
+ skip(
+ "nn.functional.layer_norm", dtypes=(torch.float32,), device_type="cpu"
+ ), # fails on windows
+ skip(
+ "linalg.lu_factor", dtypes=(torch.float32,), device_type="cuda"
+ ), # fails on all but windows
+ skip(
+ "linalg.lu_factor_ex", dtypes=(torch.float32,), device_type="cuda"
+ ), # fails on all but windows
+ skip("linalg.multi_dot", "", device_type="cpu"),
+ skip("sparse.sampled_addmm", ""),
+ skip("sparse.mm", "reduce"),
+ skip("native_layer_norm", "", device_type="cpu"),
+ # RuntimeError: Expected contiguous tensor, but got
+ # non-contiguous tensor for argument #2 'grad_output'
+ decorate(
+ "_batch_norm_with_update",
+ decorator=expectedFailureIf(TEST_WITH_ROCM),
+ device_type="cuda",
+ ),
+ },
+ )
+ @opsToleranceOverride(
+ "TestOperators",
+ "test_vmap_autograd_grad",
+ (
+ tol1(
+ "linalg.householder_product",
+ {torch.float32: tol(atol=5e-04, rtol=9e-03)},
+ device_type="cuda",
+ ),
+ tol1(
+ "linalg.householder_product",
+ {torch.float32: tol(atol=1e-04, rtol=1e-04)},
+ device_type="cpu",
+ ),
+ tol1(
+ "linalg.multi_dot",
+ {torch.float32: tol(atol=2e-04, rtol=1e-04)},
+ device_type="cuda",
+ ),
+ tol2(
+ "linalg.pinv", "hermitian", {torch.float32: tol(atol=5e-06, rtol=5e-06)}
+ ),
+ tol1("nn.functional.conv3d", {torch.float32: tol(atol=5e-04, rtol=9e-03)}),
+ ),
+ )
def test_vmap_autograd_grad(self, device, dtype, op):
def is_differentiable(inp):
- return isinstance(inp, Tensor) and (inp.grad_fn is not None or inp.requires_grad)
+ return isinstance(inp, Tensor) and (
+ inp.grad_fn is not None or inp.requires_grad
+ )
def get_flat_differentiable(tree):
flattened = pytree.tree_leaves(tree)
@@ -1855,7 +2373,11 @@ class TestOperators(TestCase):
def get_differentiable_linked(list1, list2):
paired_list = zip(list1, list2)
- paired_list = tuple((first, second) for (first, second) in paired_list if is_differentiable(first))
+ paired_list = tuple(
+ (first, second)
+ for (first, second) in paired_list
+ if is_differentiable(first)
+ )
return zip(*paired_list)
def filter_none(out):
@@ -1879,15 +2401,27 @@ class TestOperators(TestCase):
if not isinstance(out_flattened, torch.Tensor):
out_flattened = pytree.tree_leaves(out)
cotangents_flattened = pytree.tree_leaves(cotangents)
- out_flattened, cotangents_flattened = get_differentiable_linked(out_flattened, cotangents_flattened)
+ out_flattened, cotangents_flattened = get_differentiable_linked(
+ out_flattened, cotangents_flattened
+ )
return filter_none(
- torch.autograd.grad(out_flattened, get_flat_differentiable(primals), cotangents_flattened,
- retain_graph=True, allow_unused=True))
+ torch.autograd.grad(
+ out_flattened,
+ get_flat_differentiable(primals),
+ cotangents_flattened,
+ retain_graph=True,
+ allow_unused=True,
+ )
+ )
is_batch_norm_and_training = is_batch_norm_training(op, sample_input.kwargs)
generator = get_fallback_and_vmap_exhaustive(
- compute_grad, (cotangents,), {}, is_batch_norm_and_training=is_batch_norm_and_training)
+ compute_grad,
+ (cotangents,),
+ {},
+ is_batch_norm_and_training=is_batch_norm_and_training,
+ )
for loop_out, batched_out in generator:
self.assertEqual(loop_out, batched_out)
@@ -1905,42 +2439,78 @@ class TestOperators(TestCase):
# non-contiguous because vmap will expand. This will happen during both levels of vmap
A = torch.randn(4, 4)
k = torch.randn(4, 5, B1, B0)
- fn, args = get_jvp_variant_primals_tangents(torch.linalg.solve, SampleInput(A, args=(k,)))
+ fn, args = get_jvp_variant_primals_tangents(
+ torch.linalg.solve, SampleInput(A, args=(k,))
+ )
in_dims_all = (None, -1, None, -1)
batched_out = vmap(vmap(fn, in_dims=in_dims_all), in_dims=in_dims_all)(*args)
loop_out = loop2(fn, in_dims_all, in_dims_all, 0, 0, B0, B1, *args)
self.assertEqual(loop_out, batched_out)
- @ops(filter(lambda op: op.name in aliasing_ops, op_db + additional_op_db), allowed_dtypes=(torch.float,))
+ @ops(
+ filter(lambda op: op.name in aliasing_ops, op_db + additional_op_db),
+ allowed_dtypes=(torch.float,),
+ )
@parametrize("grad_op", ["jvp", "vjp"])
def test_view_then_inplace(self, device, dtype, op, grad_op):
for sample_input in op.sample_inputs(device, dtype):
+
def f(x):
- op(sample_input.input, *sample_input.args, **sample_input.kwargs).copy_(x)
+ op(sample_input.input, *sample_input.args, **sample_input.kwargs).copy_(
+ x
+ )
return x
- without_grad = op(sample_input.input, *sample_input.args, **sample_input.kwargs)
+ without_grad = op(
+ sample_input.input, *sample_input.args, **sample_input.kwargs
+ )
if grad_op == "jvp":
- with self.assertRaisesRegex(RuntimeError, "During a grad .* attempted to call in-place operation"):
- jvp(f, (torch.randn_like(without_grad),), (torch.randn_like(without_grad),))
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "During a grad .* attempted to call in-place operation",
+ ):
+ jvp(
+ f,
+ (torch.randn_like(without_grad),),
+ (torch.randn_like(without_grad),),
+ )
else:
assert grad_op == "vjp"
- with self.assertRaisesRegex(RuntimeError, "During a grad .* attempted to call in-place operation"):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "During a grad .* attempted to call in-place operation",
+ ):
vjp(f, torch.randn_like(without_grad))
- @ops(filter(lambda op: op.name in aliasing_ops_list_return, op_db + additional_op_db), allowed_dtypes=(torch.float,))
+ @ops(
+ filter(
+ lambda op: op.name in aliasing_ops_list_return, op_db + additional_op_db
+ ),
+ allowed_dtypes=(torch.float,),
+ )
@parametrize("grad_op", ["jvp", "vjp"])
def test_view_then_inplace_list_return(self, device, dtype, op, grad_op):
for sample_input in op.sample_inputs(device, dtype):
+
def f(x):
- op(sample_input.input, *sample_input.args, **sample_input.kwargs)[0].copy_(x)
+ op(sample_input.input, *sample_input.args, **sample_input.kwargs)[
+ 0
+ ].copy_(x)
return x
- without_grad = op(sample_input.input, *sample_input.args, **sample_input.kwargs)[0]
- with self.assertRaisesRegex(RuntimeError, "During a grad .* attempted to call in-place operation"):
+ without_grad = op(
+ sample_input.input, *sample_input.args, **sample_input.kwargs
+ )[0]
+ with self.assertRaisesRegex(
+ RuntimeError, "During a grad .* attempted to call in-place operation"
+ ):
if grad_op == "jvp":
- jvp(f, (torch.randn_like(without_grad),), (torch.randn_like(without_grad),))
+ jvp(
+ f,
+ (torch.randn_like(without_grad),),
+ (torch.randn_like(without_grad),),
+ )
else:
assert grad_op == "vjp"
vjp(f, torch.randn_like(without_grad))
@@ -1957,6 +2527,7 @@ class TestOperators(TestCase):
]
for op in ops:
+
def f(x):
op(captured).copy_(x)
return x
@@ -1964,11 +2535,21 @@ class TestOperators(TestCase):
captured = torch.randn(4, 3, 3)
without_grad = op(captured)
if grad_op == "jvp":
- with self.assertRaisesRegex(RuntimeError, "During a grad .* attempted to call in-place operation"):
- jvp(f, (torch.randn_like(without_grad),), (torch.randn_like(without_grad),))
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "During a grad .* attempted to call in-place operation",
+ ):
+ jvp(
+ f,
+ (torch.randn_like(without_grad),),
+ (torch.randn_like(without_grad),),
+ )
else:
assert grad_op == "vjp"
- with self.assertRaisesRegex(RuntimeError, "During a grad .* attempted to call in-place operation"):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "During a grad .* attempted to call in-place operation",
+ ):
vjp(f, torch.randn_like(without_grad))
@with_tf32_off # https://github.com/pytorch/pytorch/issues/86798
@@ -1981,9 +2562,13 @@ class TestOperators(TestCase):
# - autograd.Function. The mechanism is via PyDispatcher/HigherOrderOperator, not the
# regular PyTorch dispatcher, so it's good to exercise more caution.
@ops(autograd_function_db, allowed_dtypes=(torch.float32,))
- @skipOps('TestOperators', 'test_vmapvjpvmap', {
- xfail('NumpyCubeNotComposableAutogradFunction'), # Not composable
- })
+ @skipOps(
+ "TestOperators",
+ "test_vmapvjpvmap",
+ {
+ xfail("NumpyCubeNotComposableAutogradFunction"), # Not composable
+ },
+ )
def test_vmapvjpvmap(self, device, dtype, op):
samples = op.sample_inputs(device, dtype, requires_grad=True)
B = 2
@@ -1996,9 +2581,14 @@ class TestOperators(TestCase):
inner_mapped_op = functools.partial(loop, op, in_dims, 0, B)
inner_vmapped_fn, primals = normalize_op_input_output2(
- inner_vmapped_op, batched_args, kwargs, sample.output_process_fn_grad)
+ inner_vmapped_op,
+ batched_args,
+ kwargs,
+ sample.output_process_fn_grad,
+ )
inner_mapped_fn, _ = normalize_op_input_output2(
- inner_mapped_op, batched_args, kwargs, sample.output_process_fn_grad)
+ inner_mapped_op, batched_args, kwargs, sample.output_process_fn_grad
+ )
result = inner_mapped_fn(*primals)
cotangents = tree_map(lambda x: torch.rand_like(x), result)
@@ -2006,6 +2596,7 @@ class TestOperators(TestCase):
def inner(primals, cotangents):
_, vjp_fn = vjp(fn, *primals)
return vjp_fn(cotangents)
+
return inner
vjpvmap_fn = apply_vjp(inner_vmapped_fn)
@@ -2024,9 +2615,13 @@ class TestOperators(TestCase):
# See NOTE: [three-transform testing]
@ops(autograd_function_db, allowed_dtypes=(torch.float32,))
- @skipOps('TestOperators', 'test_vjpvmapvmap', {
- xfail('NumpyCubeNotComposableAutogradFunction'), # Not composable
- })
+ @skipOps(
+ "TestOperators",
+ "test_vjpvmapvmap",
+ {
+ xfail("NumpyCubeNotComposableAutogradFunction"), # Not composable
+ },
+ )
def test_vjpvmapvmap(self, device, dtype, op):
samples = op.sample_inputs(device, dtype, requires_grad=True)
B = 2
@@ -2044,9 +2639,11 @@ class TestOperators(TestCase):
mapped_op = functools.partial(loop, inner_mapped_op, in_dims, 0, B)
vmapped_fn, primals = normalize_op_input_output2(
- vmapped_op, batched_args, kwargs, sample.output_process_fn_grad)
+ vmapped_op, batched_args, kwargs, sample.output_process_fn_grad
+ )
mapped_fn, _ = normalize_op_input_output2(
- mapped_op, batched_args, kwargs, sample.output_process_fn_grad)
+ mapped_op, batched_args, kwargs, sample.output_process_fn_grad
+ )
result = mapped_fn(*primals)
cotangents = tree_map(lambda x: torch.rand_like(x), result)
@@ -2061,9 +2658,13 @@ class TestOperators(TestCase):
# See NOTE: [three-transform testing]
@ops(autograd_function_db, allowed_dtypes=(torch.float32,))
- @skipOps('TestOperators', 'test_vjpvjpvmap', {
- xfail('NumpyCubeNotComposableAutogradFunction'), # Not composable
- })
+ @skipOps(
+ "TestOperators",
+ "test_vjpvjpvmap",
+ {
+ xfail("NumpyCubeNotComposableAutogradFunction"), # Not composable
+ },
+ )
def test_vjpvjpvmap(self, device, dtype, op):
samples = op.sample_inputs(device, dtype, requires_grad=True)
B = 2
@@ -2075,8 +2676,12 @@ class TestOperators(TestCase):
inner_vmapped_op = vmap(op, in_dims)
inner_mapped_op = functools.partial(loop, op, in_dims, 0, B)
- vjpmap_fn, args = get_vjpfull_variant2(inner_mapped_op, batched_args, kwargs)
- vjpvmap_fn, _ = get_vjpfull_variant2(inner_vmapped_op, batched_args, kwargs)
+ vjpmap_fn, args = get_vjpfull_variant2(
+ inner_mapped_op, batched_args, kwargs
+ )
+ vjpvmap_fn, _ = get_vjpfull_variant2(
+ inner_vmapped_op, batched_args, kwargs
+ )
vjpvjpvmap_fn, new_args = get_vjpfull_variant2(vjpvmap_fn, args, {})
vjpvjpmap_fn, _ = get_vjpfull_variant2(vjpmap_fn, args, {})
@@ -2090,9 +2695,13 @@ class TestOperators(TestCase):
# we only test it on the things we're not sure about:
# - the autograd.Function <> functorch interaction
@ops(autograd_function_db, allowed_dtypes=(torch.float32,))
- @skipOps('TestOperators', 'test_jvpvmap', {
- xfail('NumpyCubeNotComposableAutogradFunction'), # Not composable
- })
+ @skipOps(
+ "TestOperators",
+ "test_jvpvmap",
+ {
+ xfail("NumpyCubeNotComposableAutogradFunction"), # Not composable
+ },
+ )
def test_jvpvmap(self, device, dtype, op):
samples = op.sample_inputs(device, dtype, requires_grad=True)
B = 2
@@ -2105,11 +2714,14 @@ class TestOperators(TestCase):
inner_mapped_op = functools.partial(loop, op, in_dims, 0, B)
jvpvmap_op, primals = get_jvp_variant_primals_tangents2(
- inner_vmapped_op, batched_args, kwargs,
- sample.output_process_fn_grad)
+ inner_vmapped_op,
+ batched_args,
+ kwargs,
+ sample.output_process_fn_grad,
+ )
jvpmap_op, _ = get_jvp_variant_primals_tangents2(
- inner_mapped_op, batched_args, kwargs,
- sample.output_process_fn_grad)
+ inner_mapped_op, batched_args, kwargs, sample.output_process_fn_grad
+ )
expected = jvpmap_op(*primals)
result = jvpvmap_op(*primals)
@@ -2117,9 +2729,13 @@ class TestOperators(TestCase):
# See NOTE: [three-transform testing]
@ops(autograd_function_db, allowed_dtypes=(torch.float32,))
- @skipOps('TestOperators', 'test_jvpvmapvmap', {
- xfail('NumpyCubeNotComposableAutogradFunction'), # Not composable
- })
+ @skipOps(
+ "TestOperators",
+ "test_jvpvmapvmap",
+ {
+ xfail("NumpyCubeNotComposableAutogradFunction"), # Not composable
+ },
+ )
def test_jvpvmapvmap(self, device, dtype, op):
samples = op.sample_inputs(device, dtype, requires_grad=True)
B = 2
@@ -2137,11 +2753,11 @@ class TestOperators(TestCase):
mapped_op = functools.partial(loop, inner_mapped_op, in_dims, 0, B)
jvpvmapvmap_fn, primals = get_jvp_variant_primals_tangents2(
- vmapped_op, batched_args, kwargs,
- sample.output_process_fn_grad)
+ vmapped_op, batched_args, kwargs, sample.output_process_fn_grad
+ )
jvpmapmap_fn, _ = get_jvp_variant_primals_tangents2(
- mapped_op, batched_args, kwargs,
- sample.output_process_fn_grad)
+ mapped_op, batched_args, kwargs, sample.output_process_fn_grad
+ )
expected = jvpmapmap_fn(*primals)
result = jvpvmapvmap_fn(*primals)
@@ -2150,9 +2766,13 @@ class TestOperators(TestCase):
# See NOTE: [three-transform testing]
@with_tf32_off # https://github.com/pytorch/pytorch/issues/86798
@ops(autograd_function_db, allowed_dtypes=(torch.float32,))
- @skipOps('TestOperators', 'test_vmapjvpvmap', {
- xfail('NumpyCubeNotComposableAutogradFunction'), # Not composable
- })
+ @skipOps(
+ "TestOperators",
+ "test_vmapjvpvmap",
+ {
+ xfail("NumpyCubeNotComposableAutogradFunction"), # Not composable
+ },
+ )
def test_vmapjvpvmap(self, device, dtype, op):
samples = op.sample_inputs(device, dtype, requires_grad=True)
B = 2
@@ -2165,11 +2785,14 @@ class TestOperators(TestCase):
inner_mapped_op = functools.partial(loop, op, in_dims, 0, B)
jvpvmap_fn, primals = get_jvp_variant_primals_tangents2(
- inner_vmapped_op, batched_args, kwargs,
- sample.output_process_fn_grad)
+ inner_vmapped_op,
+ batched_args,
+ kwargs,
+ sample.output_process_fn_grad,
+ )
jvpmap_fn, _ = get_jvp_variant_primals_tangents2(
- inner_mapped_op, batched_args, kwargs,
- sample.output_process_fn_grad)
+ inner_mapped_op, batched_args, kwargs, sample.output_process_fn_grad
+ )
generator = generate_vmap_inputs(primals, {})
@@ -2184,9 +2807,13 @@ class TestOperators(TestCase):
# See NOTE: [three-transform testing]
@ops(autograd_function_db, allowed_dtypes=(torch.float32,))
- @skipOps('TestOperators', 'test_jvpjvpvmap', {
- xfail('NumpyCubeNotComposableAutogradFunction'), # Not composable
- })
+ @skipOps(
+ "TestOperators",
+ "test_jvpjvpvmap",
+ {
+ xfail("NumpyCubeNotComposableAutogradFunction"), # Not composable
+ },
+ )
def test_jvpjvpvmap(self, device, dtype, op):
samples = op.sample_inputs(device, dtype, requires_grad=True)
B = 2
@@ -2199,11 +2826,18 @@ class TestOperators(TestCase):
inner_mapped_op = functools.partial(loop, op, in_dims, 0, B)
jvpmap_fn, args = get_jvp_variant_primals_tangents2(
- inner_mapped_op, batched_args, kwargs, sample.output_process_fn_grad)
+ inner_mapped_op, batched_args, kwargs, sample.output_process_fn_grad
+ )
jvpvmap_fn, _ = get_jvp_variant_primals_tangents2(
- inner_vmapped_op, batched_args, kwargs, sample.output_process_fn_grad)
-
- jvpjvpvmap_fn, new_args = get_jvp_variant_primals_tangents2(jvpvmap_fn, args, {})
+ inner_vmapped_op,
+ batched_args,
+ kwargs,
+ sample.output_process_fn_grad,
+ )
+
+ jvpjvpvmap_fn, new_args = get_jvp_variant_primals_tangents2(
+ jvpvmap_fn, args, {}
+ )
jvpjvpmap_fn, _ = get_jvp_variant_primals_tangents2(jvpmap_fn, args, {})
expected = jvpjvpmap_fn(*new_args)
@@ -2212,9 +2846,13 @@ class TestOperators(TestCase):
# See NOTE: [three-transform testing]
@ops(autograd_function_db, allowed_dtypes=(torch.float32,))
- @skipOps('TestOperators', 'test_jvpvjpvmap', {
- xfail('NumpyCubeNotComposableAutogradFunction'), # Not composable
- })
+ @skipOps(
+ "TestOperators",
+ "test_jvpvjpvmap",
+ {
+ xfail("NumpyCubeNotComposableAutogradFunction"), # Not composable
+ },
+ )
def test_jvpvjpvmap(self, device, dtype, op):
samples = op.sample_inputs(device, dtype, requires_grad=True)
B = 2
@@ -2226,10 +2864,16 @@ class TestOperators(TestCase):
inner_vmapped_op = vmap(op, in_dims)
inner_mapped_op = functools.partial(loop, op, in_dims, 0, B)
- vjpmap_fn, args = get_vjpfull_variant2(inner_mapped_op, batched_args, kwargs)
- vjpvmap_fn, _ = get_vjpfull_variant2(inner_vmapped_op, batched_args, kwargs)
+ vjpmap_fn, args = get_vjpfull_variant2(
+ inner_mapped_op, batched_args, kwargs
+ )
+ vjpvmap_fn, _ = get_vjpfull_variant2(
+ inner_vmapped_op, batched_args, kwargs
+ )
- jvpvjpvmap_fn, new_args = get_jvp_variant_primals_tangents2(vjpvmap_fn, args, {})
+ jvpvjpvmap_fn, new_args = get_jvp_variant_primals_tangents2(
+ vjpvmap_fn, args, {}
+ )
jvpvjpmap_fn, _ = get_jvp_variant_primals_tangents2(vjpmap_fn, args, {})
expected = jvpvjpmap_fn(*new_args)
@@ -2266,11 +2910,14 @@ class TestOperators(TestCase):
expected_o, expected_fn = vjp(func, x)
self.assertEqual(actual_o, expected_o)
- self.assertEqual(expected_fn(torch.ones_like(expected_o)), actual_fn(torch.ones_like(actual_o)))
+ self.assertEqual(
+ expected_fn(torch.ones_like(expected_o)),
+ actual_fn(torch.ones_like(actual_o)),
+ )
only_for = ("cpu", "cuda")
instantiate_device_type_tests(TestOperators, globals(), only_for=only_for)
-if __name__ == '__main__':
+if __name__ == "__main__":
run_tests()
diff --git a/test/functorch/test_parsing.py b/test/functorch/test_parsing.py
index 56043ab64c..ec0f16c724 100644
--- a/test/functorch/test_parsing.py
+++ b/test/functorch/test_parsing.py
@@ -28,9 +28,13 @@ from typing import Any, Callable, Dict
from unittest import mock
from functorch.einops._parsing import (
- AnonymousAxis, ParsedExpression, parse_pattern, validate_rearrange_expressions, _ellipsis
+ _ellipsis,
+ AnonymousAxis,
+ parse_pattern,
+ ParsedExpression,
+ validate_rearrange_expressions,
)
-from torch.testing._internal.common_utils import TestCase, run_tests
+from torch.testing._internal.common_utils import run_tests, TestCase
mock_anonymous_axis_eq: Callable[[AnonymousAxis, object], bool] = (
lambda self, other: isinstance(other, AnonymousAxis) and self.value == other.value
@@ -39,11 +43,11 @@ mock_anonymous_axis_eq: Callable[[AnonymousAxis, object], bool] = (
class TestAnonymousAxis(TestCase):
def test_anonymous_axes(self) -> None:
- a, b = AnonymousAxis('2'), AnonymousAxis('2')
+ a, b = AnonymousAxis("2"), AnonymousAxis("2")
self.assertNotEqual(a, b)
- with mock.patch.object(AnonymousAxis, '__eq__', mock_anonymous_axis_eq):
- c, d = AnonymousAxis('2'), AnonymousAxis('3')
+ with mock.patch.object(AnonymousAxis, "__eq__", mock_anonymous_axis_eq):
+ c, d = AnonymousAxis("2"), AnonymousAxis("3")
self.assertEqual(a, c)
self.assertEqual(b, c)
self.assertNotEqual(a, d)
@@ -53,92 +57,146 @@ class TestAnonymousAxis(TestCase):
class TestParsedExpression(TestCase):
def test_elementary_axis_name(self) -> None:
- for name in ['a', 'b', 'h', 'dx', 'h1', 'zz', 'i9123', 'somelongname',
- 'Alex', 'camelCase', 'u_n_d_e_r_score', 'unreasonablyLongAxisName']:
+ for name in [
+ "a",
+ "b",
+ "h",
+ "dx",
+ "h1",
+ "zz",
+ "i9123",
+ "somelongname",
+ "Alex",
+ "camelCase",
+ "u_n_d_e_r_score",
+ "unreasonablyLongAxisName",
+ ]:
self.assertTrue(ParsedExpression.check_axis_name(name))
- for name in ['', '2b', '12', '_startWithUnderscore', 'endWithUnderscore_', '_', '...', _ellipsis]:
+ for name in [
+ "",
+ "2b",
+ "12",
+ "_startWithUnderscore",
+ "endWithUnderscore_",
+ "_",
+ "...",
+ _ellipsis,
+ ]:
self.assertFalse(ParsedExpression.check_axis_name(name))
def test_invalid_expressions(self) -> None:
# double ellipsis should raise an error
- ParsedExpression('... a b c d')
+ ParsedExpression("... a b c d")
with self.assertRaises(ValueError):
- ParsedExpression('... a b c d ...')
+ ParsedExpression("... a b c d ...")
with self.assertRaises(ValueError):
- ParsedExpression('... a b c (d ...)')
+ ParsedExpression("... a b c (d ...)")
with self.assertRaises(ValueError):
- ParsedExpression('(... a) b c (d ...)')
+ ParsedExpression("(... a) b c (d ...)")
# double/missing/enclosed parenthesis
- ParsedExpression('(a) b c (d ...)')
+ ParsedExpression("(a) b c (d ...)")
with self.assertRaises(ValueError):
- ParsedExpression('(a)) b c (d ...)')
+ ParsedExpression("(a)) b c (d ...)")
with self.assertRaises(ValueError):
- ParsedExpression('(a b c (d ...)')
+ ParsedExpression("(a b c (d ...)")
with self.assertRaises(ValueError):
- ParsedExpression('(a) (()) b c (d ...)')
+ ParsedExpression("(a) (()) b c (d ...)")
with self.assertRaises(ValueError):
- ParsedExpression('(a) ((b c) (d ...))')
+ ParsedExpression("(a) ((b c) (d ...))")
# invalid identifiers
- ParsedExpression('camelCase under_scored cApiTaLs ß ...')
+ ParsedExpression("camelCase under_scored cApiTaLs ß ...")
with self.assertRaises(ValueError):
- ParsedExpression('1a')
+ ParsedExpression("1a")
with self.assertRaises(ValueError):
- ParsedExpression('_pre')
+ ParsedExpression("_pre")
with self.assertRaises(ValueError):
- ParsedExpression('...pre')
+ ParsedExpression("...pre")
with self.assertRaises(ValueError):
- ParsedExpression('pre...')
+ ParsedExpression("pre...")
- @mock.patch.object(AnonymousAxis, '__eq__', mock_anonymous_axis_eq)
+ @mock.patch.object(AnonymousAxis, "__eq__", mock_anonymous_axis_eq)
def test_parse_expression(self, *mocks: mock.MagicMock) -> None:
- parsed = ParsedExpression('a1 b1 c1 d1')
- self.assertSetEqual(parsed.identifiers, {'a1', 'b1', 'c1', 'd1'})
- self.assertListEqual(parsed.composition, [['a1'], ['b1'], ['c1'], ['d1']])
+ parsed = ParsedExpression("a1 b1 c1 d1")
+ self.assertSetEqual(parsed.identifiers, {"a1", "b1", "c1", "d1"})
+ self.assertListEqual(parsed.composition, [["a1"], ["b1"], ["c1"], ["d1"]])
self.assertFalse(parsed.has_non_unitary_anonymous_axes)
self.assertFalse(parsed.has_ellipsis)
- parsed = ParsedExpression('() () () ()')
+ parsed = ParsedExpression("() () () ()")
self.assertSetEqual(parsed.identifiers, set())
self.assertListEqual(parsed.composition, [[], [], [], []])
self.assertFalse(parsed.has_non_unitary_anonymous_axes)
self.assertFalse(parsed.has_ellipsis)
- parsed = ParsedExpression('1 1 1 ()')
+ parsed = ParsedExpression("1 1 1 ()")
self.assertSetEqual(parsed.identifiers, set())
self.assertListEqual(parsed.composition, [[], [], [], []])
self.assertFalse(parsed.has_non_unitary_anonymous_axes)
self.assertFalse(parsed.has_ellipsis)
- parsed = ParsedExpression('5 (3 4)')
+ parsed = ParsedExpression("5 (3 4)")
self.assertEqual(len(parsed.identifiers), 3)
- self.assertSetEqual({i.value if isinstance(i, AnonymousAxis) else i for i in parsed.identifiers}, {3, 4, 5})
- self.assertListEqual(parsed.composition, [[AnonymousAxis('5')], [AnonymousAxis('3'), AnonymousAxis('4')]])
+ self.assertSetEqual(
+ {
+ i.value if isinstance(i, AnonymousAxis) else i
+ for i in parsed.identifiers
+ },
+ {3, 4, 5},
+ )
+ self.assertListEqual(
+ parsed.composition,
+ [[AnonymousAxis("5")], [AnonymousAxis("3"), AnonymousAxis("4")]],
+ )
self.assertTrue(parsed.has_non_unitary_anonymous_axes)
self.assertFalse(parsed.has_ellipsis)
- parsed = ParsedExpression('5 1 (1 4) 1')
+ parsed = ParsedExpression("5 1 (1 4) 1")
self.assertEqual(len(parsed.identifiers), 2)
- self.assertSetEqual({i.value if isinstance(i, AnonymousAxis) else i for i in parsed.identifiers}, {4, 5})
- self.assertListEqual(parsed.composition, [[AnonymousAxis('5')], [], [AnonymousAxis('4')], []])
+ self.assertSetEqual(
+ {
+ i.value if isinstance(i, AnonymousAxis) else i
+ for i in parsed.identifiers
+ },
+ {4, 5},
+ )
+ self.assertListEqual(
+ parsed.composition, [[AnonymousAxis("5")], [], [AnonymousAxis("4")], []]
+ )
- parsed = ParsedExpression('name1 ... a1 12 (name2 14)')
+ parsed = ParsedExpression("name1 ... a1 12 (name2 14)")
self.assertEqual(len(parsed.identifiers), 6)
- self.assertEqual(len(parsed.identifiers - {'name1', _ellipsis, 'a1', 'name2'}), 2)
+ self.assertEqual(
+ len(parsed.identifiers - {"name1", _ellipsis, "a1", "name2"}), 2
+ )
self.assertListEqual(
- parsed.composition, [['name1'], _ellipsis, ['a1'], [AnonymousAxis('12')], ['name2', AnonymousAxis('14')]]
+ parsed.composition,
+ [
+ ["name1"],
+ _ellipsis,
+ ["a1"],
+ [AnonymousAxis("12")],
+ ["name2", AnonymousAxis("14")],
+ ],
)
self.assertTrue(parsed.has_non_unitary_anonymous_axes)
self.assertTrue(parsed.has_ellipsis)
self.assertFalse(parsed.has_ellipsis_parenthesized)
- parsed = ParsedExpression('(name1 ... a1 12) name2 14')
+ parsed = ParsedExpression("(name1 ... a1 12) name2 14")
self.assertEqual(len(parsed.identifiers), 6)
- self.assertEqual(len(parsed.identifiers - {'name1', _ellipsis, 'a1', 'name2'}), 2)
+ self.assertEqual(
+ len(parsed.identifiers - {"name1", _ellipsis, "a1", "name2"}), 2
+ )
self.assertListEqual(
- parsed.composition, [['name1', _ellipsis, 'a1', AnonymousAxis('12')], ['name2'], [AnonymousAxis('14')]]
+ parsed.composition,
+ [
+ ["name1", _ellipsis, "a1", AnonymousAxis("12")],
+ ["name2"],
+ [AnonymousAxis("14")],
+ ],
)
self.assertTrue(parsed.has_non_unitary_anonymous_axes)
self.assertTrue(parsed.has_ellipsis)
@@ -240,5 +298,5 @@ class TestValidateRearrangeExpressions(TestCase):
validate_rearrange_expressions(left, right, axes_lengths)
-if __name__ == '__main__':
+if __name__ == "__main__":
run_tests()
diff --git a/test/functorch/test_rearrange.py b/test/functorch/test_rearrange.py
index c7e773a91b..446a2fbb7e 100644
--- a/test/functorch/test_rearrange.py
+++ b/test/functorch/test_rearrange.py
@@ -30,39 +30,39 @@ from typing import List, Tuple
import numpy as np
import torch
from functorch.einops import rearrange
-from torch.testing._internal.common_utils import TestCase, run_tests
+from torch.testing._internal.common_utils import run_tests, TestCase
identity_patterns: List[str] = [
- '...->...',
- 'a b c d e-> a b c d e',
- 'a b c d e ...-> ... a b c d e',
- 'a b c d e ...-> a ... b c d e',
- '... a b c d e -> ... a b c d e',
- 'a ... e-> a ... e',
- 'a ... -> a ... ',
- 'a ... c d e -> a (...) c d e',
+ "...->...",
+ "a b c d e-> a b c d e",
+ "a b c d e ...-> ... a b c d e",
+ "a b c d e ...-> a ... b c d e",
+ "... a b c d e -> ... a b c d e",
+ "a ... e-> a ... e",
+ "a ... -> a ... ",
+ "a ... c d e -> a (...) c d e",
]
equivalent_rearrange_patterns: List[Tuple[str, str]] = [
- ('a b c d e -> (a b) c d e', 'a b ... -> (a b) ... '),
- ('a b c d e -> a b (c d) e', '... c d e -> ... (c d) e'),
- ('a b c d e -> a b c d e', '... -> ... '),
- ('a b c d e -> (a b c d e)', '... -> (...)'),
- ('a b c d e -> b (c d e) a', 'a b ... -> b (...) a'),
- ('a b c d e -> b (a c d) e', 'a b ... e -> b (a ...) e'),
+ ("a b c d e -> (a b) c d e", "a b ... -> (a b) ... "),
+ ("a b c d e -> a b (c d) e", "... c d e -> ... (c d) e"),
+ ("a b c d e -> a b c d e", "... -> ... "),
+ ("a b c d e -> (a b c d e)", "... -> (...)"),
+ ("a b c d e -> b (c d e) a", "a b ... -> b (...) a"),
+ ("a b c d e -> b (a c d) e", "a b ... e -> b (a ...) e"),
]
class TestRearrange(TestCase):
def test_collapsed_ellipsis_errors_out(self) -> None:
x = torch.zeros([1, 1, 1, 1, 1])
- rearrange(x, 'a b c d ... -> a b c ... d')
+ rearrange(x, "a b c d ... -> a b c ... d")
with self.assertRaises(ValueError):
- rearrange(x, 'a b c d (...) -> a b c ... d')
+ rearrange(x, "a b c d (...) -> a b c ... d")
- rearrange(x, '... -> (...)')
+ rearrange(x, "... -> (...)")
with self.assertRaises(ValueError):
- rearrange(x, '(...) -> (...)')
+ rearrange(x, "(...) -> (...)")
def test_ellipsis_ops(self) -> None:
x = torch.arange(2 * 3 * 4 * 5 * 6).reshape([2, 3, 4, 5, 6])
@@ -70,64 +70,73 @@ class TestRearrange(TestCase):
torch.testing.assert_close(rearrange(x, pattern), x, msg=pattern)
for pattern1, pattern2 in equivalent_rearrange_patterns:
- torch.testing.assert_close(rearrange(x, pattern1), rearrange(x, pattern2), msg=f"{pattern1} vs {pattern2}")
+ torch.testing.assert_close(
+ rearrange(x, pattern1),
+ rearrange(x, pattern2),
+ msg=f"{pattern1} vs {pattern2}",
+ )
def test_rearrange_consistency(self) -> None:
shape = [1, 2, 3, 5, 7, 11]
x = torch.arange(int(np.prod(shape, dtype=int))).reshape(shape)
for pattern in [
- 'a b c d e f -> a b c d e f',
- 'b a c d e f -> a b d e f c',
- 'a b c d e f -> f e d c b a',
- 'a b c d e f -> (f e) d (c b a)',
- 'a b c d e f -> (f e d c b a)',
+ "a b c d e f -> a b c d e f",
+ "b a c d e f -> a b d e f c",
+ "a b c d e f -> f e d c b a",
+ "a b c d e f -> (f e) d (c b a)",
+ "a b c d e f -> (f e d c b a)",
]:
result = rearrange(x, pattern)
self.assertEqual(len(np.setdiff1d(x, result)), 0)
self.assertIs(result.dtype, x.dtype)
- result = rearrange(x, 'a b c d e f -> a (b) (c d e) f')
+ result = rearrange(x, "a b c d e f -> a (b) (c d e) f")
torch.testing.assert_close(x.flatten(), result.flatten())
- result = rearrange(x, 'a aa aa1 a1a1 aaaa a11 -> a aa aa1 a1a1 aaaa a11')
+ result = rearrange(x, "a aa aa1 a1a1 aaaa a11 -> a aa aa1 a1a1 aaaa a11")
torch.testing.assert_close(x, result)
- result1 = rearrange(x, 'a b c d e f -> f e d c b a')
- result2 = rearrange(x, 'f e d c b a -> a b c d e f')
+ result1 = rearrange(x, "a b c d e f -> f e d c b a")
+ result2 = rearrange(x, "f e d c b a -> a b c d e f")
torch.testing.assert_close(result1, result2)
- result = rearrange(rearrange(x, 'a b c d e f -> (f d) c (e b) a'), '(f d) c (e b) a -> a b c d e f', b=2, d=5)
+ result = rearrange(
+ rearrange(x, "a b c d e f -> (f d) c (e b) a"),
+ "(f d) c (e b) a -> a b c d e f",
+ b=2,
+ d=5,
+ )
torch.testing.assert_close(x, result)
- sizes = dict(zip('abcdef', shape))
- temp = rearrange(x, 'a b c d e f -> (f d) c (e b) a', **sizes)
- result = rearrange(temp, '(f d) c (e b) a -> a b c d e f', **sizes)
+ sizes = dict(zip("abcdef", shape))
+ temp = rearrange(x, "a b c d e f -> (f d) c (e b) a", **sizes)
+ result = rearrange(temp, "(f d) c (e b) a -> a b c d e f", **sizes)
torch.testing.assert_close(x, result)
x2 = torch.arange(2 * 3 * 4).reshape([2, 3, 4])
- result = rearrange(x2, 'a b c -> b c a')
+ result = rearrange(x2, "a b c -> b c a")
self.assertEqual(x2[1, 2, 3], result[2, 3, 1])
self.assertEqual(x2[0, 1, 2], result[1, 2, 0])
def test_rearrange_permutations(self) -> None:
# tests random permutation of axes against two independent numpy ways
for n_axes in range(1, 10):
- input = torch.arange(2 ** n_axes).reshape([2] * n_axes)
+ input = torch.arange(2**n_axes).reshape([2] * n_axes)
permutation = np.random.permutation(n_axes)
- left_expression = ' '.join('i' + str(axis) for axis in range(n_axes))
- right_expression = ' '.join('i' + str(axis) for axis in permutation)
- expression = left_expression + ' -> ' + right_expression
+ left_expression = " ".join("i" + str(axis) for axis in range(n_axes))
+ right_expression = " ".join("i" + str(axis) for axis in permutation)
+ expression = left_expression + " -> " + right_expression
result = rearrange(input, expression)
for pick in np.random.randint(0, 2, [10, n_axes]):
self.assertEqual(input[tuple(pick)], result[tuple(pick[permutation])])
for n_axes in range(1, 10):
- input = torch.arange(2 ** n_axes).reshape([2] * n_axes)
+ input = torch.arange(2**n_axes).reshape([2] * n_axes)
permutation = np.random.permutation(n_axes)
- left_expression = ' '.join('i' + str(axis) for axis in range(n_axes)[::-1])
- right_expression = ' '.join('i' + str(axis) for axis in permutation[::-1])
- expression = left_expression + ' -> ' + right_expression
+ left_expression = " ".join("i" + str(axis) for axis in range(n_axes)[::-1])
+ right_expression = " ".join("i" + str(axis) for axis in permutation[::-1])
+ expression = left_expression + " -> " + right_expression
result = rearrange(input, expression)
self.assertEqual(result.shape, input.shape)
expected_result = torch.zeros_like(input)
@@ -140,29 +149,32 @@ class TestRearrange(TestCase):
for n_arrays in [1, 2, 5]:
shapes: List[List[int]] = [[], [1], [1, 1], [2, 3, 5, 7], [1] * 6]
for shape in shapes:
- arrays1 = [torch.arange(i, i + np.prod(shape, dtype=int)).reshape(shape) for i in range(n_arrays)]
+ arrays1 = [
+ torch.arange(i, i + np.prod(shape, dtype=int)).reshape(shape)
+ for i in range(n_arrays)
+ ]
result0 = torch.stack(arrays1)
- result1 = rearrange(arrays1, '...->...')
+ result1 = rearrange(arrays1, "...->...")
torch.testing.assert_close(result0, result1)
def test_unsqueeze(self) -> None:
x = torch.randn((2, 3, 4, 5))
- actual = rearrange(x, 'b h w c -> b 1 h w 1 c')
+ actual = rearrange(x, "b h w c -> b 1 h w 1 c")
expected = x.unsqueeze(1).unsqueeze(-2)
torch.testing.assert_close(actual, expected)
def test_squeeze(self) -> None:
x = torch.randn((2, 1, 3, 4, 1, 5))
- actual = rearrange(x, 'b 1 h w 1 c -> b h w c')
+ actual = rearrange(x, "b 1 h w 1 c -> b h w c")
expected = x.squeeze()
torch.testing.assert_close(actual, expected)
def test_0_dim_tensor(self) -> None:
x = expected = torch.tensor(1)
- actual = rearrange(x, '->')
+ actual = rearrange(x, "->")
torch.testing.assert_close(actual, expected)
- actual = rearrange(x, '... -> ...')
+ actual = rearrange(x, "... -> ...")
torch.testing.assert_close(actual, expected)
def test_dimension_mismatch_no_ellipsis(self) -> None:
@@ -179,5 +191,5 @@ class TestRearrange(TestCase):
rearrange(x, "a ... -> ... a")
-if __name__ == '__main__':
+if __name__ == "__main__":
run_tests()
diff --git a/test/functorch/test_vmap.py b/test/functorch/test_vmap.py
index 81d3362ac8..5c6b98fd1f 100644
--- a/test/functorch/test_vmap.py
+++ b/test/functorch/test_vmap.py
@@ -6,67 +6,70 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
+import contextlib
+import functools
+import itertools
+import os
+import random
+import types
+import unittest
+import warnings
+from collections import namedtuple
from typing import OrderedDict
from unittest.case import skipIf
-from torch.testing._internal.common_utils import TestCase, run_tests
+
+import functorch
import torch
import torch.nn.functional as F
-from torch import Tensor
-import functools
-import itertools
-import warnings
-import unittest
-import random
-from torch.testing._internal.common_methods_invocations import op_db
-from torch.testing._internal.common_cuda import with_tf32_off
-from torch.testing._internal.common_device_type import instantiate_device_type_tests, \
- OpDTypes
-from torch.testing._internal.common_device_type import ops
-from torch.testing._internal.common_utils import (
- parametrize,
- instantiate_parametrized_tests,
- IS_WINDOWS,
- subtest,
- skipIfRocm,
- TEST_WITH_TORCHDYNAMO,
- xfailIfTorchDynamo,
- markDynamoStrictTest,
- skipIfTorchDynamo,
-)
-from torch.testing._internal.common_device_type import \
- toleranceOverride, tol
-from functorch_additional_op_db import additional_op_db
from common_utils import (
- get_fallback_and_vmap_exhaustive,
- xfail,
- skip,
- skipOps,
check_vmap_fallback,
- tol1,
- opsToleranceOverride,
- is_batch_norm_training,
- generate_vmap_inputs,
compute_quantities_for_vmap_test,
- is_valid_inplace_sample_input,
decorate,
DisableVmapFallback,
+ generate_vmap_inputs,
+ get_fallback_and_vmap_exhaustive,
+ is_batch_norm_training,
+ is_valid_inplace_sample_input,
+ opsToleranceOverride,
+ skip,
+ skipOps,
+ tol1,
+ xfail,
)
-import types
-import os
-from collections import namedtuple
-import contextlib
-
-import functorch
-from functorch import vmap, grad, grad_and_value, jvp, vjp, jacfwd
+from functorch import grad, grad_and_value, jacfwd, jvp, vjp, vmap
from functorch.experimental import chunk_vmap
+from functorch_additional_op_db import additional_op_db
+from torch import Tensor
from torch._C._functorch import reshape_dim_into, reshape_dim_outof
from torch._functorch.make_functional import functional_init_with_buffers
-from torch.testing._internal.autograd_function_db import autograd_function_db
from torch._functorch.vmap import restore_vmap
+from torch.testing._internal.autograd_function_db import autograd_function_db
+from torch.testing._internal.common_cuda import with_tf32_off
+from torch.testing._internal.common_device_type import (
+ instantiate_device_type_tests,
+ OpDTypes,
+ ops,
+ tol,
+ toleranceOverride,
+)
+from torch.testing._internal.common_methods_invocations import op_db
+from torch.testing._internal.common_utils import (
+ instantiate_parametrized_tests,
+ IS_WINDOWS,
+ markDynamoStrictTest,
+ parametrize,
+ run_tests,
+ skipIfRocm,
+ skipIfTorchDynamo,
+ subtest,
+ TEST_WITH_TORCHDYNAMO,
+ TestCase,
+ unMarkDynamoStrictTest,
+ xfailIfTorchDynamo,
+)
from torch.utils import _pytree as pytree
-from torch.testing._internal.common_utils import unMarkDynamoStrictTest
-FALLBACK_REGEX = 'There is a performance drop'
+FALLBACK_REGEX = "There is a performance drop"
class EnableVmapFallbackWarnings:
@@ -93,16 +96,20 @@ class TestVmapAPI(TestCase):
def test_different_map_dim_size_raises(self):
x = torch.randn(2)
y = torch.randn(3)
- expected_msg = 'Expected all tensors to have the same size in the mapped dimension'
+ expected_msg = (
+ "Expected all tensors to have the same size in the mapped dimension"
+ )
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(torch.mul)(x, y)
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(lambda z: z[0] + z[1], in_dims=((0, 0),))((x, y))
with self.assertRaisesRegex(ValueError, expected_msg):
- vmap(lambda z: z['x'] + z['y'], in_dims=({'x': 0, 'y': 0},))({'x': x, 'y': y})
+ vmap(lambda z: z["x"] + z["y"], in_dims=({"x": 0, "y": 0},))(
+ {"x": x, "y": y}
+ )
def test_func_with_no_inputs(self):
- expected_msg = 'got no inputs'
+ expected_msg = "got no inputs"
def foo():
return torch.randn(3)
@@ -120,7 +127,7 @@ class TestVmapAPI(TestCase):
def foo(x):
return torch.randn(3)
- with self.assertRaisesRegex(ValueError, 'at least one Tensor'):
+ with self.assertRaisesRegex(ValueError, "at least one Tensor"):
vmap(foo, (None,))(1)
def test_constant_function(self):
@@ -223,7 +230,7 @@ class TestVmapAPI(TestCase):
# Don't support non-tensor returns. This is a limitation of vmap;
# functions that don't return tensors must be special cased
- with self.assertRaisesRegex(RuntimeError, 'Batching rule not implemented'):
+ with self.assertRaisesRegex(RuntimeError, "Batching rule not implemented"):
vmap(torch.equal)(tensor, tensor)
def test_nonzero_out_dims(self):
@@ -249,7 +256,9 @@ class TestVmapAPI(TestCase):
tensor = torch.randn(2, 3, 5, 7)
other = torch.randn(2, 3, 5, 7)
result = vmap(lambda x, y: (x, y), out_dims=2)(tensor, other)
- self.assertEqual(result, (tensor.permute(1, 2, 0, 3), other.permute(1, 2, 0, 3)))
+ self.assertEqual(
+ result, (tensor.permute(1, 2, 0, 3), other.permute(1, 2, 0, 3))
+ )
# use out_dims with the maximum vmap-able tensor dims (64 dims)
ndims = 64
@@ -262,12 +271,18 @@ class TestVmapAPI(TestCase):
# test something that is not the identity function
def foo(x, y):
return x, x * y, x * y * y
+
x = torch.randn(2, 3, 5)
y = torch.randn(2, 3, 5)
result = vmap(foo, out_dims=1)(x, y)
self.assertEqual(
result,
- (x.permute(1, 0, 2), (x * y).permute(1, 0, 2), (x * y * y).permute(1, 0, 2)))
+ (
+ x.permute(1, 0, 2),
+ (x * y).permute(1, 0, 2),
+ (x * y * y).permute(1, 0, 2),
+ ),
+ )
def test_multiple_out_dims(self):
def foo(x):
@@ -327,36 +342,38 @@ class TestVmapAPI(TestCase):
def test_out_dims_none_tuple(self):
def foo(x):
- return x, 'hello world'
+ return x, "hello world"
tensor = torch.randn(2, 3)
result = vmap(foo, out_dims=(0, None))(tensor)
- self.assertEqual(result[1], 'hello world')
+ self.assertEqual(result[1], "hello world")
self.assertEqual(result[0], tensor)
def foo(x):
x.add_(1)
- return None, 'hello world'
- result = vmap(foo, out_dims=(None, None))(tensor)
- self.assertEqual(result, (None, 'hello world'))
+ return None, "hello world"
+ result = vmap(foo, out_dims=(None, None))(tensor)
+ self.assertEqual(result, (None, "hello world"))
def test_out_dims_none(self):
def foo(x):
return x
tensor = torch.randn(2, 3)
- with self.assertRaisesRegex(ValueError, 'can not return a BatchedTensor when out_dim is None'):
+ with self.assertRaisesRegex(
+ ValueError, "can not return a BatchedTensor when out_dim is None"
+ ):
vmap(foo, out_dims=None)(tensor)
def foo(x):
x.add_(1)
- return 'hello world'
+ return "hello world"
+
result = vmap(foo, out_dims=None)(tensor)
- self.assertEqual(result, 'hello world')
+ self.assertEqual(result, "hello world")
def test_out_dims_normal_tensor(self):
-
def foo(x):
return torch.arange(3)
@@ -367,7 +384,6 @@ class TestVmapAPI(TestCase):
result = vmap(foo, out_dims=None)(tensor)
self.assertEqual(result, torch.arange(3))
-
def test_pytree_returns(self):
x = torch.randn(2, 3)
@@ -433,15 +449,15 @@ class TestVmapAPI(TestCase):
self.assertEqual(y2, y0.t())
def test_out_dims_must_be_int_or_collection_of_int_err_msg(self):
- msg = 'must be an int, None or a python collection of ints'
+ msg = "must be an int, None or a python collection of ints"
tensor = torch.randn(2, 3)
with self.assertRaisesRegex(ValueError, msg):
- vmap(lambda x: x, out_dims='lol')(tensor)
+ vmap(lambda x: x, out_dims="lol")(tensor)
with self.assertRaisesRegex(ValueError, msg):
- vmap(lambda x: x, out_dims=('lol',))(tensor)
+ vmap(lambda x: x, out_dims=("lol",))(tensor)
def test_out_dims_and_num_outputs_mismatch_err_msg(self):
- msg = 'not compatible'
+ msg = "not compatible"
x = torch.randn(2, 3, 5)
# Too many out_dims
@@ -460,7 +476,7 @@ class TestVmapAPI(TestCase):
# TODO(rzou): This error message isn't that great. It comes straight
# from maybe_wrap_dim. Consider doing a try-catch-(add some context) to
# the error message in the future in C++
- msg = 'Dimension out of range'
+ msg = "Dimension out of range"
x = torch.randn(2, 3, 5)
with self.assertRaisesRegex(IndexError, msg):
vmap(lambda x: x, out_dims=3)(x)
@@ -542,7 +558,7 @@ class TestVmapAPI(TestCase):
def f(x):
return x.item()
- with self.assertRaisesRegex(RuntimeError, r'item\(\) on a Tensor'):
+ with self.assertRaisesRegex(RuntimeError, r"item\(\) on a Tensor"):
vmap(f)(torch.randn(3))
def test_data_dependent_control_flow_throws(self):
@@ -551,7 +567,7 @@ class TestVmapAPI(TestCase):
return x
return 0
- with self.assertRaisesRegex(RuntimeError, r'data-dependent control flow'):
+ with self.assertRaisesRegex(RuntimeError, r"data-dependent control flow"):
vmap(f)(torch.randn(3))
def test_accepts_nested_inputs(self):
@@ -573,28 +589,30 @@ class TestVmapAPI(TestCase):
out = vmap(lambda z: z[0] + z[1], in_dims=([0, 0],))([x, y])
self.assertEqual(out, x + y)
- out = vmap(lambda z: z['x'] + z['y'])({'x': x, 'y': y})
+ out = vmap(lambda z: z["x"] + z["y"])({"x": x, "y": y})
self.assertEqual(out, x + y)
- out = vmap(lambda z: z['x'] + z['y'], in_dims=(0,))({'x': x, 'y': y})
+ out = vmap(lambda z: z["x"] + z["y"], in_dims=(0,))({"x": x, "y": y})
self.assertEqual(out, x + y)
- out = vmap(lambda z: z['x'] + z['y'], in_dims=({'x': 0, 'y': 0},))({'x': x, 'y': y})
+ out = vmap(lambda z: z["x"] + z["y"], in_dims=({"x": 0, "y": 0},))(
+ {"x": x, "y": y}
+ )
self.assertEqual(out, x + y)
# Multiple layers of nesting
- out_fn = vmap(lambda z: z['x'][0] + z['x'][1][0] + z['y'][0] + z['y'][1])
- out = out_fn({'x': [x, (x,)], 'y': [y, y]})
+ out_fn = vmap(lambda z: z["x"][0] + z["x"][1][0] + z["y"][0] + z["y"][1])
+ out = out_fn({"x": [x, (x,)], "y": [y, y]})
self.assertEqual(out, x + x + y + y)
def test_in_dims_wrong_type_err_msg(self):
x = torch.randn(3)
y = torch.randn(3)
- msg = r'expected `in_dims` to be int or a \(potentially nested\) tuple'
+ msg = r"expected `in_dims` to be int or a \(potentially nested\) tuple"
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, [0, 0])(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, set({0}))(x, y)
with self.assertRaisesRegex(ValueError, msg):
- vmap(torch.mul, 'lol')(x, y)
+ vmap(torch.mul, "lol")(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=[0, 0])([x, y])
# The following should not throw
@@ -603,7 +621,7 @@ class TestVmapAPI(TestCase):
def test_not_enough_in_dims_err_msg(self):
x = torch.randn(3)
y = torch.randn(3)
- msg = r'in_dims is not compatible with the structure of `inputs`'
+ msg = r"in_dims is not compatible with the structure of `inputs`"
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, (0,))(x, y)
@@ -626,7 +644,7 @@ class TestVmapAPI(TestCase):
x = torch.randn(2, 3)
# the following are errors in jax (and will always be errors)
- msg = 'Got in_dim=0 for an input but the input is of type'
+ msg = "Got in_dim=0 for an input but the input is of type"
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.sum)(x, 0)
with self.assertRaisesRegex(ValueError, msg):
@@ -643,7 +661,7 @@ class TestVmapAPI(TestCase):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
- msg = r'Got in_dim=-?\w for some input, but that input is a Tensor of dimensionality \w'
+ msg = r"Got in_dim=-?\w for some input, but that input is a Tensor of dimensionality \w"
with self.assertRaisesRegex(ValueError, msg):
vmap(foo)(torch.randn([]))
with self.assertRaisesRegex(ValueError, msg):
@@ -700,7 +718,7 @@ class TestVmapAPI(TestCase):
x = torch.randn(B0, 11)
y = torch.randn(11)
- msg = 'The fallback path does not support vmap over dims of size 0'
+ msg = "The fallback path does not support vmap over dims of size 0"
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (0, None))(x, y)
@@ -758,11 +776,12 @@ class TestVmapAPI(TestCase):
index = torch.tensor([0, 4, 2])
values = torch.randn(B0, 3, 13)
- self._assert_uses_vmap_fallback((torch.index_add, (0, None, None, 0)), (x, dim, index, values))
+ self._assert_uses_vmap_fallback(
+ (torch.index_add, (0, None, None, 0)), (x, dim, index, values)
+ )
result = vmap(torch.index_add, (0, None, None, 0))(x, dim, index, values)
- expected = torch.index_add(
- x, dim + 1, index, values.view(B0, 3, 1, 13))
+ expected = torch.index_add(x, dim + 1, index, values.view(B0, 3, 1, 13))
self.assertEqual(result, expected)
run_test(batch_size=5)
@@ -897,7 +916,7 @@ class TestVmapAPI(TestCase):
self.assertEqual(x, outplace_op(x_orig, y.view(B0, 1, 7)))
# op(left, right): Some of the levels in right are not found in left
- msg = r'vmap: aten::atan2_\(self, \*extra_args\) is not possible'
+ msg = r"vmap: aten::atan2_\(self, \*extra_args\) is not possible"
x = torch.rand(7)
y = torch.rand(B0, 7)
with self.assertRaisesRegex(RuntimeError, msg):
@@ -922,13 +941,15 @@ class TestVmapAPI(TestCase):
x = torch.randn(3, requires_grad=True)
y = torch.randn(5)
grad = torch.randn_like(x)
- err_msg = r'backward\(\) called inside a functorch transform'
+ err_msg = r"backward\(\) called inside a functorch transform"
def backward_on_vmapped_tensor(x):
x.sum().backward()
# FIXME
- return self.skipTest("error: element 0 of tensors does not require grad and does not have a grad_fn")
+ return self.skipTest(
+ "error: element 0 of tensors does not require grad and does not have a grad_fn"
+ )
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(backward_on_vmapped_tensor)(x)
@@ -948,7 +969,7 @@ class TestVmapAPI(TestCase):
@unittest.expectedFailure
def test_grad_unsupported_interaction(self):
input_tensor = torch.randn(3, requires_grad=True)
- err_msg = 'autograd.grad.* called inside torch.vmap'
+ err_msg = "autograd.grad.* called inside torch.vmap"
captured = torch.randn(3, requires_grad=True)
@@ -959,7 +980,7 @@ class TestVmapAPI(TestCase):
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(output_to_grad_is_vmapped)(input_tensor)
- output = (input_tensor ** 2).sum()
+ output = (input_tensor**2).sum()
def input_to_grad_is_vmapped(input_tensor):
return torch.autograd.grad([output], [input_tensor])[0]
@@ -999,7 +1020,7 @@ class TestVmapAPI(TestCase):
def get_vjp(v):
result = torch.nn.functional.conv2d(x, weight)
- grad_x, = torch.autograd.grad(result, x, v)
+ (grad_x,) = torch.autograd.grad(result, x, v)
return grad_x
# Runs vmap(get_vjp)(v), which should not error out.
@@ -1077,7 +1098,6 @@ class TestVmapAPI(TestCase):
self.assertEqual(o, torch.square(t))
def _test_vmap_autocast(self, device):
-
if torch.device(device).type == "cpu":
amp_dtype = torch.bfloat16
else:
@@ -1140,17 +1160,17 @@ class TestVmapAPI(TestCase):
def f(x, y):
output0 = x[0] + x[1]
output1 = y
- return {'a': output0, 'b': output1}
+ return {"a": output0, "b": output1}
B = 2
x0 = torch.randn(B, 3)
x1 = torch.randn(B)
y = torch.randn(4, B)
- out, out_dims = restore_vmap(f, ((0, 0), 1), B, 'error')((x0, x1), y)
- expected = vmap(f, in_dims=((0, 0), 1), out_dims={'a': 0, 'b': 1})((x0, x1), y)
+ out, out_dims = restore_vmap(f, ((0, 0), 1), B, "error")((x0, x1), y)
+ expected = vmap(f, in_dims=((0, 0), 1), out_dims={"a": 0, "b": 1})((x0, x1), y)
self.assertEqual(out, expected)
- self.assertEqual(out_dims, {'a': 0, 'b': 1})
+ self.assertEqual(out_dims, {"a": 0, "b": 1})
def test_restore_vmap_no_vmapped_inputs(self):
def f(x, y, z):
@@ -1161,7 +1181,7 @@ class TestVmapAPI(TestCase):
x = torch.randn(3)
y = torch.randn(4)
z = 5
- out, out_dims = restore_vmap(f, (None, None, None), B, 'error')(x, y, z)
+ out, out_dims = restore_vmap(f, (None, None, None), B, "error")(x, y, z)
self.assertEqual(out, f(x, y, z))
self.assertEqual(out_dims, (None, None, None))
@@ -1173,7 +1193,7 @@ class TestVmapAPI(TestCase):
B = 2
x = torch.randn(B, 3)
y = torch.randn(4)
- out, out_dims = restore_vmap(f, (0, None), B, 'error')(x, y)
+ out, out_dims = restore_vmap(f, (0, None), B, "error")(x, y)
self.assertEqual(out, f(None, y))
self.assertEqual(out_dims, (None, None, None))
@@ -1182,14 +1202,18 @@ class TestVmapAPI(TestCase):
y = x.data
return x
- with self.assertRaisesRegex(RuntimeError, "accessing `data` under vmap transform"):
+ with self.assertRaisesRegex(
+ RuntimeError, "accessing `data` under vmap transform"
+ ):
torch.func.vmap(foo)(torch.randn(3, 3))
def foo(x):
x.data = torch.ones(3, 3)
return x
- with self.assertRaisesRegex(RuntimeError, "mutating directly with `.data` under vmap"):
+ with self.assertRaisesRegex(
+ RuntimeError, "mutating directly with `.data` under vmap"
+ ):
torch.func.vmap(foo)(torch.randn(3, 3))
@@ -1228,26 +1252,31 @@ def reference_vmap(op, inputs, in_dims=0, out_dims=0, return_nt=False):
if isinstance(out_dims, int):
out_dims = (out_dims,) * num_returns
if return_nt:
- return tuple(torch.nested.nested_tensor(list(result_shards))
- for result_shards in zip(*results))
+ return tuple(
+ torch.nested.nested_tensor(list(result_shards))
+ for result_shards in zip(*results)
+ )
else:
- return tuple(torch.stack(result_shards, out_dim)
- for result_shards, out_dim in zip(zip(*results), out_dims))
+ return tuple(
+ torch.stack(result_shards, out_dim)
+ for result_shards, out_dim in zip(zip(*results), out_dims)
+ )
class TensorFactory:
@staticmethod
- def rand(size, device='cpu', dtype=torch.float):
+ def rand(size, device="cpu", dtype=torch.float):
return torch.rand(size, device=device, dtype=dtype)
@staticmethod
- def randn(size, device='cpu', dtype=torch.float):
+ def randn(size, device="cpu", dtype=torch.float):
return torch.randn(size, device=device, dtype=dtype)
@staticmethod
- def randp1(size, device='cpu', dtype=torch.float):
+ def randp1(size, device="cpu", dtype=torch.float):
return torch.rand(size, device=device, dtype=dtype) + 1
+
# Tests vmap(op, in_dims, out_dims)(*inputs) by comparing the output to a
# (slow) sequential map+stack fallback.
#
@@ -1255,11 +1284,20 @@ class TensorFactory:
# check_propagates_grad: Test if the operation propagates gradients.
-def _vmap_test(self, op, inputs, in_dims=0, out_dims=0,
- check_view=False, check_propagates_grad=True):
+def _vmap_test(
+ self,
+ op,
+ inputs,
+ in_dims=0,
+ out_dims=0,
+ check_view=False,
+ check_propagates_grad=True,
+):
result = vmap(op, in_dims, out_dims)(*inputs)
are_nested = [t.is_nested for t in pytree.tree_leaves(result)]
- reference_result = reference_vmap(op, inputs, in_dims, out_dims, return_nt=any(are_nested))
+ reference_result = reference_vmap(
+ op, inputs, in_dims, out_dims, return_nt=any(are_nested)
+ )
self.assertEqual(result, reference_result)
op_has_single_return = not isinstance(result, tuple)
@@ -1267,8 +1305,10 @@ def _vmap_test(self, op, inputs, in_dims=0, out_dims=0,
result_as_tuple = (result,) if op_has_single_return else result
for output in result_as_tuple:
input0_base = inputs[0] if inputs[0]._base is None else inputs[0]._base
- self.assertTrue(output._base is input0_base,
- msg="result was not a view of the first input!")
+ self.assertTrue(
+ output._base is input0_base,
+ msg="result was not a view of the first input!",
+ )
if not check_propagates_grad:
return
@@ -1286,13 +1326,14 @@ def _vmap_test(self, op, inputs, in_dims=0, out_dims=0,
def should_allow_vmap_fallback_usage(fn):
- return getattr(fn, '_allow_vmap_fallback_usage', False)
+ return getattr(fn, "_allow_vmap_fallback_usage", False)
def allowVmapFallbackUsage(fn):
fn._allow_vmap_fallback_usage = True
return fn
+
# All tests of TestVmapBase check that the slow vmap fallback is never invoked.
# This is so that we can incrementally add batching rules for operators to
# replace the slow vmap fallback path for said operators. To skip this check,
@@ -1307,7 +1348,7 @@ def allowVmapFallbackUsage(fn):
class Namespace:
class TestVmapBase(TestCase):
- def __init__(self, method_name='runTest'):
+ def __init__(self, method_name="runTest"):
super().__init__(method_name)
test_method = getattr(self, method_name, None)
@@ -1315,8 +1356,11 @@ class Namespace:
return
if not should_allow_vmap_fallback_usage(test_method):
- setattr(self, method_name,
- self._wrap_method_with_vmap_fallback_check(test_method))
+ setattr(
+ self,
+ method_name,
+ self._wrap_method_with_vmap_fallback_check(test_method),
+ )
def _wrap_method_with_vmap_fallback_check(self, method):
# msg = (
@@ -1331,11 +1375,12 @@ class Namespace:
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
with warnings.catch_warnings(record=True):
- warnings.simplefilter('always')
+ warnings.simplefilter("always")
with EnableVmapFallbackWarnings():
method(*args, **kwargs)
# for captured_warning in wa:
# self.assertNotRegex(str(captured_warning.message), FALLBACK_REGEX, msg)
+
return types.MethodType(wrapper, self)
@allowVmapFallbackUsage
@@ -1391,56 +1436,70 @@ class TestVmapOperators(Namespace.TestVmapBase):
# Doubly nested vmap
test(vmap(op), [getter([B0, B1], device)])
test(vmap(op), [getter([B1, 2, 5, B0, 3], device)], in_dims=2)
- test(vmap(op, in_dims=2), [getter([2, 5, B0, B1, 3], device)],
- in_dims=2, out_dims=2)
-
- @parametrize("case", [
- (torch.abs, TensorFactory.randn),
- (torch.acos, TensorFactory.rand),
- (torch.asin, TensorFactory.rand),
- (torch.atan, TensorFactory.rand),
- (torch.ceil, TensorFactory.randn),
- (torch.cos, TensorFactory.rand),
- (torch.cosh, TensorFactory.rand),
- (torch.digamma, TensorFactory.rand),
- (torch.exp, TensorFactory.randn),
- (torch.expm1, TensorFactory.randn),
- (torch.floor, TensorFactory.randn),
- (torch.frac, TensorFactory.randn),
- (torch.lgamma, TensorFactory.rand),
- (torch.log, TensorFactory.randp1),
- (torch.log10, TensorFactory.randp1),
- (torch.log1p, TensorFactory.randp1),
- (torch.log2, TensorFactory.randp1),
- (torch.neg, TensorFactory.randn),
- (torch.reciprocal, TensorFactory.randp1),
- (torch.relu, TensorFactory.randn),
- (torch.round, TensorFactory.randn),
- (torch.rsqrt, TensorFactory.randp1),
- (torch.sigmoid, TensorFactory.randn),
- (torch.sign, TensorFactory.randn),
- (torch.sin, TensorFactory.rand),
- (torch.sinh, TensorFactory.rand),
- (torch.sqrt, TensorFactory.rand),
- (torch.tan, TensorFactory.rand),
- (torch.tanh, TensorFactory.rand),
- (torch.trunc, TensorFactory.randn),
- ], name_fn=lambda x: x[0].__name__)
+ test(
+ vmap(op, in_dims=2),
+ [getter([2, 5, B0, B1, 3], device)],
+ in_dims=2,
+ out_dims=2,
+ )
+
+ @parametrize(
+ "case",
+ [
+ (torch.abs, TensorFactory.randn),
+ (torch.acos, TensorFactory.rand),
+ (torch.asin, TensorFactory.rand),
+ (torch.atan, TensorFactory.rand),
+ (torch.ceil, TensorFactory.randn),
+ (torch.cos, TensorFactory.rand),
+ (torch.cosh, TensorFactory.rand),
+ (torch.digamma, TensorFactory.rand),
+ (torch.exp, TensorFactory.randn),
+ (torch.expm1, TensorFactory.randn),
+ (torch.floor, TensorFactory.randn),
+ (torch.frac, TensorFactory.randn),
+ (torch.lgamma, TensorFactory.rand),
+ (torch.log, TensorFactory.randp1),
+ (torch.log10, TensorFactory.randp1),
+ (torch.log1p, TensorFactory.randp1),
+ (torch.log2, TensorFactory.randp1),
+ (torch.neg, TensorFactory.randn),
+ (torch.reciprocal, TensorFactory.randp1),
+ (torch.relu, TensorFactory.randn),
+ (torch.round, TensorFactory.randn),
+ (torch.rsqrt, TensorFactory.randp1),
+ (torch.sigmoid, TensorFactory.randn),
+ (torch.sign, TensorFactory.randn),
+ (torch.sin, TensorFactory.rand),
+ (torch.sinh, TensorFactory.rand),
+ (torch.sqrt, TensorFactory.rand),
+ (torch.tan, TensorFactory.rand),
+ (torch.tanh, TensorFactory.rand),
+ (torch.trunc, TensorFactory.randn),
+ ],
+ name_fn=lambda x: x[0].__name__,
+ )
def test_unary_pointwise(self, case):
op, getter = case
- self._test_unary(op, getter, 'cpu')
+ self._test_unary(op, getter, "cpu")
# test in-place
method = getattr(Tensor, f'{op.__name__ + "_"}')
- self._test_unary(method, getter, 'cpu', check_propagates_grad=False)
+ self._test_unary(method, getter, "cpu", check_propagates_grad=False)
def test_clone(self):
# Some basic tests
- self._test_unary(lambda x: x.clone(), TensorFactory.randn, 'cpu')
- self._test_unary(lambda x: x.clone(memory_format=torch.preserve_format),
- TensorFactory.randn, 'cpu')
- self._test_unary(lambda x: x.clone(memory_format=torch.contiguous_format),
- TensorFactory.randn, 'cpu')
+ self._test_unary(lambda x: x.clone(), TensorFactory.randn, "cpu")
+ self._test_unary(
+ lambda x: x.clone(memory_format=torch.preserve_format),
+ TensorFactory.randn,
+ "cpu",
+ )
+ self._test_unary(
+ lambda x: x.clone(memory_format=torch.contiguous_format),
+ TensorFactory.randn,
+ "cpu",
+ )
# Test that the per-examples are contiguous when using torch.contiguous_format
def clone_contiguous(x):
@@ -1457,11 +1516,13 @@ class TestVmapOperators(Namespace.TestVmapBase):
self.assertTrue(y.is_contiguous())
self.assertTrue(y[0][0].is_contiguous())
- msg = r'only supported with memory_format torch.preserve_format or torch.contiguous_format'
+ msg = r"only supported with memory_format torch.preserve_format or torch.contiguous_format"
with self.assertRaisesRegex(RuntimeError, msg):
vmap(lambda x: x.clone(memory_format=torch.channels_last))(torch.randn(B0))
with self.assertRaisesRegex(RuntimeError, msg):
- vmap(lambda x: x.clone(memory_format=torch.channels_last_3d))(torch.randn(B0))
+ vmap(lambda x: x.clone(memory_format=torch.channels_last_3d))(
+ torch.randn(B0)
+ )
def test_weird_matmul_case(self):
# Check that this doesn't crash.
@@ -1471,11 +1532,14 @@ class TestVmapOperators(Namespace.TestVmapBase):
vmap(vmap(torch.matmul, in_dims=(None, 0)))(x, y)
- @parametrize("case",
- (
- (torch.clamp_min_, TensorFactory.randn),
- (torch.clamp_max_, TensorFactory.randn),
- ), name_fn=lambda x: x[0].__name__)
+ @parametrize(
+ "case",
+ (
+ (torch.clamp_min_, TensorFactory.randn),
+ (torch.clamp_max_, TensorFactory.randn),
+ ),
+ name_fn=lambda x: x[0].__name__,
+ )
def test_clamp_inplace_variant(self, case):
test = self._vmap_test
@@ -1483,29 +1547,66 @@ class TestVmapOperators(Namespace.TestVmapBase):
return getter([]).item()
op, getter = case
- device = 'cpu'
+ device = "cpu"
B0, B1 = 7, 11
# Single vmap: op(Tensor, Tensor)
- test(op, (getter([B0, 3], device), getter([B0, 3], device)), check_propagates_grad=False)
- test(op, (getter([B0], device), getter([B0], device)), check_propagates_grad=False)
- test(op, (getter([2, B0, 3], device), getter([2, B0, 3], device)), in_dims=(1, 1), check_propagates_grad=False)
- test(op, (getter([B0, 2, 3], device), getter([2, B0, 3], device)),
- in_dims=(0, 1), out_dims=1, check_propagates_grad=False)
- test(op, (getter([B0, 2, 3], device), getter([1, 1], device)), in_dims=(0, None), check_propagates_grad=False)
- test(op, (getter([B0, 3], device), getter([B0, 3], device)), in_dims=(0, 0), check_propagates_grad=False)
+ test(
+ op,
+ (getter([B0, 3], device), getter([B0, 3], device)),
+ check_propagates_grad=False,
+ )
+ test(
+ op,
+ (getter([B0], device), getter([B0], device)),
+ check_propagates_grad=False,
+ )
+ test(
+ op,
+ (getter([2, B0, 3], device), getter([2, B0, 3], device)),
+ in_dims=(1, 1),
+ check_propagates_grad=False,
+ )
+ test(
+ op,
+ (getter([B0, 2, 3], device), getter([2, B0, 3], device)),
+ in_dims=(0, 1),
+ out_dims=1,
+ check_propagates_grad=False,
+ )
+ test(
+ op,
+ (getter([B0, 2, 3], device), getter([1, 1], device)),
+ in_dims=(0, None),
+ check_propagates_grad=False,
+ )
+ test(
+ op,
+ (getter([B0, 3], device), getter([B0, 3], device)),
+ in_dims=(0, 0),
+ check_propagates_grad=False,
+ )
# Nested vmap: op(Tensor, Tensor)
- test(vmap(op), (getter([B0, B1, 2, 3], device), getter([B0, B1, 1, 3], device)), check_propagates_grad=False)
+ test(
+ vmap(op),
+ (getter([B0, B1, 2, 3], device), getter([B0, B1, 1, 3], device)),
+ check_propagates_grad=False,
+ )
# Python number overload: op(Tensor, Number)
number = get_number(getter)
- self._test_unary(lambda t: op(t, number), getter, device, check_propagates_grad=False)
+ self._test_unary(
+ lambda t: op(t, number), getter, device, check_propagates_grad=False
+ )
- @parametrize('case', [
- subtest(_make_case(torch.clamp_min), name='clamp_min'),
- subtest(_make_case(torch.clamp_max), name='clamp_max'),
- ])
+ @parametrize(
+ "case",
+ [
+ subtest(_make_case(torch.clamp_min), name="clamp_min"),
+ subtest(_make_case(torch.clamp_max), name="clamp_max"),
+ ],
+ )
def test_clamp_variant(self, case):
test = self._vmap_test
@@ -1513,22 +1614,29 @@ class TestVmapOperators(Namespace.TestVmapBase):
return getter([]).item()
op, getter = case
- device = 'cpu'
+ device = "cpu"
B0, B1 = 7, 11
# Single vmap: op(Tensor, Tensor)
test(op, (getter([B0, 3], device), getter([B0, 3], device)))
test(op, (getter([B0], device), getter([B0, 2, 3], device)))
test(op, (getter([B0], device), getter([2, B0, 3], device)), in_dims=(0, 1))
- test(op, (getter([B0], device), getter([2, B0, 3], device)),
- in_dims=(0, 1), out_dims=1)
+ test(
+ op,
+ (getter([B0], device), getter([2, B0, 3], device)),
+ in_dims=(0, 1),
+ out_dims=1,
+ )
test(op, (getter([B0], device), getter([2, 3], device)), in_dims=(0, None))
test(op, (getter([2, 3], device), getter([B0, 3], device)), in_dims=(None, 0))
# Nested vmap: op(Tensor, Tensor)
test(vmap(op), (getter([B0, B1, 2, 3], device), getter([B0, B1, 3], device)))
- test(vmap(op, in_dims=(None, 0)),
- (getter([B0, 2, 3], device), getter([B1, 3], device)), in_dims=(0, None))
+ test(
+ vmap(op, in_dims=(None, 0)),
+ (getter([B0, 2, 3], device), getter([B1, 3], device)),
+ in_dims=(0, None),
+ )
# Python number overload: op(Tensor, Number)
number = get_number(getter)
@@ -1547,12 +1655,12 @@ class TestVmapOperators(Namespace.TestVmapBase):
x = torch.randn(3)
y = torch.randn(2, 3)
- with self.assertRaisesRegex(RuntimeError, 'inplace'):
+ with self.assertRaisesRegex(RuntimeError, "inplace"):
vmap(Tensor.copy_, in_dims=(None, 0))(x, y)
def test_silu_backward(self):
test = self._vmap_test
- device = 'cpu'
+ device = "cpu"
getter = TensorFactory.randp1
B0 = 7
op = torch.ops.aten.silu_backward
@@ -1562,20 +1670,36 @@ class TestVmapOperators(Namespace.TestVmapBase):
test(op, (getter([], device), getter([B0], device)), in_dims=(None, 0))
test(op, (getter([2, B0], device), getter([2], device)), in_dims=(1, None))
- @skipIf(TEST_WITH_TORCHDYNAMO and os.getenv('BUILD_ENVIRONMENT', '') == 'linux-focal-py3.8-clang10',
- "Segfaults with dynamo on focal, see https://github.com/pytorch/pytorch/issues/107173")
- @parametrize('case', [
- subtest(_make_case(torch.add), name='add'),
- subtest(_make_case(lambda x, y: x + y), name='add_dunder'),
- subtest(_make_case(torch.sub), name='sub'),
- subtest(_make_case(lambda x, y: x - y), name='sub_dunder'),
- subtest(_make_case(torch.mul), name='mul'),
- subtest(_make_case(lambda x, y: x * y), name='mul_dunder'),
- subtest(_make_case(torch.div, input_getter=TensorFactory.randp1), name='div'),
- subtest(_make_case(lambda x, y: x / y, input_getter=TensorFactory.randp1), name='div_dunder'),
- subtest(_make_case(torch.pow, input_getter=TensorFactory.randp1), name='pow'),
- subtest(_make_case(lambda x, y: x ** y, input_getter=TensorFactory.randp1), name='pow_dunder'),
- ])
+ @skipIf(
+ TEST_WITH_TORCHDYNAMO
+ and os.getenv("BUILD_ENVIRONMENT", "") == "linux-focal-py3.8-clang10",
+ "Segfaults with dynamo on focal, see https://github.com/pytorch/pytorch/issues/107173",
+ )
+ @parametrize(
+ "case",
+ [
+ subtest(_make_case(torch.add), name="add"),
+ subtest(_make_case(lambda x, y: x + y), name="add_dunder"),
+ subtest(_make_case(torch.sub), name="sub"),
+ subtest(_make_case(lambda x, y: x - y), name="sub_dunder"),
+ subtest(_make_case(torch.mul), name="mul"),
+ subtest(_make_case(lambda x, y: x * y), name="mul_dunder"),
+ subtest(
+ _make_case(torch.div, input_getter=TensorFactory.randp1), name="div"
+ ),
+ subtest(
+ _make_case(lambda x, y: x / y, input_getter=TensorFactory.randp1),
+ name="div_dunder",
+ ),
+ subtest(
+ _make_case(torch.pow, input_getter=TensorFactory.randp1), name="pow"
+ ),
+ subtest(
+ _make_case(lambda x, y: x**y, input_getter=TensorFactory.randp1),
+ name="pow_dunder",
+ ),
+ ],
+ )
def test_arithmetic(self, case):
test = self._vmap_test
@@ -1583,22 +1707,29 @@ class TestVmapOperators(Namespace.TestVmapBase):
return getter([]).item()
op, getter = case
- device = 'cpu'
+ device = "cpu"
B0, B1 = 7, 11
# Single vmap: op(Tensor, Tensor)
test(op, (getter([B0, 3], device), getter([B0, 3], device)))
test(op, (getter([B0], device), getter([B0, 2, 3], device)))
test(op, (getter([B0], device), getter([2, B0, 3], device)), in_dims=(0, 1))
- test(op, (getter([B0], device), getter([2, B0, 3], device)),
- in_dims=(0, 1), out_dims=1)
+ test(
+ op,
+ (getter([B0], device), getter([2, B0, 3], device)),
+ in_dims=(0, 1),
+ out_dims=1,
+ )
test(op, (getter([B0], device), getter([2, 3], device)), in_dims=(0, None))
test(op, (getter([2, 3], device), getter([B0, 3], device)), in_dims=(0, None))
# Nested vmap: op(Tensor, Tensor)
test(vmap(op), (getter([B0, B1, 2, 3], device), getter([B0, B1, 3], device)))
- test(vmap(op, in_dims=(None, 0)),
- (getter([B0, 2, 3], device), getter([B1, 3], device)), in_dims=(0, None))
+ test(
+ vmap(op, in_dims=(None, 0)),
+ (getter([B0, 2, 3], device), getter([B1, 3], device)),
+ in_dims=(0, None),
+ )
# Python number overload: op(Tensor, Number) (and vice-versa)
number = get_number(getter)
@@ -1666,7 +1797,9 @@ class TestVmapOperators(Namespace.TestVmapBase):
offset = x.storage_offset()
# Broadcast
- _test([5, 5, 2, 3], [0, 0, S0, S1], offset, x, lambda x: x.expand(5, 5, 2, 3))
+ _test(
+ [5, 5, 2, 3], [0, 0, S0, S1], offset, x, lambda x: x.expand(5, 5, 2, 3)
+ )
# transpose
_test([3, 2], [S1, S0], offset, x, lambda x: x.transpose(0, 1))
# select
@@ -1680,13 +1813,17 @@ class TestVmapOperators(Namespace.TestVmapBase):
B1 = 7
x = torch.randn(B1, B0, 2, 3)
S0, S1 = x.stride()[2:]
- result = vmap(vmap(lambda t: t.as_strided([5, 5, 2, 3], [0, 0, S0, S1])), in_dims=1)(x)
+ result = vmap(
+ vmap(lambda t: t.as_strided([5, 5, 2, 3], [0, 0, S0, S1])), in_dims=1
+ )(x)
expected = vmap(vmap(lambda t: t.expand(5, 5, 2, 3)), in_dims=1)(x)
self.assertTrue(result._base is expected._base)
self.assertEqual(result, expected)
# Check that mal-formatted size/strides doesn't crash
- with self.assertRaisesRegex(RuntimeError, 'size and stride must have the same length'):
+ with self.assertRaisesRegex(
+ RuntimeError, "size and stride must have the same length"
+ ):
x = torch.randn(B0, 2, 3).transpose(0, 1)
vmap(lambda x: x.as_strided([1, 1, 1], [1, 1]))(x)
@@ -1699,7 +1836,7 @@ class TestVmapOperators(Namespace.TestVmapBase):
# Sanity check #1a: The maximum indexable location of
# xs[i].as_strided(sizes, strides, offset + xs[i].offset() - xs.offset())
# is less than or equal to the maximum indexable location of xs[i].
- msg = 'This is not supported inside of vmap'
+ msg = "This is not supported inside of vmap"
with self.assertRaisesRegex(RuntimeError, msg):
x = torch.randn(B0, 3)
vmap(lambda x: x.as_strided([3], [1], 1))(x)
@@ -1733,14 +1870,14 @@ class TestVmapOperators(Namespace.TestVmapBase):
y = torch.randn(B, 2, 5)
t = torch.randint(0, 5, (B, 2))
test(op, (y, t))
- test(functools.partial(op, reduction='sum'), (y, t))
- test(functools.partial(op, reduction='none'), (y, t))
+ test(functools.partial(op, reduction="sum"), (y, t))
+ test(functools.partial(op, reduction="none"), (y, t))
y = torch.randn(B, 2, 5)
t = torch.randint(0, 5, (2,))
test(op, (y, t), in_dims=(0, None))
- test(functools.partial(op, reduction='sum'), (y, t), in_dims=(0, None))
- test(functools.partial(op, reduction='none'), (y, t), in_dims=(0, None))
+ test(functools.partial(op, reduction="sum"), (y, t), in_dims=(0, None))
+ test(functools.partial(op, reduction="none"), (y, t), in_dims=(0, None))
def test_adaptive_avg_pool2d(self):
test = self._vmap_test
@@ -1767,19 +1904,32 @@ class TestVmapOperators(Namespace.TestVmapBase):
# left arg is vmapped
test(op, (torch.rand(B0, 2, 3, 5), torch.rand(2, 5, 3)), in_dims=(0, None))
- test(vmap(op, in_dims=(0, None)), (torch.rand(B1, B0, 2, 3, 5), torch.rand(2, 5, 3)),
- in_dims=(1, None))
+ test(
+ vmap(op, in_dims=(0, None)),
+ (torch.rand(B1, B0, 2, 3, 5), torch.rand(2, 5, 3)),
+ in_dims=(1, None),
+ )
# right arg is vmapped
test(op, (torch.rand(2, 5, 3), torch.rand(B0, 2, 3, 5)), in_dims=(None, 0))
- test(vmap(op, in_dims=(None, 0)), (torch.rand(2, 5, 3), torch.rand(B1, B0, 2, 3, 5)),
- in_dims=(None, 1))
+ test(
+ vmap(op, in_dims=(None, 0)),
+ (torch.rand(2, 5, 3), torch.rand(B1, B0, 2, 3, 5)),
+ in_dims=(None, 1),
+ )
# both args are vmapped
test(op, (torch.rand(B0, 2, 3, 5), torch.rand(B0, 2, 5, 3)))
- test(vmap(op), (torch.rand(B1, B0, 2, 3, 5), torch.rand(B0, B1, 2, 5, 3)), in_dims=(1, 0))
- test(vmap(op, in_dims=(0, None)),
- (torch.rand(B1, 2, 3, 5), torch.rand(B0, 2, 5, 3)), in_dims=(None, 0))
+ test(
+ vmap(op),
+ (torch.rand(B1, B0, 2, 3, 5), torch.rand(B0, B1, 2, 5, 3)),
+ in_dims=(1, 0),
+ )
+ test(
+ vmap(op, in_dims=(0, None)),
+ (torch.rand(B1, 2, 3, 5), torch.rand(B0, 2, 5, 3)),
+ in_dims=(None, 0),
+ )
def test_cat(self):
test = self._vmap_test
@@ -1789,22 +1939,37 @@ class TestVmapOperators(Namespace.TestVmapBase):
def get_op(dim):
def op(*tensors):
return torch.cat(tensors, dim=dim)
+
return op
test(get_op(0), (torch.rand(B0, 2), torch.rand(B0, 3)))
test(get_op(0), (torch.rand(B0, 0), torch.rand(B0, 0)))
test(get_op(0), (torch.rand(2), torch.rand(B0, 0)), in_dims=(None, 0))
- test(get_op(1), (torch.rand(2, 5), torch.rand(B0, 0), torch.rand(2, 3)), in_dims=(None, 0, None))
+ test(
+ get_op(1),
+ (torch.rand(2, 5), torch.rand(B0, 0), torch.rand(2, 3)),
+ in_dims=(None, 0, None),
+ )
test(get_op(1), (torch.rand(B0, 2, 3), torch.rand(B0, 0)))
test(get_op(1), (torch.rand(B0, 2, 3, 4), torch.rand(0)), in_dims=(0, None))
- test(get_op(0), (torch.rand(0), torch.rand(B0, 2), torch.rand(B0, 0)), in_dims=(None, 0, 0))
+ test(
+ get_op(0),
+ (torch.rand(0), torch.rand(B0, 2), torch.rand(B0, 0)),
+ in_dims=(None, 0, 0),
+ )
test(get_op(0), (torch.rand(2), torch.rand(B0, 3)), in_dims=(None, 0))
test(get_op(0), (torch.rand(2, 17), torch.rand(3, 17, B0)), in_dims=(None, 2))
test(get_op(-1), (torch.rand(17, 2), torch.rand(17, 3, B0)), in_dims=(None, 2))
- test(vmap(get_op(0), in_dims=(0, None)),
- (torch.rand(B1, 2), torch.rand(B0, 3)), in_dims=(None, 0))
- test(vmap(get_op(0), in_dims=(0, 0)),
- (torch.rand(B1, 2), torch.rand(B0, B1, 3)), in_dims=(None, 0))
+ test(
+ vmap(get_op(0), in_dims=(0, None)),
+ (torch.rand(B1, 2), torch.rand(B0, 3)),
+ in_dims=(None, 0),
+ )
+ test(
+ vmap(get_op(0), in_dims=(0, 0)),
+ (torch.rand(B1, 2), torch.rand(B0, B1, 3)),
+ in_dims=(None, 0),
+ )
def test_unsafe_view(self):
# Unsafe view isn't exposed, so we get at it via
@@ -1825,6 +1990,7 @@ class TestVmapOperators(Namespace.TestVmapBase):
def run_test(dtype):
def get(shape):
return torch.randn(shape, dtype=dtype)
+
B0, B1 = 7, 11
test = self._vmap_test
@@ -1836,8 +2002,7 @@ class TestVmapOperators(Namespace.TestVmapBase):
# Doubly nested vmap
test(vmap(op), [get([B0, B1])])
test(vmap(op), [get([B1, 2, 5, B0, 3])], in_dims=2)
- test(vmap(op, in_dims=2), [get([2, 5, B0, B1, 3])],
- in_dims=2, out_dims=2)
+ test(vmap(op, in_dims=2), [get([2, 5, B0, B1, 3])], in_dims=2, out_dims=2)
# correctness tests
run_test(torch.float)
@@ -1851,7 +2016,7 @@ class TestVmapOperators(Namespace.TestVmapBase):
def test_contiguous(self):
op = Tensor.contiguous
- self._test_unary(op, TensorFactory.randn, 'cpu')
+ self._test_unary(op, TensorFactory.randn, "cpu")
# check that contiguous returns the original tensor if the per-examples
# are already contiguous
@@ -1861,7 +2026,7 @@ class TestVmapOperators(Namespace.TestVmapBase):
result = vmap(Tensor.contiguous, in_dims=2, out_dims=2)(x)
self.assertTrue(result is x)
- msg = 'NYI: querying is_contiguous inside of vmap for memory_format'
+ msg = "NYI: querying is_contiguous inside of vmap for memory_format"
tensor = torch.randn(B0, 3)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(functools.partial(op, memory_format=torch.channels_last))(tensor)
@@ -1895,10 +2060,16 @@ class TestVmapOperators(Namespace.TestVmapBase):
# tests for torch.split(self, split_size: int, dim)
test(op, (torch.rand(B0, 2, 1024), 15, -1), in_dims=(0, None, None))
test(op, (torch.rand(2, B0, 1024), 9, 1), in_dims=(1, None, None))
- test(vmap(op, in_dims=(0, None, None)), (torch.rand(B1, 1023, B0, 5), 4, 0),
- in_dims=(2, None, None))
- test(vmap(vmap(lambda t: op(t, 4, 1), in_dims=2)),
- (torch.rand(B1, 2, B0, 64, B2),), in_dims=2)
+ test(
+ vmap(op, in_dims=(0, None, None)),
+ (torch.rand(B1, 1023, B0, 5), 4, 0),
+ in_dims=(2, None, None),
+ )
+ test(
+ vmap(vmap(lambda t: op(t, 4, 1), in_dims=2)),
+ (torch.rand(B1, 2, B0, 64, B2),),
+ in_dims=2,
+ )
def test_clamp(self):
clamp_cases = (
@@ -1909,7 +2080,7 @@ class TestVmapOperators(Namespace.TestVmapBase):
(lambda t: t.clamp_max(max=0.5), TensorFactory.randn),
)
for op, getter in clamp_cases:
- self._test_unary(op, getter, 'cpu')
+ self._test_unary(op, getter, "cpu")
def test_comparison_ops(self):
test = functools.partial(self._vmap_test, check_propagates_grad=False)
@@ -1918,12 +2089,18 @@ class TestVmapOperators(Namespace.TestVmapBase):
B0, B1 = 7, 11
ops = (
- torch.eq, lambda x, y: x == y,
- torch.gt, lambda x, y: x > y,
- torch.ge, lambda x, y: x >= y,
- torch.le, lambda x, y: x <= y,
- torch.lt, lambda x, y: x < y,
- torch.ne, lambda x, y: x != y,
+ torch.eq,
+ lambda x, y: x == y,
+ torch.gt,
+ lambda x, y: x > y,
+ torch.ge,
+ lambda x, y: x >= y,
+ torch.le,
+ lambda x, y: x <= y,
+ torch.lt,
+ lambda x, y: x < y,
+ torch.ne,
+ lambda x, y: x != y,
)
for op in ops:
@@ -1937,12 +2114,17 @@ class TestVmapOperators(Namespace.TestVmapBase):
# Nested vmap: op(Tensor, Tensor)
test(vmap(op), (getter([B0, B1, 2, 3]), getter([B0, B1, 3])))
- test(vmap(op, in_dims=(None, 0)),
- (getter([B0, 2, 3]), getter([B1, 3])), in_dims=(0, None))
+ test(
+ vmap(op, in_dims=(None, 0)),
+ (getter([B0, 2, 3]), getter([B1, 3])),
+ in_dims=(0, None),
+ )
# test number as inputs
number = getter([]).item()
- self._test_unary(lambda t: op(t, number), getter, 'cpu', check_propagates_grad=False)
+ self._test_unary(
+ lambda t: op(t, number), getter, "cpu", check_propagates_grad=False
+ )
def test_cross_batch_size_three(self):
# Let's test corner case when batch_size is 3 and cross' dim argument is not specified
@@ -1952,8 +2134,11 @@ class TestVmapOperators(Namespace.TestVmapBase):
test = self._vmap_test
B0 = B1 = 3
test(op, (torch.rand(B0, 2, 3), torch.rand(B0, 2, 3)))
- test(vmap(op, in_dims=(0, None)), (torch.rand(B0, B1, 2, 3), torch.rand(B0, B1, 2, 3)),
- in_dims=(None, 1))
+ test(
+ vmap(op, in_dims=(0, None)),
+ (torch.rand(B0, B1, 2, 3), torch.rand(B0, B1, 2, 3)),
+ in_dims=(None, 1),
+ )
def test_diagonal(self):
tensor = torch.randn(3, 5, 7, 11, 13)
@@ -1964,8 +2149,12 @@ class TestVmapOperators(Namespace.TestVmapBase):
test(op, (tensor, 2, 1, 2), in_dims=(1, None, None, None))
test(op, (tensor, 0, -2, -1), in_dims=(1, None, None, None), out_dims=1)
test(vmap(lambda t: op(t, 0, 0, -1)), (tensor,), in_dims=1, out_dims=1)
- test(vmap(vmap(lambda t: op(t, 0, 0, 1), in_dims=1), in_dims=3),
- (tensor,), in_dims=1, out_dims=1)
+ test(
+ vmap(vmap(lambda t: op(t, 0, 0, 1), in_dims=1), in_dims=3),
+ (tensor,),
+ in_dims=1,
+ out_dims=1,
+ )
def test_dot(self):
op = torch.dot
@@ -1983,19 +2172,28 @@ class TestVmapOperators(Namespace.TestVmapBase):
# left arg is vmapped
test(op, (torch.rand(B0, 5), torch.rand(5)), in_dims=(0, None))
- test(vmap(op, in_dims=(0, None)), (torch.rand(B1, B0, 5), torch.rand(5)),
- in_dims=(1, None))
+ test(
+ vmap(op, in_dims=(0, None)),
+ (torch.rand(B1, B0, 5), torch.rand(5)),
+ in_dims=(1, None),
+ )
# right arg is vmapped
test(op, (torch.rand(5), torch.rand(B0, 5)), in_dims=(None, 0))
- test(vmap(op, in_dims=(None, 0)), (torch.rand(5), torch.rand(B1, B0, 5)),
- in_dims=(None, 1))
+ test(
+ vmap(op, in_dims=(None, 0)),
+ (torch.rand(5), torch.rand(B1, B0, 5)),
+ in_dims=(None, 1),
+ )
# both args are vmapped
test(op, (torch.rand(B0, 5), torch.rand(B0, 5)))
test(vmap(op), (torch.rand(B1, B0, 5), torch.rand(B0, B1, 5)), in_dims=(1, 0))
- test(vmap(op, in_dims=(0, None)),
- (torch.rand(B1, 5), torch.rand(B0, 5)), in_dims=(None, 0))
+ test(
+ vmap(op, in_dims=(0, None)),
+ (torch.rand(B1, 5), torch.rand(B0, 5)),
+ in_dims=(None, 0),
+ )
def test_expand_as(self):
op = torch.Tensor.expand_as
@@ -2005,7 +2203,11 @@ class TestVmapOperators(Namespace.TestVmapBase):
test(op, (torch.rand(B0, 1, 5), torch.rand(2, 3, 5)), in_dims=(0, None))
test(op, (torch.rand(1, 5), torch.rand(B0, 2, 3, 5)), in_dims=(None, 0))
test(vmap(op), (torch.rand(B0, B1, 1, 5), torch.rand(B0, B1, 2, 3, 5)))
- test(vmap(op), (torch.rand(B0, B1, 1, 5), torch.rand(B1, B0, 2, 3, 5)), in_dims=(0, 1))
+ test(
+ vmap(op),
+ (torch.rand(B0, B1, 1, 5), torch.rand(B1, B0, 2, 3, 5)),
+ in_dims=(0, 1),
+ )
test(vmap(op), (torch.rand(B0, B1), torch.rand(B1, 2, 3, 5)), in_dims=(0, None))
test(vmap(vmap(op)), (torch.rand(B0, B1, B2), torch.rand(B0, B1, B2, 2, 3, 5)))
@@ -2027,18 +2229,22 @@ class TestVmapOperators(Namespace.TestVmapBase):
# Doubly nested vmap
test(vmap(op), [TensorFactory.randn([B0, B1])])
test(vmap(op), [TensorFactory.randn([B1, 2, 5, B0, 3])], in_dims=2)
- test(vmap(op, in_dims=2), [TensorFactory.randn([2, 5, B0, B1, 3])],
- in_dims=2, out_dims=2)
+ test(
+ vmap(op, in_dims=2),
+ [TensorFactory.randn([2, 5, B0, B1, 3])],
+ in_dims=2,
+ out_dims=2,
+ )
# test when value is a batched tensor for fill_ operator
B0, B1 = 3, 5
test(Tensor.fill_, [TensorFactory.randn([B0, B1]), TensorFactory.randn(B0)])
- with self.assertRaisesRegex(RuntimeError,
- ""):
+ with self.assertRaisesRegex(RuntimeError, ""):
# Runtime Error is thrown when the tensor being written to isn't being vmapped over
- vmap(Tensor.fill_, (None, 0))(TensorFactory.randn([B0, B1]),
- TensorFactory.randn([B0]))
+ vmap(Tensor.fill_, (None, 0))(
+ TensorFactory.randn([B0, B1]), TensorFactory.randn([B0])
+ )
def _test_complex_views(self, op, dtypes):
test = self._vmap_view_test
@@ -2058,8 +2264,7 @@ class TestVmapOperators(Namespace.TestVmapBase):
# Doubly nested vmap
test(vmap(op), [get([B0, B1])])
test(vmap(op), [get([B1, 2, 5, 3, B0])], in_dims=4)
- test(vmap(op, in_dims=2), [get([2, 5, B0, B1, 3])],
- in_dims=2, out_dims=2)
+ test(vmap(op, in_dims=2), [get([2, 5, B0, B1, 3])], in_dims=2, out_dims=2)
for dtype in dtypes:
run_test(op, dtype)
@@ -2071,7 +2276,9 @@ class TestVmapOperators(Namespace.TestVmapBase):
self._test_complex_views(torch.imag, dtypes=[torch.cfloat, torch.cdouble])
def test_view_as_real(self):
- self._test_complex_views(torch.view_as_real, dtypes=[torch.cfloat, torch.cdouble])
+ self._test_complex_views(
+ torch.view_as_real, dtypes=[torch.cfloat, torch.cdouble]
+ )
def test_view_as_complex(self):
def run_test(dtype):
@@ -2090,8 +2297,9 @@ class TestVmapOperators(Namespace.TestVmapBase):
# Doubly nested vmap
test(vmap(op), [get([B0, B1, 2])])
test(vmap(op), [get([B1, 2, 5, B0, 3, 2])], in_dims=2)
- test(vmap(op, in_dims=2), [get([2, 5, B0, B1, 3, 2])],
- in_dims=2, out_dims=2)
+ test(
+ vmap(op, in_dims=2), [get([2, 5, B0, B1, 3, 2])], in_dims=2, out_dims=2
+ )
# Interesting case #1: Batch dim directly before dim of size 2
test(op, [get([3, B0, 2])], in_dims=1)
@@ -2112,7 +2320,7 @@ class TestVmapOperators(Namespace.TestVmapBase):
vmap(vmap(op, in_dims=1), in_dims=1)(get([2, B0, B1]))
# Invalid input: no dimension of size 2
- msg = 'Input tensor must have one or more dimensions'
+ msg = "Input tensor must have one or more dimensions"
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(get([B0]))
with self.assertRaisesRegex(RuntimeError, msg):
@@ -2120,7 +2328,7 @@ class TestVmapOperators(Namespace.TestVmapBase):
# Invalid input: Batch dim has size 2, but the logical last dim does
# not have size 2
- msg = 'Tensor must have a last dimension of size 2'
+ msg = "Tensor must have a last dimension of size 2"
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=1)(get([3, 2]))
@@ -2141,7 +2349,7 @@ class TestVmapOperators(Namespace.TestVmapBase):
self.assertEqual(vmap(foo)(tensor), torch.tensor([0, 0, 0]))
def test_is_floating_point(self):
- float_tensor = torch.tensor([1., 2., 3.])
+ float_tensor = torch.tensor([1.0, 2.0, 3.0])
long_tensor = torch.tensor([1, 2, 3])
def foo(x):
@@ -2153,14 +2361,13 @@ class TestVmapOperators(Namespace.TestVmapBase):
self.assertEqual(vmap(foo)(float_tensor), torch.tensor([1, 1, 1]))
self.assertEqual(vmap(foo)(long_tensor), torch.tensor([0, 0, 0]))
- @unittest.skipIf(IS_WINDOWS,
- reason="Windows not yet supported for torch.compile")
+ @unittest.skipIf(IS_WINDOWS, reason="Windows not yet supported for torch.compile")
def test_is_contiguous(self):
def foo(x):
if x.is_contiguous():
- return torch.tensor(1.)
+ return torch.tensor(1.0)
else:
- return torch.tensor(0.)
+ return torch.tensor(0.0)
B0, B1 = 3, 5
@@ -2204,7 +2411,7 @@ class TestVmapOperators(Namespace.TestVmapBase):
x.is_contiguous(memory_format=memory_format)
return x
- msg = 'NYI: querying is_contiguous inside of vmap for memory_format'
+ msg = "NYI: querying is_contiguous inside of vmap for memory_format"
tensor = torch.randn(B0, 2, 7, 3)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(functools.partial(baz, memory_format=torch.channels_last))(tensor)
@@ -2212,6 +2419,7 @@ class TestVmapOperators(Namespace.TestVmapBase):
vmap(functools.partial(baz, memory_format=torch.channels_last_3d))(tensor)
for mf in (torch.channels_last, torch.channels_last_3d):
+
@torch.compile(backend="eager", fullgraph=True)
def f(x):
if x.is_contiguous(memory_format=mf):
@@ -2246,7 +2454,7 @@ class TestVmapOperators(Namespace.TestVmapBase):
return torch.unsqueeze(x, -1)
# bdims in canonical order
- test(vmap(unsqueeze_0), (torch.rand(B0, B1, 2), ))
+ test(vmap(unsqueeze_0), (torch.rand(B0, B1, 2),))
test(vmap(unsqueeze_last), (torch.rand(B0, B1, 2),))
# wild bdims
@@ -2263,17 +2471,30 @@ class TestVmapOperators(Namespace.TestVmapBase):
# movedim(tensor, int, int) variant
test(op, (torch.rand(B0, 2, 5), 0, 1), in_dims=(0, None, None))
test(op, (torch.rand(2, B0, 5), 0, 1), in_dims=(1, None, None))
- test(vmap(op, in_dims=(0, None, None)), (torch.rand(B1, 2, B0, 5), 0, 1), in_dims=(2, None, None))
- test(vmap(vmap(op, in_dims=(2, None, None)), in_dims=(0, None, None)),
- (torch.rand(B1, 2, B0, 5, B2), 0, 1), in_dims=(2, None, None))
+ test(
+ vmap(op, in_dims=(0, None, None)),
+ (torch.rand(B1, 2, B0, 5), 0, 1),
+ in_dims=(2, None, None),
+ )
+ test(
+ vmap(vmap(op, in_dims=(2, None, None)), in_dims=(0, None, None)),
+ (torch.rand(B1, 2, B0, 5, B2), 0, 1),
+ in_dims=(2, None, None),
+ )
# movedim(tensor, intlist, intlist) variant
test(op, (torch.rand(B0, 2, 3, 5), [1, 0], [0, 2]), in_dims=(0, None, None))
test(op, (torch.rand(2, 3, B0, 5), [1, 0], [0, 2]), in_dims=(1, None, None))
- test(vmap(op, in_dims=(0, None, None)),
- (torch.rand(B1, 2, B0, 5), [0, 1], [1, 0]), in_dims=(2, None, None))
- test(vmap(vmap(op, in_dims=(2, None, None)), in_dims=(0, None, None)),
- (torch.rand(B1, 2, B0, 5, B2), [0, 1], [1, 0]), in_dims=(2, None, None))
+ test(
+ vmap(op, in_dims=(0, None, None)),
+ (torch.rand(B1, 2, B0, 5), [0, 1], [1, 0]),
+ in_dims=(2, None, None),
+ )
+ test(
+ vmap(vmap(op, in_dims=(2, None, None)), in_dims=(0, None, None)),
+ (torch.rand(B1, 2, B0, 5, B2), [0, 1], [1, 0]),
+ in_dims=(2, None, None),
+ )
def test_mm(self):
op = torch.mm
@@ -2291,19 +2512,32 @@ class TestVmapOperators(Namespace.TestVmapBase):
# left arg is vmapped
test(op, (torch.rand(B0, 2, 5), torch.rand(5, 2)), in_dims=(0, None))
- test(vmap(op, in_dims=(0, None)), (torch.rand(B1, B0, 2, 5), torch.rand(5, 2)),
- in_dims=(1, None))
+ test(
+ vmap(op, in_dims=(0, None)),
+ (torch.rand(B1, B0, 2, 5), torch.rand(5, 2)),
+ in_dims=(1, None),
+ )
# right arg is vmapped
test(op, (torch.rand(2, 5), torch.rand(B0, 5, 2)), in_dims=(None, 0))
- test(vmap(op, in_dims=(None, 0)), (torch.rand(2, 5), torch.rand(B1, B0, 5, 2)),
- in_dims=(None, 1))
+ test(
+ vmap(op, in_dims=(None, 0)),
+ (torch.rand(2, 5), torch.rand(B1, B0, 5, 2)),
+ in_dims=(None, 1),
+ )
# both args are vmapped
test(op, (torch.rand(B0, 2, 5), torch.rand(B0, 5, 2)))
- test(vmap(op), (torch.rand(B1, B0, 2, 5), torch.rand(B0, B1, 5, 2)), in_dims=(1, 0))
- test(vmap(op, in_dims=(0, None)),
- (torch.rand(B1, 2, 5), torch.rand(B0, 5, 2)), in_dims=(None, 0))
+ test(
+ vmap(op),
+ (torch.rand(B1, B0, 2, 5), torch.rand(B0, B1, 5, 2)),
+ in_dims=(1, 0),
+ )
+ test(
+ vmap(op, in_dims=(0, None)),
+ (torch.rand(B1, 2, 5), torch.rand(B0, 5, 2)),
+ in_dims=(None, 0),
+ )
def test_mv(self):
op = torch.mv
@@ -2321,19 +2555,30 @@ class TestVmapOperators(Namespace.TestVmapBase):
# left arg is vmapped
test(op, (torch.rand(B0, 2, 5), torch.rand(5)), in_dims=(0, None))
- test(vmap(op, in_dims=(0, None)), (torch.rand(B1, B0, 2, 5), torch.rand(5)),
- in_dims=(1, None))
+ test(
+ vmap(op, in_dims=(0, None)),
+ (torch.rand(B1, B0, 2, 5), torch.rand(5)),
+ in_dims=(1, None),
+ )
# right arg is vmapped
test(op, (torch.rand(2, 5), torch.rand(B0, 5)), in_dims=(None, 0))
- test(vmap(op, in_dims=(None, 0)), (torch.rand(2, 5), torch.rand(B1, B0, 5)),
- in_dims=(None, 1))
+ test(
+ vmap(op, in_dims=(None, 0)),
+ (torch.rand(2, 5), torch.rand(B1, B0, 5)),
+ in_dims=(None, 1),
+ )
# both args are vmapped
test(op, (torch.rand(B0, 2, 5), torch.rand(B0, 5)))
- test(vmap(op), (torch.rand(B1, B0, 2, 5), torch.rand(B0, B1, 5)), in_dims=(1, 0))
- test(vmap(op, in_dims=(0, None)),
- (torch.rand(B1, 2, 5), torch.rand(B0, 5)), in_dims=(None, 0))
+ test(
+ vmap(op), (torch.rand(B1, B0, 2, 5), torch.rand(B0, B1, 5)), in_dims=(1, 0)
+ )
+ test(
+ vmap(op, in_dims=(0, None)),
+ (torch.rand(B1, 2, 5), torch.rand(B0, 5)),
+ in_dims=(None, 0),
+ )
def test_narrow(self):
op = torch.narrow
@@ -2342,10 +2587,18 @@ class TestVmapOperators(Namespace.TestVmapBase):
test(op, (torch.rand(B0, 2, 5), -1, 1, 3), in_dims=(0, None, None, None))
test(op, (torch.rand(2, B0, 5), 1, 1, 3), in_dims=(1, None, None, None))
- test(vmap(op, in_dims=(0, None, None, None)),
- (torch.rand(B1, 2, B0, 5), 1, 0, 0), in_dims=(2, None, None, None))
- test(vmap(vmap(op, in_dims=(2, None, None, None)), in_dims=(0, None, None, None)),
- (torch.rand(B1, 2, B0, 5, B2), -1, 2, 3), in_dims=(2, None, None, None))
+ test(
+ vmap(op, in_dims=(0, None, None, None)),
+ (torch.rand(B1, 2, B0, 5), 1, 0, 0),
+ in_dims=(2, None, None, None),
+ )
+ test(
+ vmap(
+ vmap(op, in_dims=(2, None, None, None)), in_dims=(0, None, None, None)
+ ),
+ (torch.rand(B1, 2, B0, 5, B2), -1, 2, 3),
+ in_dims=(2, None, None, None),
+ )
def test_new_empty(self):
# Empty is non-deterministic so we just check that the shape of the
@@ -2383,7 +2636,9 @@ class TestVmapOperators(Namespace.TestVmapBase):
self.assertEqual(result.stride(), [B1 * S, S] + stride)
x = torch.randn(B1, B0)
- result = vmap(vmap(lambda x: x.new_empty_strided(size, stride)), in_dims=1)(x)
+ result = vmap(vmap(lambda x: x.new_empty_strided(size, stride)), in_dims=1)(
+ x
+ )
S = x.new_empty_strided(size, stride).storage().size()
self.assertEqual(result.shape, [B0, B1] + size)
self.assertEqual(result.stride(), [B1 * S, S] + stride)
@@ -2420,7 +2675,11 @@ class TestVmapOperators(Namespace.TestVmapBase):
test(op, (torch.rand(B0, 2, 5), 0, 0), in_dims=(0, None, None))
test(op, (torch.rand(2, B0, 5), 1, 1), in_dims=(1, None, None))
test(vmap(lambda t: op(t, 1, 1)), (torch.rand(B1, 2, B0, 5),), in_dims=2)
- test(vmap(vmap(lambda t: op(t, 1, 1), in_dims=1)), (torch.rand(B1, 2, B0, B2, 5),), in_dims=2)
+ test(
+ vmap(vmap(lambda t: op(t, 1, 1), in_dims=1)),
+ (torch.rand(B1, 2, B0, B2, 5),),
+ in_dims=2,
+ )
def test_roll_no_dims(self):
op = torch.roll
@@ -2429,7 +2688,11 @@ class TestVmapOperators(Namespace.TestVmapBase):
test(op, (torch.rand(B0, 2, 5), 2), in_dims=(0, None))
test(op, (torch.rand(2, B0, 5), 3), in_dims=(1, None))
test(vmap(lambda t: op(t, 3)), (torch.rand(B1, 2, B0, 5),), in_dims=2)
- test(vmap(vmap(lambda t: op(t, 3), in_dims=1)), (torch.rand(B1, 2, B0, B2, 5),), in_dims=2)
+ test(
+ vmap(vmap(lambda t: op(t, 3), in_dims=1)),
+ (torch.rand(B1, 2, B0, B2, 5),),
+ in_dims=2,
+ )
def test_stack(self):
test = self._vmap_test
@@ -2439,25 +2702,37 @@ class TestVmapOperators(Namespace.TestVmapBase):
def get_op(dim):
def op(*tensors):
return torch.stack(tensors, dim=dim)
+
return op
test(get_op(0), (torch.rand(B0, 3), torch.rand(B0, 3)))
test(get_op(0), (torch.rand(3), torch.rand(B0, 3)), in_dims=(None, 0))
test(get_op(0), (torch.rand(2, 17), torch.rand(2, 17, B0)), in_dims=(None, 2))
test(get_op(-1), (torch.rand(2, 17), torch.rand(2, 17, B0)), in_dims=(None, 2))
- test(vmap(get_op(0), in_dims=(0, None)),
- (torch.rand(B1, 2), torch.rand(B0, 2)), in_dims=(None, 0))
- test(vmap(get_op(0), in_dims=(0, 0)),
- (torch.rand(B1, 2), torch.rand(B0, B1, 2)), in_dims=(None, 0))
+ test(
+ vmap(get_op(0), in_dims=(0, None)),
+ (torch.rand(B1, 2), torch.rand(B0, 2)),
+ in_dims=(None, 0),
+ )
+ test(
+ vmap(get_op(0), in_dims=(0, 0)),
+ (torch.rand(B1, 2), torch.rand(B0, B1, 2)),
+ in_dims=(None, 0),
+ )
def test_slice(self):
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
test(lambda t: t[0:1], (torch.rand(B0, 3, 5),))
test(lambda t: t[:, 1:3], (torch.rand(3, 5, B0),), in_dims=2)
- test(vmap(lambda t: t[:, 0:1], in_dims=2), (torch.rand(3, 5, B0, B1),), in_dims=2)
- test(vmap(vmap(lambda t: t[0:1], in_dims=2), in_dims=2),
- (torch.rand(3, 5, B0, B1, B2),), in_dims=2)
+ test(
+ vmap(lambda t: t[:, 0:1], in_dims=2), (torch.rand(3, 5, B0, B1),), in_dims=2
+ )
+ test(
+ vmap(vmap(lambda t: t[0:1], in_dims=2), in_dims=2),
+ (torch.rand(3, 5, B0, B1, B2),),
+ in_dims=2,
+ )
@xfailIfTorchDynamo
def test_squeeze(self):
@@ -2507,8 +2782,12 @@ class TestVmapOperators(Namespace.TestVmapBase):
test(vmap(lambda x: op(x, 0)), [torch.randn([B0, B1])])
test(vmap(lambda x: op(x, -1)), [torch.randn([B0, B1])])
test(vmap(lambda x: op(x, -2)), [torch.randn([B1, 2, 5, B0, 3])], in_dims=2)
- test(vmap(lambda x: op(x, 2), in_dims=2), [torch.randn([2, 5, B0, B1, 3])],
- in_dims=2, out_dims=2)
+ test(
+ vmap(lambda x: op(x, 2), in_dims=2),
+ [torch.randn([2, 5, B0, B1, 3])],
+ in_dims=2,
+ out_dims=2,
+ )
def test_sum_dim(self):
self._test_mean_sum_dim(torch.sum)
@@ -2520,6 +2799,7 @@ class TestVmapOperators(Namespace.TestVmapBase):
def test(f, args):
for loop_out, batched_out in get_fallback_and_vmap_exhaustive(f, args, {}):
self.assertEqual(loop_out, batched_out)
+
B0 = 5
test(lambda x: torch.argmax(x), [torch.randn(B0)])
test(lambda x: torch.argmax(x), [torch.randn(B0, 2, 3)])
@@ -2570,31 +2850,64 @@ class TestVmapOperators(Namespace.TestVmapBase):
B0, B1, B2 = 7, 11, 13
op = torch.reshape
test(op, (torch.rand(B0, 2 * 5), [2, 5]), in_dims=(0, None), check_view=True)
- test(op, (torch.rand(2, B0, 5), [1, 1, 10]), in_dims=(1, None), check_view=False)
- test(vmap(lambda t: t.reshape([-1])), (torch.rand(B0, B1, 2, 5),), check_view=True)
- test(vmap(vmap(lambda t: t.reshape([-1]), in_dims=2), in_dims=1),
- (torch.rand(3, B1, 2, B2, 5, B0),), in_dims=5, check_view=False)
+ test(
+ op, (torch.rand(2, B0, 5), [1, 1, 10]), in_dims=(1, None), check_view=False
+ )
+ test(
+ vmap(lambda t: t.reshape([-1])),
+ (torch.rand(B0, B1, 2, 5),),
+ check_view=True,
+ )
+ test(
+ vmap(vmap(lambda t: t.reshape([-1]), in_dims=2), in_dims=1),
+ (torch.rand(3, B1, 2, B2, 5, B0),),
+ in_dims=5,
+ check_view=False,
+ )
def test_reshape_as(self):
test = self._vmap_test
B0, B1, B2 = 7, 11, 13
op = torch.Tensor.reshape_as
test(op, (torch.rand(B0, 2 * 5), torch.rand(B0, 2, 5)), check_view=True)
- test(op, (torch.rand(2 * 5), torch.rand(B0, 2, 5)), in_dims=(None, 0), check_view=True)
- test(op, (torch.rand(B0, 2 * 5), torch.rand(2, 5)), in_dims=(0, None), check_view=True)
+ test(
+ op,
+ (torch.rand(2 * 5), torch.rand(B0, 2, 5)),
+ in_dims=(None, 0),
+ check_view=True,
+ )
+ test(
+ op,
+ (torch.rand(B0, 2 * 5), torch.rand(2, 5)),
+ in_dims=(0, None),
+ check_view=True,
+ )
- test(op, (torch.rand(2, B0, 5), torch.rand(1, 1, 10)), in_dims=(1, None), check_view=False)
+ test(
+ op,
+ (torch.rand(2, B0, 5), torch.rand(1, 1, 10)),
+ in_dims=(1, None),
+ check_view=False,
+ )
- test(vmap(op), (torch.rand(B0, B1, 2, 5), torch.randn(B0, B1, 10)), check_view=True)
- test(vmap(vmap(op, in_dims=(2, None)), in_dims=(1, None)),
- (torch.rand(3, B1, 2, B2, 5, B0), torch.rand(B0, 3 * 2 * 5)),
- in_dims=(5, 0), check_view=False)
+ test(
+ vmap(op),
+ (torch.rand(B0, B1, 2, 5), torch.randn(B0, B1, 10)),
+ check_view=True,
+ )
+ test(
+ vmap(vmap(op, in_dims=(2, None)), in_dims=(1, None)),
+ (torch.rand(3, B1, 2, B2, 5, B0), torch.rand(B0, 3 * 2 * 5)),
+ in_dims=(5, 0),
+ check_view=False,
+ )
def test_result_type(self):
def scalar_tensor_with_dtype(op):
def wrapped(*args, **kwargs):
dtype = op(*args, **kwargs)
return torch.ones([], dtype=dtype)
+
return wrapped
test = self._vmap_test
@@ -2602,36 +2915,66 @@ class TestVmapOperators(Namespace.TestVmapBase):
B0 = 2
- test(op, (torch.randn(B0), torch.randn(B0, dtype=torch.float64)),
- check_propagates_grad=False)
- test(op, (torch.randn(B0), torch.randint(10, [B0], dtype=torch.int64)),
- check_propagates_grad=False)
+ test(
+ op,
+ (torch.randn(B0), torch.randn(B0, dtype=torch.float64)),
+ check_propagates_grad=False,
+ )
+ test(
+ op,
+ (torch.randn(B0), torch.randint(10, [B0], dtype=torch.int64)),
+ check_propagates_grad=False,
+ )
test(lambda x: op(x, 1), (torch.randn(B0),), check_propagates_grad=False)
test(lambda x: op(x, 1.6), (torch.randn(B0),), check_propagates_grad=False)
- test(lambda x: op(x, torch.tensor(1)), (torch.randn(B0),),
- check_propagates_grad=False)
- test(lambda x: op(x, torch.tensor(1.6, dtype=torch.double)),
- (torch.randn(B0),), check_propagates_grad=False)
+ test(
+ lambda x: op(x, torch.tensor(1)),
+ (torch.randn(B0),),
+ check_propagates_grad=False,
+ )
+ test(
+ lambda x: op(x, torch.tensor(1.6, dtype=torch.double)),
+ (torch.randn(B0),),
+ check_propagates_grad=False,
+ )
- test(op, (torch.randn(B0, 2), torch.randn(B0, 2, dtype=torch.float64)),
- check_propagates_grad=False)
- test(op, (torch.randn(B0, 2), torch.randint(10, [B0, 2], dtype=torch.int64)),
- check_propagates_grad=False)
+ test(
+ op,
+ (torch.randn(B0, 2), torch.randn(B0, 2, dtype=torch.float64)),
+ check_propagates_grad=False,
+ )
+ test(
+ op,
+ (torch.randn(B0, 2), torch.randint(10, [B0, 2], dtype=torch.int64)),
+ check_propagates_grad=False,
+ )
test(lambda x: op(x, 1), (torch.randn(B0, 2),), check_propagates_grad=False)
test(lambda x: op(x, 1.6), (torch.randn(B0, 2),), check_propagates_grad=False)
- test(lambda x: op(x, torch.tensor(1)), (torch.randn(B0, 2),),
- check_propagates_grad=False)
- test(lambda x: op(x, torch.tensor(1.6, dtype=torch.double)),
- (torch.randn(B0, 2),), check_propagates_grad=False)
+ test(
+ lambda x: op(x, torch.tensor(1)),
+ (torch.randn(B0, 2),),
+ check_propagates_grad=False,
+ )
+ test(
+ lambda x: op(x, torch.tensor(1.6, dtype=torch.double)),
+ (torch.randn(B0, 2),),
+ check_propagates_grad=False,
+ )
- test(op, (torch.randn(B0, 2), torch.randn(B0, dtype=torch.float64)),
- check_propagates_grad=False)
- test(op, (torch.randn(B0, 2), torch.randint(10, [B0], dtype=torch.int64)),
- check_propagates_grad=False)
+ test(
+ op,
+ (torch.randn(B0, 2), torch.randn(B0, dtype=torch.float64)),
+ check_propagates_grad=False,
+ )
+ test(
+ op,
+ (torch.randn(B0, 2), torch.randint(10, [B0], dtype=torch.int64)),
+ check_propagates_grad=False,
+ )
def test_tensor_split(self):
test = self._vmap_view_test
@@ -2641,18 +2984,38 @@ class TestVmapOperators(Namespace.TestVmapBase):
# tests for torch.tensor_split(self, indices_or_sections: int, dim)
test(op, (torch.rand(B0, 2, 1024), 5, -1), in_dims=(0, None, None))
test(op, (torch.rand(2, B0, 1024), 150, 1), in_dims=(1, None, None))
- test(vmap(op, in_dims=(0, None, None)), (torch.rand(B1, 1023, B0, 5), 256, 0),
- in_dims=(2, None, None))
- test(vmap(vmap(lambda t: op(t, 4, 1), in_dims=2)),
- (torch.rand(B1, 2, B0, 64, B2),), in_dims=2)
+ test(
+ vmap(op, in_dims=(0, None, None)),
+ (torch.rand(B1, 1023, B0, 5), 256, 0),
+ in_dims=(2, None, None),
+ )
+ test(
+ vmap(vmap(lambda t: op(t, 4, 1), in_dims=2)),
+ (torch.rand(B1, 2, B0, 64, B2),),
+ in_dims=2,
+ )
# tests for torch.tensor_split(self, indices_or_sections: List[int], dim)
- test(op, (torch.rand(B0, 2, 1024), [50, 100, 378, 890], -1), in_dims=(0, None, None))
- test(op, (torch.rand(2, B0, 1024), [50, 100, 212, 345, 0, 378, 890], 1), in_dims=(1, None, None))
- test(vmap(op, in_dims=(0, None, None)), (torch.rand(B1, 1023, B0, 5), [50, 100, 212, 345, 0, 378, 890], 0),
- in_dims=(2, None, None))
- test(vmap(vmap(lambda t: op(t, [4, 8, 9, 34, 29], 1), in_dims=2)),
- (torch.rand(B1, 2, B0, 64, B2),), in_dims=2)
+ test(
+ op,
+ (torch.rand(B0, 2, 1024), [50, 100, 378, 890], -1),
+ in_dims=(0, None, None),
+ )
+ test(
+ op,
+ (torch.rand(2, B0, 1024), [50, 100, 212, 345, 0, 378, 890], 1),
+ in_dims=(1, None, None),
+ )
+ test(
+ vmap(op, in_dims=(0, None, None)),
+ (torch.rand(B1, 1023, B0, 5), [50, 100, 212, 345, 0, 378, 890], 0),
+ in_dims=(2, None, None),
+ )
+ test(
+ vmap(vmap(lambda t: op(t, [4, 8, 9, 34, 29], 1), in_dims=2)),
+ (torch.rand(B1, 2, B0, 64, B2),),
+ in_dims=2,
+ )
@skipIfTorchDynamo("really slow")
def test_split(self):
@@ -2663,18 +3026,32 @@ class TestVmapOperators(Namespace.TestVmapBase):
# tests for torch.split(self, split_size: int, dim)
test(op, (torch.rand(B0, 2, 1024), 101, -1), in_dims=(0, None, None))
test(op, (torch.rand(2, B0, 1024), 130, 1), in_dims=(1, None, None))
- test(vmap(op, in_dims=(0, None, None)), (torch.rand(B1, 1023, B0, 5), 256, 0),
- in_dims=(2, None, None))
- test(vmap(vmap(lambda t: op(t, 4, 1), in_dims=2)),
- (torch.rand(B1, 2, B0, 64, B2),), in_dims=2)
+ test(
+ vmap(op, in_dims=(0, None, None)),
+ (torch.rand(B1, 1023, B0, 5), 256, 0),
+ in_dims=(2, None, None),
+ )
+ test(
+ vmap(vmap(lambda t: op(t, 4, 1), in_dims=2)),
+ (torch.rand(B1, 2, B0, 64, B2),),
+ in_dims=2,
+ )
# tests for torch.split(self, split_size: List[int], dim)
test(op, (torch.rand(B0, 2, 1024), [1, 1020, 3], -1), in_dims=(0, None, None))
- test(op, (torch.rand(2, B0, 1024), [100] * 10 + [24], 1), in_dims=(1, None, None))
- test(vmap(op, in_dims=(0, None, None)), (torch.rand(B1, 1023, B0, 5), [256] * 3 + [255], 0),
- in_dims=(2, None, None))
- test(vmap(vmap(lambda t: op(t, [4] * 8 + [8] * 4, 1), in_dims=2)),
- (torch.rand(B1, 2, B0, 64, B2),), in_dims=2)
+ test(
+ op, (torch.rand(2, B0, 1024), [100] * 10 + [24], 1), in_dims=(1, None, None)
+ )
+ test(
+ vmap(op, in_dims=(0, None, None)),
+ (torch.rand(B1, 1023, B0, 5), [256] * 3 + [255], 0),
+ in_dims=(2, None, None),
+ )
+ test(
+ vmap(vmap(lambda t: op(t, [4] * 8 + [8] * 4, 1), in_dims=2)),
+ (torch.rand(B1, 2, B0, 64, B2),),
+ in_dims=2,
+ )
def test_trace(self):
op = torch.trace
@@ -2695,8 +3072,11 @@ class TestVmapOperators(Namespace.TestVmapBase):
test(lambda x: op(x, 3, 1), (torch.rand(B0, 2, 5, 4, 6),))
test(lambda x: op(x, 1, 0), (torch.rand(2, B0, 5),), in_dims=1)
test(vmap(lambda x: op(x, 0, 1)), (torch.rand(B1, 2, B0, 5),), in_dims=2)
- test(vmap(vmap(lambda x: op(x, 0, 1), in_dims=2)),
- (torch.rand(B1, 2, B0, 5, B2),), in_dims=2)
+ test(
+ vmap(vmap(lambda x: op(x, 0, 1), in_dims=2)),
+ (torch.rand(B1, 2, B0, 5, B2),),
+ in_dims=2,
+ )
# Special case: scalar tensor
for dim1, dim2 in itertools.product([0, -1], [0, -1]):
@@ -2729,12 +3109,16 @@ class TestVmapOperators(Namespace.TestVmapBase):
test = self._vmap_test
B0, B1 = 7, 11
- test(lambda t: t.to('cpu'), (torch.rand(B0),))
+ test(lambda t: t.to("cpu"), (torch.rand(B0),))
test(lambda t: t.to(torch.double), (torch.rand(B0),))
- test(lambda t, o: t.to(o), (torch.rand(B0), torch.randn(B0, dtype=torch.float64)))
- test(lambda t, o: t.to(o),
- (torch.rand(B0), torch.randn(B0, dtype=torch.float64)),
- in_dims=(0, None))
+ test(
+ lambda t, o: t.to(o), (torch.rand(B0), torch.randn(B0, dtype=torch.float64))
+ )
+ test(
+ lambda t, o: t.to(o),
+ (torch.rand(B0), torch.randn(B0, dtype=torch.float64)),
+ in_dims=(0, None),
+ )
test(vmap(lambda t: t.to(torch.double)), (torch.rand(B0, B1, 3),))
# also test some casting methods
@@ -2750,10 +3134,18 @@ class TestVmapOperators(Namespace.TestVmapBase):
test(op, (torch.rand(B0, 7, 11), 0, 2, 1), in_dims=(0, None, None, None))
test(op, (torch.rand(7, B0, 11), 1, 4, 2), in_dims=(1, None, None, None))
- test(vmap(op, in_dims=(0, None, None, None)),
- (torch.rand(B1, 7, B0, 11), 1, 5, 1), in_dims=(2, None, None, None))
- test(vmap(vmap(op, in_dims=(2, None, None, None)), in_dims=(0, None, None, None)),
- (torch.rand(B1, 7, B0, 11, B2), -1, 2, 4), in_dims=(2, None, None, None))
+ test(
+ vmap(op, in_dims=(0, None, None, None)),
+ (torch.rand(B1, 7, B0, 11), 1, 5, 1),
+ in_dims=(2, None, None, None),
+ )
+ test(
+ vmap(
+ vmap(op, in_dims=(2, None, None, None)), in_dims=(0, None, None, None)
+ ),
+ (torch.rand(B1, 7, B0, 11, B2), -1, 2, 4),
+ in_dims=(2, None, None, None),
+ )
def test_unbind(self):
test = self._vmap_view_test
@@ -2763,10 +3155,16 @@ class TestVmapOperators(Namespace.TestVmapBase):
test(op, (torch.rand(B0, 2, 1024), -1), in_dims=(0, None))
test(op, (torch.rand(B0, 2, 0),))
test(op, (torch.rand(2, B0, 7), 0), in_dims=(1, None))
- test(vmap(op, in_dims=(0, None)), (torch.rand(B1, 1023, B0, 5), 1),
- in_dims=(2, None))
- test(vmap(vmap(lambda t: op(t, dim=1), in_dims=2)),
- (torch.rand(B1, 2, B0, 32, B2),), in_dims=2)
+ test(
+ vmap(op, in_dims=(0, None)),
+ (torch.rand(B1, 1023, B0, 5), 1),
+ in_dims=(2, None),
+ )
+ test(
+ vmap(vmap(lambda t: op(t, dim=1), in_dims=2)),
+ (torch.rand(B1, 2, B0, 32, B2),),
+ in_dims=2,
+ )
def test_view(self):
test = self._vmap_view_test
@@ -2780,8 +3178,11 @@ class TestVmapOperators(Namespace.TestVmapBase):
test(op, (torch.rand(B0, 2 * 5), [2, 5]), in_dims=(0, None))
test(op, (torch.rand(B0, 4, 5), [1, 2, 1, 10]), in_dims=(0, None))
test(vmap(lambda t: t.view([-1])), (torch.rand(B0, B1, 2, 5, 3),))
- test(vmap(vmap(lambda t: t.reshape([-1])), in_dims=1),
- (torch.rand(B2, B0, B1, 3, 2, 5),), in_dims=1)
+ test(
+ vmap(vmap(lambda t: t.reshape([-1])), in_dims=1),
+ (torch.rand(B2, B0, B1, 3, 2, 5),),
+ in_dims=1,
+ )
def test_view_as(self):
test = self._vmap_view_test
@@ -2799,9 +3200,11 @@ class TestVmapOperators(Namespace.TestVmapBase):
test(op, (torch.rand(B0, 4, 5), torch.rand(2, 1, 1, 10)), in_dims=(0, None))
test(vmap(op), (torch.rand(B0, B1, 2, 5), torch.randn(B0, B1, 10)))
- test(vmap(vmap(op, in_dims=(0, None)), in_dims=(0, None)),
- (torch.rand(B1, B2, B0, 3, 2, 5), torch.rand(B0, 3 * 2 * 5)),
- in_dims=(2, 0))
+ test(
+ vmap(vmap(op, in_dims=(0, None)), in_dims=(0, None)),
+ (torch.rand(B1, B2, B0, 3, 2, 5), torch.rand(B0, 3 * 2 * 5)),
+ in_dims=(2, 0),
+ )
def test_conv2d(self):
conv_setups = [
@@ -2814,21 +3217,31 @@ class TestVmapOperators(Namespace.TestVmapBase):
mod = conv_mod(4, 8, kernel_size=3)
arg_values = [torch.randn(inp_shape), mod.weight, mod.bias]
kwarg_values = {}
- for loop_out, batched_out in get_fallback_and_vmap_exhaustive(conv_fn, arg_values, kwarg_values):
+ for loop_out, batched_out in get_fallback_and_vmap_exhaustive(
+ conv_fn, arg_values, kwarg_values
+ ):
self.assertEqual(loop_out, batched_out)
arg_values = [torch.randn(inp_shape), mod.weight, None]
- for loop_out, batched_out in get_fallback_and_vmap_exhaustive(conv_fn, arg_values, kwarg_values):
+ for loop_out, batched_out in get_fallback_and_vmap_exhaustive(
+ conv_fn, arg_values, kwarg_values
+ ):
self.assertEqual(loop_out, batched_out)
- mod2 = conv_mod(4, 8, kernel_size=3, groups=2, stride=3, padding=1, dilation=2)
+ mod2 = conv_mod(
+ 4, 8, kernel_size=3, groups=2, stride=3, padding=1, dilation=2
+ )
arg_values = [torch.randn(inp_shape), mod2.weight, mod2.bias]
kwarg_values = dict(groups=2, stride=3, padding=1, dilation=2)
- for loop_out, batched_out in get_fallback_and_vmap_exhaustive(conv_fn, arg_values, kwarg_values):
+ for loop_out, batched_out in get_fallback_and_vmap_exhaustive(
+ conv_fn, arg_values, kwarg_values
+ ):
self.assertEqual(loop_out, batched_out)
arg_values = [torch.randn(inp_shape), mod2.weight, None]
- for loop_out, batched_out in get_fallback_and_vmap_exhaustive(conv_fn, arg_values, kwarg_values):
+ for loop_out, batched_out in get_fallback_and_vmap_exhaustive(
+ conv_fn, arg_values, kwarg_values
+ ):
self.assertEqual(loop_out, batched_out)
def test_one_hot(self):
@@ -2837,7 +3250,9 @@ class TestVmapOperators(Namespace.TestVmapBase):
(torch.randint(0, 3, [2, 3, 4]), 4),
]
for args in sample_inputs:
- for loop_out, batched_out in get_fallback_and_vmap_exhaustive(F.one_hot, args, {}):
+ for loop_out, batched_out in get_fallback_and_vmap_exhaustive(
+ F.one_hot, args, {}
+ ):
self.assertEqual(loop_out, batched_out)
def test_conj_bit(self):
@@ -2848,6 +3263,7 @@ class TestVmapOperators(Namespace.TestVmapBase):
y = x.conj()
assert y.is_conj()
return y
+
res = vmap(foo)(x)
self.assertEqual(res, x.conj())
@@ -2859,14 +3275,18 @@ class TestVmapOperators(Namespace.TestVmapBase):
return x + torch.randn(shape)
torch.manual_seed(0)
- out1 = vmap(vmap(vmap_f, randomness='different'), randomness='different')(torch.ones(2, 3))
+ out1 = vmap(vmap(vmap_f, randomness="different"), randomness="different")(
+ torch.ones(2, 3)
+ )
torch.manual_seed(0)
out2 = naive_f(torch.ones(2, 3), (2, 3))
self.assertEqual(out1, out2)
torch.manual_seed(0)
- out1 = vmap(vmap(vmap_f, randomness='different'), randomness='different')(torch.ones(2, 3, 4))
+ out1 = vmap(vmap(vmap_f, randomness="different"), randomness="different")(
+ torch.ones(2, 3, 4)
+ )
torch.manual_seed(0)
out2 = naive_f(torch.ones(2, 3, 4), (2, 3, 1))
@@ -2874,11 +3294,10 @@ class TestVmapOperators(Namespace.TestVmapBase):
self.assertTrue(torch.randn(()).dim() == 0)
- @parametrize('in_dim', [0, 1, 2])
- @parametrize('out_dim', [0, 1, 2])
- @parametrize('randomness', ['error', 'same'])
+ @parametrize("in_dim", [0, 1, 2])
+ @parametrize("out_dim", [0, 1, 2])
+ @parametrize("randomness", ["error", "same"])
def test_chunk_vmap(self, in_dim, out_dim, randomness):
-
x = torch.randn(4, 5, 6)
def f(x):
@@ -2893,15 +3312,18 @@ class TestVmapOperators(Namespace.TestVmapBase):
for chunks in [1, 2, 3, 4, 7, 10, 16]:
torch.set_rng_state(rs)
output = chunk_vmap(
- f, in_dims=in_dim, out_dims=out_dim, randomness=randomness, chunks=chunks
+ f,
+ in_dims=in_dim,
+ out_dims=out_dim,
+ randomness=randomness,
+ chunks=chunks,
)(x)
self.assertEqual(output, expected)
- @parametrize('in_dim', [0, 1, 2])
- @parametrize('out_dim', [0, 1, 2])
- @parametrize('randomness', ['error', 'same'])
+ @parametrize("in_dim", [0, 1, 2])
+ @parametrize("out_dim", [0, 1, 2])
+ @parametrize("randomness", ["error", "same"])
def test_vmap_chunksize(self, in_dim, out_dim, randomness):
-
x = torch.randn(4, 5, 6)
y = torch.randn_like(x)
@@ -2911,8 +3333,9 @@ class TestVmapOperators(Namespace.TestVmapBase):
if randomness != "error":
y = y + torch.rand_like(x)
return y
+
f_args = (x,)
- f_kwargs = {'in_dims': in_dim, 'out_dims': out_dim, 'randomness': randomness}
+ f_kwargs = {"in_dims": in_dim, "out_dims": out_dim, "randomness": randomness}
# fn: Nested Input/Single Output
def f1(pair):
@@ -2921,55 +3344,79 @@ class TestVmapOperators(Namespace.TestVmapBase):
if randomness != "error":
z = z + torch.rand_like(z)
return z
+
f1_args = ((x, y),)
- f1_kwargs = {'in_dims': ((in_dim,) * 2,), 'out_dims': out_dim, 'randomness': randomness}
+ f1_kwargs = {
+ "in_dims": ((in_dim,) * 2,),
+ "out_dims": out_dim,
+ "randomness": randomness,
+ }
# fn: Single Input/Nested Output
def f2(x):
y = x.sin()
if randomness != "error":
y = y + torch.rand_like(x)
- return {'out': y, 'out1': y + 2}
+ return {"out": y, "out1": y + 2}
+
f2_args = (x,)
- f2_kwargs = {'in_dims': in_dim, 'out_dims': out_dim, 'randomness': randomness}
+ f2_kwargs = {"in_dims": in_dim, "out_dims": out_dim, "randomness": randomness}
# fn: Nested Input/Nested Output (first tensor is not vmapped).
def f3(inp_dict):
- x = inp_dict['inp']
- y = inp_dict['inp1']
+ x = inp_dict["inp"]
+ y = inp_dict["inp1"]
z = x.sin() + y.cos()
if randomness != "error":
z = z + torch.rand_like(z)
- return {'z': z, 'tuple': (z, z + 1)}
- f3_args = ({'inp': x.index_select(in_dim, torch.tensor([0])).squeeze(in_dim), 'inp1': y},)
- f3_kwargs = {'in_dims': ({'inp': None, 'inp1': in_dim},), 'out_dims': out_dim, 'randomness': randomness}
+ return {"z": z, "tuple": (z, z + 1)}
+
+ f3_args = (
+ {
+ "inp": x.index_select(in_dim, torch.tensor([0])).squeeze(in_dim),
+ "inp1": y,
+ },
+ )
+ f3_kwargs = {
+ "in_dims": ({"inp": None, "inp1": in_dim},),
+ "out_dims": out_dim,
+ "randomness": randomness,
+ }
# fn: Nested Input/Nested Output (first argument is not a Tensor).
def f4(inp_dict):
- x = inp_dict['inp']
- y = inp_dict['inp1']
+ x = inp_dict["inp"]
+ y = inp_dict["inp1"]
z = x + y.cos()
if randomness != "error":
z = z + torch.rand_like(z)
- return {'z': z, 'tuple': (z, z + 1)}
- f4_args = ({'inp': 2., 'inp1': y},)
- f4_kwargs = {'in_dims': ({'inp': None, 'inp1': in_dim},), 'out_dims': out_dim, 'randomness': randomness}
-
- fns_and_args = ((f, f_args, f_kwargs), (f1, f1_args, f1_kwargs), (f2, f2_args, f2_kwargs),
- (f3, f3_args, f3_kwargs), (f4, f4_args, f4_kwargs))
+ return {"z": z, "tuple": (z, z + 1)}
+
+ f4_args = ({"inp": 2.0, "inp1": y},)
+ f4_kwargs = {
+ "in_dims": ({"inp": None, "inp1": in_dim},),
+ "out_dims": out_dim,
+ "randomness": randomness,
+ }
+
+ fns_and_args = (
+ (f, f_args, f_kwargs),
+ (f1, f1_args, f1_kwargs),
+ (f2, f2_args, f2_kwargs),
+ (f3, f3_args, f3_kwargs),
+ (f4, f4_args, f4_kwargs),
+ )
for fn, args, kwargs in fns_and_args:
rs = torch.get_rng_state()
expected_vmap = vmap(fn, **kwargs)(*args)
for chunk_size in (1, 2, 3, 4, 7, 10, 16, 100):
torch.set_rng_state(rs)
- output = vmap(
- fn, chunk_size=chunk_size, **kwargs
- )(*args)
+ output = vmap(fn, chunk_size=chunk_size, **kwargs)(*args)
self.assertEqual(output, expected_vmap)
- @parametrize('in_dim', [0, 1])
- @parametrize('out_dim', [0, 1])
- @parametrize('randomness', ['error', 'same'])
+ @parametrize("in_dim", [0, 1])
+ @parametrize("out_dim", [0, 1])
+ @parametrize("randomness", ["error", "same"])
def test_vmap_chunksize_error(self, in_dim, out_dim, randomness):
x = torch.randn(4, 5, 6)
@@ -2981,21 +3428,31 @@ class TestVmapOperators(Namespace.TestVmapBase):
# Incorrect `chunk_size`
for chunk_size in (-1, 0):
- with self.assertRaisesRegex(ValueError, "vmap: chunk_size should be None or greater than 0."):
+ with self.assertRaisesRegex(
+ ValueError, "vmap: chunk_size should be None or greater than 0."
+ ):
vmap(
- f, in_dims=in_dim, out_dims=out_dim, randomness=randomness, chunk_size=chunk_size
+ f,
+ in_dims=in_dim,
+ out_dims=out_dim,
+ randomness=randomness,
+ chunk_size=chunk_size,
)(x)
# Incorrect `out_dims`
msg = "out_dims is not compatible with the structure of `outputs`"
with self.assertRaisesRegex(ValueError, msg):
vmap(
- f, in_dims=in_dim, out_dims=(out_dim, out_dim), randomness=randomness, chunk_size=2
+ f,
+ in_dims=in_dim,
+ out_dims=(out_dim, out_dim),
+ randomness=randomness,
+ chunk_size=2,
)(x)
- @parametrize('in_dim', [0, 1])
- @parametrize('out_dim', [0, 1])
- @parametrize('randomness', ['error', 'same'])
+ @parametrize("in_dim", [0, 1])
+ @parametrize("out_dim", [0, 1])
+ @parametrize("randomness", ["error", "same"])
def test_vmap_chunksize_composition(self, in_dim, out_dim, randomness):
x = torch.randn(4, 5, 6)
y = torch.randn_like(x)
@@ -3006,6 +3463,7 @@ class TestVmapOperators(Namespace.TestVmapBase):
if randomness != "error":
y = y + torch.rand_like(x)
return y
+
f_args = (x,)
# fn: Nested Input/Single Output
@@ -3015,6 +3473,7 @@ class TestVmapOperators(Namespace.TestVmapBase):
if randomness != "error":
z = z + torch.rand_like(z)
return z
+
f1_args = ((x, y),)
# fn: Single Input/Nested Output
@@ -3022,59 +3481,82 @@ class TestVmapOperators(Namespace.TestVmapBase):
y = x.sin()
if randomness != "error":
y = y + torch.rand_like(x)
- return {'out': y, 'out1': y + 2}
+ return {"out": y, "out1": y + 2}
+
f2_args = (x,)
# fn: Nested Input/Nested Output
def f3(inp_dict):
- x = inp_dict['inp']
- y = inp_dict['inp1']
+ x = inp_dict["inp"]
+ y = inp_dict["inp1"]
z = x.sin() + y.cos()
if randomness != "error":
z = z + torch.rand_like(z)
- return {'z': z, 'tuple': (z, z + 1)}
- f3_args = ({'inp': x, 'inp1': y},)
+ return {"z": z, "tuple": (z, z + 1)}
+
+ f3_args = ({"inp": x, "inp1": y},)
for fn, args in ((f, f_args), (f1, f1_args), (f2, f2_args), (f3, f3_args)):
rs = torch.get_rng_state()
- expected = vmap(vmap(fn, in_dims=in_dim, out_dims=out_dim, randomness=randomness),
- in_dims=in_dim, out_dims=out_dim, randomness=randomness)(*args)
+ expected = vmap(
+ vmap(fn, in_dims=in_dim, out_dims=out_dim, randomness=randomness),
+ in_dims=in_dim,
+ out_dims=out_dim,
+ randomness=randomness,
+ )(*args)
for chunk_size in (1, 2, 3, 4, 7, 10, 16, 100):
torch.set_rng_state(rs)
- actual = vmap(vmap(
- fn, in_dims=in_dim, out_dims=out_dim, randomness=randomness, chunk_size=chunk_size
- ), in_dims=in_dim, out_dims=out_dim, randomness=randomness, chunk_size=chunk_size)(*args)
+ actual = vmap(
+ vmap(
+ fn,
+ in_dims=in_dim,
+ out_dims=out_dim,
+ randomness=randomness,
+ chunk_size=chunk_size,
+ ),
+ in_dims=in_dim,
+ out_dims=out_dim,
+ randomness=randomness,
+ chunk_size=chunk_size,
+ )(*args)
self.assertEqual(actual, expected)
+
instantiate_parametrized_tests(TestVmapOperators)
def construct_v(output, batch_size, contig=False):
if contig:
- return torch.randn(batch_size, *output.shape,
- dtype=output.dtype, device=output.device)
- result = torch.randn(*output.shape, batch_size,
- dtype=output.dtype, device=output.device)
+ return torch.randn(
+ batch_size, *output.shape, dtype=output.dtype, device=output.device
+ )
+ result = torch.randn(
+ *output.shape, batch_size, dtype=output.dtype, device=output.device
+ )
return result.movedim(-1, 0)
+
def as_tuple(x):
if isinstance(x, tuple):
return x
elif isinstance(x, list):
return tuple(x)
else:
- return x,
+ return (x,)
def differentiable(args):
- return tuple(arg for arg in as_tuple(args)
- if isinstance(arg, torch.Tensor) and arg.requires_grad)
+ return tuple(
+ arg
+ for arg in as_tuple(args)
+ if isinstance(arg, torch.Tensor) and arg.requires_grad
+ )
def _get_rand_no_zeros(*args, **kwargs):
- requires_grad = kwargs.get('requires_grad', False)
+ requires_grad = kwargs.get("requires_grad", False)
kwargs_without_requires_grad = kwargs.copy()
- kwargs_without_requires_grad['requires_grad'] = False
+ kwargs_without_requires_grad["requires_grad"] = False
result = torch.rand(*args, **kwargs_without_requires_grad)
return result.clamp_min_(0.1).requires_grad_(requires_grad)
@@ -3090,20 +3572,26 @@ class TestVmapBatchedGradient(Namespace.TestVmapBase):
# output_process_fn: a function that maps the outputs to the part
# that should be differentiated.
# batch_size: the batch dim size for the batched grad
- def _batched_grad_test(self, op, args, kwargs=None, output_process_fn=lambda x: x, batch_size=3):
+ def _batched_grad_test(
+ self, op, args, kwargs=None, output_process_fn=lambda x: x, batch_size=3
+ ):
if kwargs is None:
kwargs = {}
outputs = op(*args, **kwargs)
outputs = differentiable(output_process_fn(outputs))
for contig in [True, False]:
- batched_vectors = tuple(construct_v(out, batch_size, contig)
- for out in outputs)
+ batched_vectors = tuple(
+ construct_v(out, batch_size, contig) for out in outputs
+ )
def vector_jacobian_product(*vectors):
- return torch.autograd.grad(outputs, differentiable(args), vectors,
- retain_graph=True)
- self._vmap_test(vector_jacobian_product, batched_vectors,
- check_propagates_grad=False)
+ return torch.autograd.grad(
+ outputs, differentiable(args), vectors, retain_graph=True
+ )
+
+ self._vmap_test(
+ vector_jacobian_product, batched_vectors, check_propagates_grad=False
+ )
# Tests batched second grad computation of outputs = op(*args, **kwargs).
# by comparing it to a sequential map+stack fallback.
@@ -3118,32 +3606,43 @@ class TestVmapBatchedGradient(Namespace.TestVmapBase):
# Regression.
# It might be useful to have a test that computes batched first gradients and
# then uses those to compute batched second gradients in the future.
- def _batched_grad_grad_test(self, op, args, kwargs=None, output_process_fn=lambda x: x, batch_size=3):
+ def _batched_grad_grad_test(
+ self, op, args, kwargs=None, output_process_fn=lambda x: x, batch_size=3
+ ):
if kwargs is None:
kwargs = {}
outputs = op(*args, **kwargs)
outputs = differentiable(output_process_fn(outputs))
ones = tuple(torch.ones_like(out) for out in outputs)
# Same thing as summing together all of the outputs and calling .backward()
- first_grads = torch.autograd.grad(outputs, differentiable(args), ones,
- create_graph=True)
+ first_grads = torch.autograd.grad(
+ outputs, differentiable(args), ones, create_graph=True
+ )
first_grads = differentiable(first_grads)
self.assertNotEqual(
- len(first_grads), 0, "None of the first grads depend on the input!")
+ len(first_grads), 0, "None of the first grads depend on the input!"
+ )
for contig in [True, False]:
- batched_vectors = tuple(construct_v(grad, batch_size, contig)
- for grad in first_grads)
+ batched_vectors = tuple(
+ construct_v(grad, batch_size, contig) for grad in first_grads
+ )
def vector_hessian_product(*vectors):
- outputs = torch.autograd.grad(first_grads, differentiable(args), vectors,
- retain_graph=True, allow_unused=True)
+ outputs = torch.autograd.grad(
+ first_grads,
+ differentiable(args),
+ vectors,
+ retain_graph=True,
+ allow_unused=True,
+ )
outputs = tuple(out for out in outputs if out is not None)
assert len(outputs) > 0
return outputs
- self._vmap_test(vector_hessian_product, batched_vectors,
- check_propagates_grad=False)
+ self._vmap_test(
+ vector_hessian_product, batched_vectors, check_propagates_grad=False
+ )
def _test_arithmetic(self, op, device, test_grad_grad=True):
x = torch.randn(2, 3, requires_grad=True, device=device)
@@ -3193,6 +3692,7 @@ class TestVmapBatchedGradient(Namespace.TestVmapBase):
def op(x):
return x.expand(5, 5, 2, 3)
+
self._batched_grad_test(op, (x,))
@allowVmapFallbackUsage
@@ -3273,6 +3773,7 @@ class TestVmapBatchedGradient(Namespace.TestVmapBase):
def op(x, y):
return torch.stack([x, y])
+
self._batched_grad_test(op, (x, y))
def test_select(self, device):
@@ -3314,7 +3815,9 @@ class TestVmapBatchedGradient(Namespace.TestVmapBase):
def f(t):
return torch.where(t)
- with self.assertRaisesRegex(RuntimeError, r"Attempted to vmap over aten::where"):
+ with self.assertRaisesRegex(
+ RuntimeError, r"Attempted to vmap over aten::where"
+ ):
vmap(f)(x)
def test_threshold(self, device):
@@ -3367,7 +3870,7 @@ class TestVmapBatchedGradient(Namespace.TestVmapBase):
gy = torch.randn(B0, requires_grad=True)
def vjp(v):
- res, = torch.autograd.grad(y, x, v, allow_unused=True)
+ (res,) = torch.autograd.grad(y, x, v, allow_unused=True)
return torch.zeros_like(x) if res is None else res
result = vmap(vjp)(gy)
@@ -3381,7 +3884,7 @@ class TestVmapBatchedGradient(Namespace.TestVmapBase):
gy = torch.randn(B0, requires_grad=True)
def vjp(v):
- res, = torch.autograd.grad(y, x, v, allow_unused=True)
+ (res,) = torch.autograd.grad(y, x, v, allow_unused=True)
return torch.zeros_like(x) if res is None else res
_ = vjp(gy[0])
@@ -3408,10 +3911,12 @@ def discover_variants(opinfo):
# @markDynamoStrictTest
@unMarkDynamoStrictTest
class TestVmapOperatorsOpInfo(TestCase):
-
- def vmap_outplace_test(self, func, args, kwargs, in_dims, check_shape_only=False,
- postprocess_fn=None):
- for vmap_out, loop_out in compute_quantities_for_vmap_test(func, args, kwargs, in_dims):
+ def vmap_outplace_test(
+ self, func, args, kwargs, in_dims, check_shape_only=False, postprocess_fn=None
+ ):
+ for vmap_out, loop_out in compute_quantities_for_vmap_test(
+ func, args, kwargs, in_dims
+ ):
if postprocess_fn is not None:
loop_out = postprocess_fn(loop_out)
vmap_out = postprocess_fn(vmap_out)
@@ -3429,18 +3934,32 @@ class TestVmapOperatorsOpInfo(TestCase):
# on the in-place operation
with self.assertRaises(RuntimeError):
for _ in compute_quantities_for_vmap_test(
- func, args, kwargs, in_dims, compute_loop_out=False, clone_inputs=True):
+ func,
+ args,
+ kwargs,
+ in_dims,
+ compute_loop_out=False,
+ clone_inputs=True,
+ ):
pass
return
for vmap_out, loop_out in compute_quantities_for_vmap_test(
- func, args, kwargs, in_dims, clone_inputs=True):
+ func, args, kwargs, in_dims, clone_inputs=True
+ ):
if postprocess_fn is not None:
loop_out = postprocess_fn(loop_out)
vmap_out = postprocess_fn(vmap_out)
self.assertEqual(vmap_out, loop_out)
- def opinfo_vmap_test(self, device, dtype, op, check_has_batch_rule,
- skip_inplace=(), postprocess_fn=None):
+ def opinfo_vmap_test(
+ self,
+ device,
+ dtype,
+ op,
+ check_has_batch_rule,
+ skip_inplace=(),
+ postprocess_fn=None,
+ ):
def test():
# Error inputs check
if op.error_inputs_func is not None:
@@ -3471,9 +3990,11 @@ class TestVmapOperatorsOpInfo(TestCase):
if op.name in sample_inputs_op:
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
else:
- sample_inputs_itr = op.reference_inputs(device, dtype, requires_grad=False)
+ sample_inputs_itr = op.reference_inputs(
+ device, dtype, requires_grad=False
+ )
aliases, inplace_aliases = discover_variants(op)
- check_shape_only = op.name in ('empty_like', 'new_empty')
+ check_shape_only = op.name in ("empty_like", "new_empty")
for sample_input in sample_inputs_itr:
args = (sample_input.input,) + sample_input.args
if not any(isinstance(arg, torch.Tensor) for arg in args):
@@ -3482,15 +4003,27 @@ class TestVmapOperatorsOpInfo(TestCase):
kwargs = sample_input.kwargs
is_batch_norm_and_training = is_batch_norm_training(op.name, kwargs)
for batched_args, in_dims, _ in generate_vmap_inputs(
- args, {}, is_batch_norm_and_training=is_batch_norm_and_training):
+ args, {}, is_batch_norm_and_training=is_batch_norm_and_training
+ ):
for func in aliases:
- self.vmap_outplace_test(func, batched_args, kwargs, in_dims, check_shape_only, postprocess_fn)
+ self.vmap_outplace_test(
+ func,
+ batched_args,
+ kwargs,
+ in_dims,
+ check_shape_only,
+ postprocess_fn,
+ )
if op.name in skip_inplace:
continue
- if not is_valid_inplace_sample_input(sample_input, op, op.inplace_variant):
+ if not is_valid_inplace_sample_input(
+ sample_input, op, op.inplace_variant
+ ):
continue
for func in inplace_aliases:
- self.vmap_inplace_test(func, batched_args, kwargs, in_dims, postprocess_fn)
+ self.vmap_inplace_test(
+ func, batched_args, kwargs, in_dims, postprocess_fn
+ )
if check_has_batch_rule:
check_vmap_fallback(self, test, op)
@@ -3500,330 +4033,399 @@ class TestVmapOperatorsOpInfo(TestCase):
vmap_fail = {
# -------------------- ALLOWED FAILURES --------------------------------
# These are things that we either cannot fix or are not actually problems
- xfail('resize_'),
- xfail('resize_as_'),
- xfail('to_sparse'),
- xfail('__getitem__'), # dynamic mask
- xfail('index_put'), # dynamic mask
- xfail('nn.functional.dropout'), # works, can't check against for loop because of randomness inconsistency
- xfail('nn.functional.scaled_dot_product_attention'), # randomness
- xfail('nn.functional.multi_head_attention_forward'), # randomness
- xfail('masked_select'), # dynamic op
- xfail('nonzero'), # dynamic op
- xfail('unique', ''), # dynamic op
- xfail('unique_consecutive', ''), # dynamic op
- xfail('allclose'), # returns a boolean
- xfail('uniform'), # randomness is tested separately
- xfail('rand_like'), # randomness is tested separately
- xfail('randint_like'), # randomness is tested separately
- xfail('randn_like'), # randomness is tested separately
- xfail('bernoulli', ''), # randomness is tested separately
- xfail('normal', ''), # randomness is tested separately
- xfail('normal', 'number_mean'), # randomness is tested separately
- xfail('multinomial', ''), # randomness
- xfail('nn.functional.embedding', ''), # we only support some cases
- xfail('nn.functional.rrelu'), # randomness
- xfail('nn.functional.dropout2d', ''), # randomness
- xfail('nn.functional.dropout3d', ''), # randomness
- xfail('nn.functional.alpha_dropout', ''), # randomness
- xfail('nn.functional.feature_alpha_dropout', 'with_train'), # randomness
- xfail('as_strided'), # Our test runner can't handle this; manual test exists
- xfail('as_strided_scatter'), # no batching rule implemented, default doesnt work
- skip('new_empty_strided'), # empty tensor data is garbage so it's hard to make comparisons with it
- xfail('nn.functional.fractional_max_pool3d'), # randomness
- xfail('nn.functional.fractional_max_pool2d'), # randomness
- xfail('pca_lowrank', ''), # random operation
- xfail('svd_lowrank', ''), # random operation
- xfail('sparse.sampled_addmm'), # sparse
- xfail('sparse.mm', 'reduce'), # sparse
- xfail("NumpyCubeNotComposableAutogradFunction"), # Not composable autograd.Function
- skip('_softmax_backward_data'),
- skip('linalg.eigh', ''), # not always return the same result for the same input, see test_linalg_eigh for manual test
- skip('to'), # RuntimeError: required rank 4 tensor to use channels_last format
+ xfail("resize_"),
+ xfail("resize_as_"),
+ xfail("to_sparse"),
+ xfail("__getitem__"), # dynamic mask
+ xfail("index_put"), # dynamic mask
+ xfail(
+ "nn.functional.dropout"
+ ), # works, can't check against for loop because of randomness inconsistency
+ xfail("nn.functional.scaled_dot_product_attention"), # randomness
+ xfail("nn.functional.multi_head_attention_forward"), # randomness
+ xfail("masked_select"), # dynamic op
+ xfail("nonzero"), # dynamic op
+ xfail("unique", ""), # dynamic op
+ xfail("unique_consecutive", ""), # dynamic op
+ xfail("allclose"), # returns a boolean
+ xfail("uniform"), # randomness is tested separately
+ xfail("rand_like"), # randomness is tested separately
+ xfail("randint_like"), # randomness is tested separately
+ xfail("randn_like"), # randomness is tested separately
+ xfail("bernoulli", ""), # randomness is tested separately
+ xfail("normal", ""), # randomness is tested separately
+ xfail("normal", "number_mean"), # randomness is tested separately
+ xfail("multinomial", ""), # randomness
+ xfail("nn.functional.embedding", ""), # we only support some cases
+ xfail("nn.functional.rrelu"), # randomness
+ xfail("nn.functional.dropout2d", ""), # randomness
+ xfail("nn.functional.dropout3d", ""), # randomness
+ xfail("nn.functional.alpha_dropout", ""), # randomness
+ xfail("nn.functional.feature_alpha_dropout", "with_train"), # randomness
+ xfail("as_strided"), # Our test runner can't handle this; manual test exists
+ xfail(
+ "as_strided_scatter"
+ ), # no batching rule implemented, default doesnt work
+ skip(
+ "new_empty_strided"
+ ), # empty tensor data is garbage so it's hard to make comparisons with it
+ xfail("nn.functional.fractional_max_pool3d"), # randomness
+ xfail("nn.functional.fractional_max_pool2d"), # randomness
+ xfail("pca_lowrank", ""), # random operation
+ xfail("svd_lowrank", ""), # random operation
+ xfail("sparse.sampled_addmm"), # sparse
+ xfail("sparse.mm", "reduce"), # sparse
+ xfail(
+ "NumpyCubeNotComposableAutogradFunction"
+ ), # Not composable autograd.Function
+ skip("_softmax_backward_data"),
+ skip(
+ "linalg.eigh", ""
+ ), # not always return the same result for the same input, see test_linalg_eigh for manual test
+ skip("to"), # RuntimeError: required rank 4 tensor to use channels_last format
# ----------------------------------------------------------------------
-
# ---------------------------- BUGS ------------------------------------
# entries in here don't work and need to be fixed.
# Each one of these is a bug
decorate("frexp", decorator=skipIfTorchDynamo()),
-
- xfail('clamp_min', ''), # Exception not raised on error input
- xfail('clamp_max', ''), # Exception not raised on error input
-
- xfail('view_as_complex'), # RuntimeError: Tensor must have a last dimension with stride 1
- xfail('tensor_split'), # data_ptr
- xfail('histogramdd'), # expected Tensor as element 0 in argument 0, but got tuple
- xfail('nn.functional.gaussian_nll_loss'), # data-dependent control flow error
- xfail('nn.functional.embedding_bag'), # embedding renorm vmap inplace incompatible
- xfail('narrow'), # Batching rule not implemented for aten::narrow.Tensor
-
+ xfail("clamp_min", ""), # Exception not raised on error input
+ xfail("clamp_max", ""), # Exception not raised on error input
+ xfail(
+ "view_as_complex"
+ ), # RuntimeError: Tensor must have a last dimension with stride 1
+ xfail("tensor_split"), # data_ptr
+ xfail(
+ "histogramdd"
+ ), # expected Tensor as element 0 in argument 0, but got tuple
+ xfail("nn.functional.gaussian_nll_loss"), # data-dependent control flow error
+ xfail(
+ "nn.functional.embedding_bag"
+ ), # embedding renorm vmap inplace incompatible
+ xfail("narrow"), # Batching rule not implemented for aten::narrow.Tensor
# required rank 4 tensor to use channels_last format
- xfail('bfloat16'),
- xfail('bool'),
- xfail('byte'),
- xfail('char'),
- xfail('double'),
- xfail('float'),
- xfail('half'),
- xfail('int'),
- xfail('long'),
- xfail('short'),
- xfail('cdouble'),
- xfail('cfloat'),
-
- xfail('jiterator_binary', device_type='cuda'), # NYI: querying is_contiguous inside of vmap
- xfail('jiterator_binary_return_by_ref', device_type='cuda'), # NYI: querying is_contiguous inside of vmap
- xfail('jiterator_4inputs_with_extra_args', device_type='cuda'), # NYI: querying is_contiguous inside of vmap
- xfail('equal', ''), # TypeError: object of type 'bool' has no len(); likely testrunner problem
- xfail('jiterator_unary', device_type='cuda'), # NYI: querying is_contiguous inside of vmap
- xfail('jiterator_2inputs_2outputs', device_type='cuda'), # NYI: querying is_contiguous inside of vmap
+ xfail("bfloat16"),
+ xfail("bool"),
+ xfail("byte"),
+ xfail("char"),
+ xfail("double"),
+ xfail("float"),
+ xfail("half"),
+ xfail("int"),
+ xfail("long"),
+ xfail("short"),
+ xfail("cdouble"),
+ xfail("cfloat"),
+ xfail(
+ "jiterator_binary", device_type="cuda"
+ ), # NYI: querying is_contiguous inside of vmap
+ xfail(
+ "jiterator_binary_return_by_ref", device_type="cuda"
+ ), # NYI: querying is_contiguous inside of vmap
+ xfail(
+ "jiterator_4inputs_with_extra_args", device_type="cuda"
+ ), # NYI: querying is_contiguous inside of vmap
+ xfail(
+ "equal", ""
+ ), # TypeError: object of type 'bool' has no len(); likely testrunner problem
+ xfail(
+ "jiterator_unary", device_type="cuda"
+ ), # NYI: querying is_contiguous inside of vmap
+ xfail(
+ "jiterator_2inputs_2outputs", device_type="cuda"
+ ), # NYI: querying is_contiguous inside of vmap
# ---------------------------------------------------------------------
-
# TypeError: expected Tensor as element 0 in argument 0, but got NotImplementedType
- xfail('__rsub__'),
+ xfail("__rsub__"),
# RuntimeError: Batching rule not implemented for aten::moveaxis.int;
# the fallback path doesn't work on out= or view ops.
- xfail('movedim'),
+ xfail("movedim"),
# RuntimeError: NYI: querying is_contiguous inside of vmap for
# memory_format other than torch.contiguous_format
- xfail('contiguous'),
+ xfail("contiguous"),
# RuntimeError: NYI: Tensor.clone(memory_format) inside vmap is only supported
# with memory_format torch.preserve_format or torch.contiguous_format (got ChannelsLast)
- xfail('clone'),
+ xfail("clone"),
# RuntimeError: When vmap-ing torch.nn.functional.one_hot,
# please provide an explicit positive num_classes argument.
- xfail('nn.functional.one_hot'),
+ xfail("nn.functional.one_hot"),
# RuntimeError: Expected all tensors to be on the same device,
# but found at least two devices, cuda:0 and cpu!
- xfail('eq', device_type='cuda'),
- xfail('ge', device_type='cuda'),
- xfail('gt', device_type='cuda'),
- xfail('le', device_type='cuda'),
- xfail('lt', device_type='cuda'),
- xfail('ne', device_type='cuda'),
-
+ xfail("eq", device_type="cuda"),
+ xfail("ge", device_type="cuda"),
+ xfail("gt", device_type="cuda"),
+ xfail("le", device_type="cuda"),
+ xfail("lt", device_type="cuda"),
+ xfail("ne", device_type="cuda"),
# RuntimeError: aten::_flash_attention_forward hit the vmap fallback which is currently disabled
- xfail('torch.ops.aten._flash_attention_forward'),
+ xfail("torch.ops.aten._flash_attention_forward"),
}
@with_tf32_off # https://github.com/pytorch/pytorch/issues/86798
@ops(op_db + additional_op_db + autograd_function_db, dtypes=OpDTypes.any_one)
- @opsToleranceOverride('TestVmapOperatorsOpInfo', 'test_vmap_exhaustive', (
- tol1('linalg.det',
- {torch.float32: tol(atol=1e-04, rtol=1e-04)}, device_type='cuda'),
- # The following is often flaky, but just on windows.
- # We should investigate if it's actually a problem or not.
- tol1('nn.functional.conv_transpose3d',
- {torch.float32: tol(atol=1e-04, rtol=1e-02)}, device_type='cuda'),
- ))
- @toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04),
- torch.complex64: tol(atol=1e-04, rtol=1e-04)})
- @skipOps('TestVmapOperatorsOpInfo', 'test_vmap_exhaustive', vmap_fail.union({
- # RuntimeError: Batch norm got a batched tensor as input while the running_mean or running_var,
- # which will be updated in place, were not batched.
- xfail('native_batch_norm'),
- xfail('_native_batch_norm_legit'),
- # TODO: implement batching rule
- xfail('_batch_norm_with_update'),
- xfail('tril'), # Exception not raised on error input
- xfail('triu'), # Exception not raised on error input
- xfail('as_strided', 'partial_views'),
-
- # https://github.com/pytorch/pytorch/issues/96560
- decorate('nn.functional.batch_norm', decorator=skipIfRocm),
-
- # RuntimeError: output with shape [4, 4] doesn't match the broadcast shape [1, 4, 4]
- xfail('addcdiv'),
- xfail('addcmul'),
- xfail('clamp'),
-
- xfail('torch.ops.aten._efficient_attention_forward'), # outputs ints
-
- # TypeError: expected Tensor as element 0 in argument 0, but got float
- xfail('item'),
- }))
+ @opsToleranceOverride(
+ "TestVmapOperatorsOpInfo",
+ "test_vmap_exhaustive",
+ (
+ tol1(
+ "linalg.det",
+ {torch.float32: tol(atol=1e-04, rtol=1e-04)},
+ device_type="cuda",
+ ),
+ # The following is often flaky, but just on windows.
+ # We should investigate if it's actually a problem or not.
+ tol1(
+ "nn.functional.conv_transpose3d",
+ {torch.float32: tol(atol=1e-04, rtol=1e-02)},
+ device_type="cuda",
+ ),
+ ),
+ )
+ @toleranceOverride(
+ {
+ torch.float32: tol(atol=1e-04, rtol=1e-04),
+ torch.complex64: tol(atol=1e-04, rtol=1e-04),
+ }
+ )
+ @skipOps(
+ "TestVmapOperatorsOpInfo",
+ "test_vmap_exhaustive",
+ vmap_fail.union(
+ {
+ # RuntimeError: Batch norm got a batched tensor as input while the running_mean or running_var,
+ # which will be updated in place, were not batched.
+ xfail("native_batch_norm"),
+ xfail("_native_batch_norm_legit"),
+ # TODO: implement batching rule
+ xfail("_batch_norm_with_update"),
+ xfail("tril"), # Exception not raised on error input
+ xfail("triu"), # Exception not raised on error input
+ xfail("as_strided", "partial_views"),
+ # https://github.com/pytorch/pytorch/issues/96560
+ decorate("nn.functional.batch_norm", decorator=skipIfRocm),
+ # RuntimeError: output with shape [4, 4] doesn't match the broadcast shape [1, 4, 4]
+ xfail("addcdiv"),
+ xfail("addcmul"),
+ xfail("clamp"),
+ xfail("torch.ops.aten._efficient_attention_forward"), # outputs ints
+ # TypeError: expected Tensor as element 0 in argument 0, but got float
+ xfail("item"),
+ }
+ ),
+ )
def test_vmap_exhaustive(self, device, dtype, op):
# needs to be fixed
- inplace_failure_list = (
+ inplace_failure_list = ()
+ self.opinfo_vmap_test(
+ device,
+ dtype,
+ op,
+ check_has_batch_rule=False,
+ skip_inplace=inplace_failure_list,
)
- self.opinfo_vmap_test(device, dtype, op, check_has_batch_rule=False,
- skip_inplace=inplace_failure_list)
@with_tf32_off
@ops(op_db + additional_op_db + autograd_function_db, dtypes=OpDTypes.any_one)
- @opsToleranceOverride('TestVmapOperatorsOpInfo', 'test_op_has_batch_rule', (
- tol1('linalg.det',
- {torch.float32: tol(atol=1e-04, rtol=1e-04)}, device_type='cuda'),
- ))
- @toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04), torch.complex64: tol(atol=1e-04, rtol=1e-04)})
- @skipOps('TestVmapOperatorsOpInfo', 'test_op_has_batch_rule', vmap_fail.union({
- xfail('as_strided', 'partial_views'),
- skip('to'), # RuntimeError: required rank 4 tensor to use channels_last format
- xfail('fill'),
- # Batch norm got a batched tensor as input while the running_mean or running_var,
- # which will be updated in place, were not batched.
- xfail('native_batch_norm'),
- xfail('_native_batch_norm_legit'),
- # TODO: implement batching rule
- xfail('_batch_norm_with_update'),
- xfail('histogram'),
- xfail('scatter_reduce', 'sum'),
- xfail('scatter_reduce', 'mean'),
- xfail('scatter_reduce', 'amax'),
- xfail('scatter_reduce', 'amin'),
- # `index_put` OpInfo in pytorch/pytorch has
- # masked index as input which is not supported
- xfail('index_put', ''),
- xfail('isin'),
- xfail('masked_fill'),
- xfail('masked_scatter'),
- xfail('masked_select'),
- xfail('nanquantile'),
- xfail('ormqr'),
- xfail('put'),
- xfail('quantile'),
- xfail('renorm'),
- xfail('resize_as_'),
- xfail('take'),
- xfail('tensor_split'),
- xfail('to_sparse'),
- # TypeError: expected Tensor as element 0 in argument 0, but got float
- xfail('item'),
- xfail('tril'), # Exception not raised on error input
- xfail('triu'), # Exception not raised on error input
- xfail('__getitem__', ''),
- xfail('count_nonzero'),
- xfail('nn.functional.dropout'), # works, can't check against for loop because of randomness inconsistency
- xfail('nn.functional.scaled_dot_product_attention'), # randomness
- xfail('nn.functional.multi_head_attention_forward'), # randomness
- xfail('torch.ops.aten._efficient_attention_forward'), # outputs ints
- xfail('resize_'),
- xfail('view_as_complex'),
- xfail('matrix_exp'),
- xfail('fft.ihfft2'),
- xfail('fft.ihfftn'),
- xfail('allclose'),
- xfail('argwhere'),
- xfail('unique_consecutive'),
- xfail('unique'),
- xfail('nn.functional.ctc_loss'),
- xfail('nn.functional.gaussian_nll_loss'),
- xfail('histc'),
- xfail('as_strided'),
- xfail('istft'),
- xfail('nonzero'),
- xfail('nn.functional.fractional_max_pool2d'),
- xfail('stft'),
- xfail('isclose'),
- xfail('nn.functional.fractional_max_pool3d'),
- xfail('nn.functional.bilinear'),
- xfail('nn.functional.embedding_bag'),
- xfail('linalg.tensorsolve'),
- xfail('bernoulli', ''),
- xfail('nn.functional.feature_alpha_dropout', 'with_train'),
- xfail('native_dropout_backward'),
- xfail('nn.functional.kl_div', ''),
- xfail('multinomial', ''),
- xfail('pca_lowrank', ''),
- xfail('normal', ''),
- xfail('nn.functional.dropout2d', ''),
- xfail('normal', 'number_mean'),
- xfail('svd_lowrank', ''),
- xfail('diagflat', ''),
- xfail('special.log_ndtr'),
- xfail('narrow'), # Batching rule not implemented for aten::narrow.Tensor
- xfail('nn.functional.triplet_margin_loss', ''),
- xfail('nn.functional.pdist', ''),
- xfail('scatter_reduce', 'sum'),
- xfail('scatter_reduce', 'amax'),
- xfail('nn.functional.max_unpool1d', 'grad'),
- xfail('nn.functional.multi_margin_loss', ''),
- xfail('scatter_reduce', 'prod'),
- xfail('nn.functional.multilabel_margin_loss', ''),
- xfail('scatter_reduce', 'amin'),
- xfail('nn.functional.max_unpool3d', 'grad'),
- xfail('nn.functional.max_unpool2d', ''),
- xfail('nn.functional.max_unpool2d', 'grad'),
- xfail('nn.functional.margin_ranking_loss', ''),
- xfail('nn.functional.max_unpool1d', ''),
- xfail('nn.functional.soft_margin_loss', ''),
- xfail('scatter_reduce', 'mean'),
- xfail('nn.functional.max_unpool3d', ''),
- xfail('linalg.ldl_solve', '', device_type='cpu'),
- xfail('chalf', ''),
- xfail('clamp_max', ''),
- xfail('jiterator_binary_return_by_ref', device_type='cuda'),
- xfail('jiterator_unary', device_type='cuda'),
- xfail('jiterator_2inputs_2outputs', device_type='cuda'),
- xfail('special.airy_ai'),
- xfail('clamp_min', ''),
- xfail('sparse.sampled_addmm'),
- xfail('sparse.mm', 'reduce'),
- xfail('special.chebyshev_polynomial_u'),
- xfail('_segment_reduce', 'offsets'),
- xfail('index_reduce', ''),
- xfail('special.laguerre_polynomial_l'),
- xfail('special.hermite_polynomial_h'),
- xfail('jiterator_binary', device_type='cuda'),
- xfail('jiterator_4inputs_with_extra_args', device_type='cuda'),
- xfail('_segment_reduce', 'lengths'),
- xfail('lu_solve', ''),
- xfail('special.hermite_polynomial_he'),
- xfail('nn.functional.dropout3d', ''),
- xfail('special.chebyshev_polynomial_t'),
- xfail('as_strided_scatter', ''),
- xfail('equal', ''),
- xfail('linalg.lu', ''),
- skip('linalg.ldl_solve', ''),
- skip('_softmax_backward_data'),
- # https://github.com/pytorch/pytorch/issues/96560
- decorate('nn.functional.batch_norm', decorator=skipIfRocm),
-
- # One or more of the overload doesn't have a Batch rule.
- xfail('bincount'),
- # RuntimeError: Expected all tensors to be on the same device,
- # but found at least two devices, cuda:0 and cpu!
- xfail('ge', device_type='cuda'),
- xfail('argsort'), # aten::argsort.stable hit the vmap fallback which is currently disabled
- xfail('searchsorted'), # aten::searchsorted.Scalar hit the vmap fallback which is currently disabled
- }))
+ @opsToleranceOverride(
+ "TestVmapOperatorsOpInfo",
+ "test_op_has_batch_rule",
+ (
+ tol1(
+ "linalg.det",
+ {torch.float32: tol(atol=1e-04, rtol=1e-04)},
+ device_type="cuda",
+ ),
+ ),
+ )
+ @toleranceOverride(
+ {
+ torch.float32: tol(atol=1e-04, rtol=1e-04),
+ torch.complex64: tol(atol=1e-04, rtol=1e-04),
+ }
+ )
+ @skipOps(
+ "TestVmapOperatorsOpInfo",
+ "test_op_has_batch_rule",
+ vmap_fail.union(
+ {
+ xfail("as_strided", "partial_views"),
+ skip(
+ "to"
+ ), # RuntimeError: required rank 4 tensor to use channels_last format
+ xfail("fill"),
+ # Batch norm got a batched tensor as input while the running_mean or running_var,
+ # which will be updated in place, were not batched.
+ xfail("native_batch_norm"),
+ xfail("_native_batch_norm_legit"),
+ # TODO: implement batching rule
+ xfail("_batch_norm_with_update"),
+ xfail("histogram"),
+ xfail("scatter_reduce", "sum"),
+ xfail("scatter_reduce", "mean"),
+ xfail("scatter_reduce", "amax"),
+ xfail("scatter_reduce", "amin"),
+ # `index_put` OpInfo in pytorch/pytorch has
+ # masked index as input which is not supported
+ xfail("index_put", ""),
+ xfail("isin"),
+ xfail("masked_fill"),
+ xfail("masked_scatter"),
+ xfail("masked_select"),
+ xfail("nanquantile"),
+ xfail("ormqr"),
+ xfail("put"),
+ xfail("quantile"),
+ xfail("renorm"),
+ xfail("resize_as_"),
+ xfail("take"),
+ xfail("tensor_split"),
+ xfail("to_sparse"),
+ # TypeError: expected Tensor as element 0 in argument 0, but got float
+ xfail("item"),
+ xfail("tril"), # Exception not raised on error input
+ xfail("triu"), # Exception not raised on error input
+ xfail("__getitem__", ""),
+ xfail("count_nonzero"),
+ xfail(
+ "nn.functional.dropout"
+ ), # works, can't check against for loop because of randomness inconsistency
+ xfail("nn.functional.scaled_dot_product_attention"), # randomness
+ xfail("nn.functional.multi_head_attention_forward"), # randomness
+ xfail("torch.ops.aten._efficient_attention_forward"), # outputs ints
+ xfail("resize_"),
+ xfail("view_as_complex"),
+ xfail("matrix_exp"),
+ xfail("fft.ihfft2"),
+ xfail("fft.ihfftn"),
+ xfail("allclose"),
+ xfail("argwhere"),
+ xfail("unique_consecutive"),
+ xfail("unique"),
+ xfail("nn.functional.ctc_loss"),
+ xfail("nn.functional.gaussian_nll_loss"),
+ xfail("histc"),
+ xfail("as_strided"),
+ xfail("istft"),
+ xfail("nonzero"),
+ xfail("nn.functional.fractional_max_pool2d"),
+ xfail("stft"),
+ xfail("isclose"),
+ xfail("nn.functional.fractional_max_pool3d"),
+ xfail("nn.functional.bilinear"),
+ xfail("nn.functional.embedding_bag"),
+ xfail("linalg.tensorsolve"),
+ xfail("bernoulli", ""),
+ xfail("nn.functional.feature_alpha_dropout", "with_train"),
+ xfail("native_dropout_backward"),
+ xfail("nn.functional.kl_div", ""),
+ xfail("multinomial", ""),
+ xfail("pca_lowrank", ""),
+ xfail("normal", ""),
+ xfail("nn.functional.dropout2d", ""),
+ xfail("normal", "number_mean"),
+ xfail("svd_lowrank", ""),
+ xfail("diagflat", ""),
+ xfail("special.log_ndtr"),
+ xfail(
+ "narrow"
+ ), # Batching rule not implemented for aten::narrow.Tensor
+ xfail("nn.functional.triplet_margin_loss", ""),
+ xfail("nn.functional.pdist", ""),
+ xfail("scatter_reduce", "sum"),
+ xfail("scatter_reduce", "amax"),
+ xfail("nn.functional.max_unpool1d", "grad"),
+ xfail("nn.functional.multi_margin_loss", ""),
+ xfail("scatter_reduce", "prod"),
+ xfail("nn.functional.multilabel_margin_loss", ""),
+ xfail("scatter_reduce", "amin"),
+ xfail("nn.functional.max_unpool3d", "grad"),
+ xfail("nn.functional.max_unpool2d", ""),
+ xfail("nn.functional.max_unpool2d", "grad"),
+ xfail("nn.functional.margin_ranking_loss", ""),
+ xfail("nn.functional.max_unpool1d", ""),
+ xfail("nn.functional.soft_margin_loss", ""),
+ xfail("scatter_reduce", "mean"),
+ xfail("nn.functional.max_unpool3d", ""),
+ xfail("linalg.ldl_solve", "", device_type="cpu"),
+ xfail("chalf", ""),
+ xfail("clamp_max", ""),
+ xfail("jiterator_binary_return_by_ref", device_type="cuda"),
+ xfail("jiterator_unary", device_type="cuda"),
+ xfail("jiterator_2inputs_2outputs", device_type="cuda"),
+ xfail("special.airy_ai"),
+ xfail("clamp_min", ""),
+ xfail("sparse.sampled_addmm"),
+ xfail("sparse.mm", "reduce"),
+ xfail("special.chebyshev_polynomial_u"),
+ xfail("_segment_reduce", "offsets"),
+ xfail("index_reduce", ""),
+ xfail("special.laguerre_polynomial_l"),
+ xfail("special.hermite_polynomial_h"),
+ xfail("jiterator_binary", device_type="cuda"),
+ xfail("jiterator_4inputs_with_extra_args", device_type="cuda"),
+ xfail("_segment_reduce", "lengths"),
+ xfail("lu_solve", ""),
+ xfail("special.hermite_polynomial_he"),
+ xfail("nn.functional.dropout3d", ""),
+ xfail("special.chebyshev_polynomial_t"),
+ xfail("as_strided_scatter", ""),
+ xfail("equal", ""),
+ xfail("linalg.lu", ""),
+ skip("linalg.ldl_solve", ""),
+ skip("_softmax_backward_data"),
+ # https://github.com/pytorch/pytorch/issues/96560
+ decorate("nn.functional.batch_norm", decorator=skipIfRocm),
+                # One or more of the overloads don't have a batch rule.
+ xfail("bincount"),
+ # RuntimeError: Expected all tensors to be on the same device,
+ # but found at least two devices, cuda:0 and cpu!
+ xfail("ge", device_type="cuda"),
+ xfail(
+ "argsort"
+ ), # aten::argsort.stable hit the vmap fallback which is currently disabled
+ xfail(
+ "searchsorted"
+ ), # aten::searchsorted.Scalar hit the vmap fallback which is currently disabled
+ }
+ ),
+ )
def test_op_has_batch_rule(self, device, dtype, op):
# needs to be fixed
inplace_failures = (
- 'addbmm',
- 'addcdiv',
- 'addcmul',
- 'addmm',
- 'addmv',
- 'addr',
- 'baddbmm',
- 'clamp',
- 'conj_physical',
- 'cumprod',
- 'cumsum',
- 'floor_divide',
- 'fmod',
- 'heaviside',
- 'hypot',
- 'igamma',
- 'igammac',
- 'index_copy',
- 'ldexp',
- 'lerp',
- 'neg',
- 'nextafter',
- 'polygamma',
- 'pow',
- 'remainder',
- 'scatter_add',
- 'scatter',
- 'square',
- 'sub',
- 'trunc',
- 'xlogy',
- )
- self.opinfo_vmap_test(device, dtype, op, check_has_batch_rule=True,
- skip_inplace=inplace_failures)
+ "addbmm",
+ "addcdiv",
+ "addcmul",
+ "addmm",
+ "addmv",
+ "addr",
+ "baddbmm",
+ "clamp",
+ "conj_physical",
+ "cumprod",
+ "cumsum",
+ "floor_divide",
+ "fmod",
+ "heaviside",
+ "hypot",
+ "igamma",
+ "igammac",
+ "index_copy",
+ "ldexp",
+ "lerp",
+ "neg",
+ "nextafter",
+ "polygamma",
+ "pow",
+ "remainder",
+ "scatter_add",
+ "scatter",
+ "square",
+ "sub",
+ "trunc",
+ "xlogy",
+ )
+ self.opinfo_vmap_test(
+ device, dtype, op, check_has_batch_rule=True, skip_inplace=inplace_failures
+ )
def test_linalg_svd(self, device):
# linalg_svd returns a tuple of three tensors, (U, S, Vh).
@@ -3839,12 +4441,17 @@ class TestVmapOperatorsOpInfo(TestCase):
diag_S.diagonal(offset=0, dim1=-2, dim2=-1).copy_(S)
return U @ diag_S @ Vh
- opinfos = [op for op in op_db if op.name == 'linalg.svd']
+ opinfos = [op for op in op_db if op.name == "linalg.svd"]
assert len(opinfos) > 0
for op in opinfos:
- self.opinfo_vmap_test(device, torch.float, op, check_has_batch_rule=True,
- postprocess_fn=compute_A)
+ self.opinfo_vmap_test(
+ device,
+ torch.float,
+ op,
+ check_has_batch_rule=True,
+ postprocess_fn=compute_A,
+ )
def test_linalg_eigh(self, device):
# linalg_svd returns two tensors, (Q, L).
@@ -3861,12 +4468,17 @@ class TestVmapOperatorsOpInfo(TestCase):
Qh = Q.transpose(-2, -1).conj()
return Q @ diag_L @ Qh
- opinfos = [op for op in op_db if op.name == 'linalg.eigh']
+ opinfos = [op for op in op_db if op.name == "linalg.eigh"]
assert len(opinfos) > 0
for op in opinfos:
- self.opinfo_vmap_test(device, torch.float, op, check_has_batch_rule=True,
- postprocess_fn=compute_A)
+ self.opinfo_vmap_test(
+ device,
+ torch.float,
+ op,
+ check_has_batch_rule=True,
+ postprocess_fn=compute_A,
+ )
@skipIfTorchDynamo()
def test_slogdet(self, device):
@@ -3889,7 +4501,9 @@ class TestVmapOperatorsOpInfo(TestCase):
dim = -2
index = torch.tensor([[2, 3], [0, 4]], device=device)
value = 5.0
- self.vmap_outplace_test(torch.index_fill, (x, dim, index, value), {}, (None, None, 0, None))
+ self.vmap_outplace_test(
+ torch.index_fill, (x, dim, index, value), {}, (None, None, 0, None)
+ )
def test2():
# self batched, self logical rank 1, index logical rank 1
@@ -3897,7 +4511,9 @@ class TestVmapOperatorsOpInfo(TestCase):
dim = 0
index = torch.tensor([[0], [1]], device=device)
for value in (1.0, torch.rand((), device=device)):
- self.vmap_outplace_test(torch.index_fill, (x, dim, index, value), {}, (0, None, 0, None))
+ self.vmap_outplace_test(
+ torch.index_fill, (x, dim, index, value), {}, (0, None, 0, None)
+ )
def test3():
# self batched, self logical rank 1, index logical rank 0
@@ -3905,7 +4521,9 @@ class TestVmapOperatorsOpInfo(TestCase):
dim = 0
index = torch.tensor([0, 1], device=device)
for value in (1.0, torch.rand((), device=device)):
- self.vmap_outplace_test(torch.index_fill, (x, dim, index, value), {}, (0, None, 0, None))
+ self.vmap_outplace_test(
+ torch.index_fill, (x, dim, index, value), {}, (0, None, 0, None)
+ )
def test4():
# self not batched, self logical rank 0, index logical rank 1
@@ -3913,7 +4531,9 @@ class TestVmapOperatorsOpInfo(TestCase):
dim = 0
index = torch.tensor([[0], [0]], device=device)
for value in (1.0, torch.rand((), device=device)):
- self.vmap_outplace_test(torch.index_fill, (x, dim, index, value), {}, (None, None, 0, None))
+ self.vmap_outplace_test(
+ torch.index_fill, (x, dim, index, value), {}, (None, None, 0, None)
+ )
def test5():
# self not batched, self logical rank 0, index logical rank 0
@@ -3921,7 +4541,9 @@ class TestVmapOperatorsOpInfo(TestCase):
dim = 0
index = torch.tensor([0, 0], device=device)
for value in (1.0, torch.rand((), device=device)):
- self.vmap_outplace_test(torch.index_fill, (x, dim, index, value), {}, (None, None, 0, None))
+ self.vmap_outplace_test(
+ torch.index_fill, (x, dim, index, value), {}, (None, None, 0, None)
+ )
def test6():
# self not batched, self logical rank 0, index logical rank 1
@@ -3929,7 +4551,9 @@ class TestVmapOperatorsOpInfo(TestCase):
dim = 0
index = torch.tensor([[0], [1]], device=device)
for value in (1.0, torch.rand((), device=device)):
- self.vmap_outplace_test(torch.index_fill, (x, dim, index, value), {}, (None, None, 0, None))
+ self.vmap_outplace_test(
+ torch.index_fill, (x, dim, index, value), {}, (None, None, 0, None)
+ )
def test7():
# self not batched, self logical rank 0, index logical rank 0
@@ -3937,7 +4561,9 @@ class TestVmapOperatorsOpInfo(TestCase):
dim = 0
index = torch.tensor([0, 1], device=device)
for value in (1.0, torch.rand((), device=device)):
- self.vmap_outplace_test(torch.index_fill, (x, dim, index, value), {}, (None, None, 0, None))
+ self.vmap_outplace_test(
+ torch.index_fill, (x, dim, index, value), {}, (None, None, 0, None)
+ )
def test8():
# self batched, self logical rank > 1, index logical rank 0
@@ -3945,7 +4571,9 @@ class TestVmapOperatorsOpInfo(TestCase):
dim = 0
index = torch.tensor([0, 1], device=device)
for value in (1.0, torch.rand((), device=device)):
- self.vmap_outplace_test(torch.index_fill, (x, dim, index, value), {}, (0, None, 0, None))
+ self.vmap_outplace_test(
+ torch.index_fill, (x, dim, index, value), {}, (0, None, 0, None)
+ )
for test in (test1, test2, test3, test4, test5, test6, test7, test8):
check_vmap_fallback(self, test, torch.index_fill)
@@ -3982,16 +4610,31 @@ class TestVmapOperatorsOpInfo(TestCase):
output_padding = (0, 0)
groups = 1
output_mask = (True, True, True)
- gO = torch.randn_like(F.conv2d(images, weight, bias, stride, padding, dilation, groups))
+ gO = torch.randn_like(
+ F.conv2d(images, weight, bias, stride, padding, dilation, groups)
+ )
args = (
- ggI, ggW, ggb, gO, weight, images, stride, padding, dilation,
- transposed, output_padding, groups, output_mask,
+ ggI,
+ ggW,
+ ggb,
+ gO,
+ weight,
+ images,
+ stride,
+ padding,
+ dilation,
+ transposed,
+ output_padding,
+ groups,
+ output_mask,
)
op = torch.ops.aten._convolution_double_backward
generator = get_fallback_and_vmap_exhaustive(op, args, {})
- is_cuda_sm86 = device.startswith("cuda") and torch.cuda.get_device_capability(0) == (8, 6)
+ is_cuda_sm86 = device.startswith("cuda") and torch.cuda.get_device_capability(
+ 0
+ ) == (8, 6)
atol, rtol = (1e-3, 1e-3) if is_cuda_sm86 else (1e-4, 1e-4)
def test():
@@ -4007,11 +4650,11 @@ class TestVmapOperatorsOpInfo(TestCase):
op = torch.isnan
x = torch.randn(B, N, C, H, W)
- x[x > 0] = float('nan')
+ x[x > 0] = float("nan")
test(self, op, (x,), in_dims=(0))
def test_sum_scalar(self, device):
- x = torch.tensor([10.], device=device)
+ x = torch.tensor([10.0], device=device)
y = vmap(torch.sum)(x)
self.assertEqual(y, x)
@@ -4028,7 +4671,7 @@ class TestVmapOperatorsOpInfo(TestCase):
op = torch.isinf
x = torch.randn(B, N, C, H, W)
- x[x > 0] = float('inf')
+ x[x > 0] = float("inf")
test(self, op, (x,), in_dims=(0))
def test_foo_like(self, device):
@@ -4040,7 +4683,9 @@ class TestVmapOperatorsOpInfo(TestCase):
x = torch.randn(B, N, C, H, W)
# todo(chilli): test these better
# Not testing correctness, just that they run
- vmap(op, in_dims=(0,))(x,)
+ vmap(op, in_dims=(0,))(
+ x,
+ )
def test_flatten(self, device):
test = functools.partial(_vmap_test, check_propagates_grad=False)
@@ -4070,7 +4715,9 @@ class TestVmapOperatorsOpInfo(TestCase):
def test(f, t, idx, values):
base = f(t[0], idx[0], values[0])
self.assertEqual(vmap(f, in_dims=(0, 0, 0))(t, idx, values)[0], base)
- self.assertEqual(vmap(f, in_dims=(0, None, None))(t, idx[0], values[0])[0], base)
+ self.assertEqual(
+ vmap(f, in_dims=(0, None, None))(t, idx[0], values[0])[0], base
+ )
self.assertEqual(vmap(f, in_dims=(0, None, 0))(t, idx[0], values)[0], base)
self.assertEqual(vmap(f, in_dims=(0, 0, None))(t, idx, values[0])[0], base)
@@ -4115,15 +4762,24 @@ class TestVmapOperatorsOpInfo(TestCase):
# index_put_
tensor = torch.zeros(3, 3, 4)
value = torch.ones(3, 2)
- idxs = (torch.tensor([[0], [1], [2]]), torch.tensor([[0]]), torch.tensor([1, 2]))
+ idxs = (
+ torch.tensor([[0], [1], [2]]),
+ torch.tensor([[0]]),
+ torch.tensor([1, 2]),
+ )
expected = torch.index_put_(tensor.clone(), idxs, value)
def f(t, idx, v):
torch.index_put_(t, idx, v)
return t
- self.assertEqual(vmap(f, in_dims=(0, (None, None), 0))(tensor, idxs[1:], value), expected)
- self.assertEqual(vmap(f, in_dims=(0, (None, None), None))(tensor, idxs[1:], value[0]), expected)
+ self.assertEqual(
+ vmap(f, in_dims=(0, (None, None), 0))(tensor, idxs[1:], value), expected
+ )
+ self.assertEqual(
+ vmap(f, in_dims=(0, (None, None), None))(tensor, idxs[1:], value[0]),
+ expected,
+ )
# boolean mask
B = 2
@@ -4138,9 +4794,9 @@ class TestVmapOperatorsOpInfo(TestCase):
self.vmap_outplace_test(f, (x, gy), {}, in_dims=(None, 0))
- @parametrize('training', [True, False])
- @parametrize('track_running_stats', [True, False])
- @parametrize('affine', [True, False])
+ @parametrize("training", [True, False])
+ @parametrize("track_running_stats", [True, False])
+ @parametrize("affine", [True, False])
def test_batch_norm(self, device, affine, track_running_stats, training):
if not track_running_stats and not training:
return
@@ -4150,9 +4806,9 @@ class TestVmapOperatorsOpInfo(TestCase):
ensemble_size = 10
hidden_dim = 3
- weights, buffers, _, _, _ = \
- functional_init_with_buffers(BN, [ensemble_size])(
- hidden_dim, affine=affine, track_running_stats=track_running_stats)
+ weights, buffers, _, _, _ = functional_init_with_buffers(BN, [ensemble_size])(
+ hidden_dim, affine=affine, track_running_stats=track_running_stats
+ )
inputs = [torch.randn(ensemble_size, 32, hidden_dim, 16, 16, device=device)]
in_dims = [0]
@@ -4189,13 +4845,23 @@ class TestVmapOperatorsOpInfo(TestCase):
def test_torch_return_types_returns(self, device):
t = torch.randn(3, 2, 2, device=device)
- self.assertTrue(isinstance(vmap(torch.min, (0, None))(t, 0), torch.return_types.min))
- self.assertTrue(isinstance(vmap(torch.max, (0, None))(t, 0), torch.return_types.max))
- self.assertTrue(isinstance(vmap(torch.topk, (0, None, None))(t, 1, 0), torch.return_types.topk))
- self.assertTrue(isinstance(vmap(torch.linalg.eig, (0))(t), torch.return_types.linalg_eig))
+ self.assertTrue(
+ isinstance(vmap(torch.min, (0, None))(t, 0), torch.return_types.min)
+ )
+ self.assertTrue(
+ isinstance(vmap(torch.max, (0, None))(t, 0), torch.return_types.max)
+ )
+ self.assertTrue(
+ isinstance(
+ vmap(torch.topk, (0, None, None))(t, 1, 0), torch.return_types.topk
+ )
+ )
+ self.assertTrue(
+ isinstance(vmap(torch.linalg.eig, (0))(t), torch.return_types.linalg_eig)
+ )
def test_namedtuple_returns(self, device):
- Point = namedtuple('Point', ['x', 'y'])
+ Point = namedtuple("Point", ["x", "y"])
def f(x, y):
return Point(x=x, y=y)
@@ -4216,14 +4882,18 @@ class TestVmapOperatorsOpInfo(TestCase):
def push_vjp(leaf, gout):
_, vjp_fn = vjp(func, leaf)
- result, = vjp_fn(gout)
+ (result,) = vjp_fn(gout)
return result
leaf = torch.randn(4, 4, device=device)
gout = torch.randn(2, 2, device=device)
args = (leaf, gout)
- for batched_args, in_dims, _, in generate_vmap_inputs(args, {}):
+ for (
+ batched_args,
+ in_dims,
+ _,
+ ) in generate_vmap_inputs(args, {}):
if in_dims[1] is None:
# triggers some composite compliance problem
continue
@@ -4243,13 +4913,17 @@ class TestVmapOperatorsOpInfo(TestCase):
def f3(x, idx):
return x[:, :, idx]
- inps = (torch.randn(5, 5, 5, device=device),
- torch.randn(5, 5, 5, 5, device=device),
- torch.randn(5, 5, 5, 5, 5, device=device))
- idxes = (torch.tensor([0, 1, 2], device=device),
- torch.tensor([0, 1, 2], device=device).reshape(3, 1),
- torch.tensor([0, 1, 2], device=device).reshape(3, 1, 1))
- for (inp, idx) in itertools.product(inps, idxes):
+ inps = (
+ torch.randn(5, 5, 5, device=device),
+ torch.randn(5, 5, 5, 5, device=device),
+ torch.randn(5, 5, 5, 5, 5, device=device),
+ )
+ idxes = (
+ torch.tensor([0, 1, 2], device=device),
+ torch.tensor([0, 1, 2], device=device).reshape(3, 1),
+ torch.tensor([0, 1, 2], device=device).reshape(3, 1, 1),
+ )
+ for inp, idx in itertools.product(inps, idxes):
test(f, (inp, idx))
test(f2, (inp, idx))
test(f3, (inp, idx))
@@ -4280,27 +4954,38 @@ class TestVmapOperatorsOpInfo(TestCase):
b = with_vmap(_fake_vmap)
self.assertEqual(a, b)
- @ops(filter(lambda op: "linalg" in op.name, op_db + additional_op_db), allowed_dtypes=(torch.float,))
- @skipOps('TestVmapOperatorsOpInfo', 'test_vmap_linalg_failure_1D_input', {
- xfail('linalg.vector_norm'), # can accept vector inputs
- xfail('linalg.norm'), # can accept vector inputs
- xfail('linalg.norm', 'subgradients_at_zero'), # can accept vector inputs
- xfail('linalg.vander'), # can accept vector inputs
- skip('linalg.multi_dot'), # accepts list of tensor inputs, has its own special test
- xfail('linalg.vecdot'),
- # throws in vmap on CUDA
- # IndexError: Dimension out of range (expected to be in range of [-1, 0], but got -2)
- # https://github.com/pytorch/pytorch/runs/8110653462?check_suite_focus=true
- # but it passes locally
- xfail('linalg.diagonal'),
- skip('linalg.matrix_norm', ''),
- skip('linalg.ldl_solve', ''),
- })
+ @ops(
+ filter(lambda op: "linalg" in op.name, op_db + additional_op_db),
+ allowed_dtypes=(torch.float,),
+ )
+ @skipOps(
+ "TestVmapOperatorsOpInfo",
+ "test_vmap_linalg_failure_1D_input",
+ {
+ xfail("linalg.vector_norm"), # can accept vector inputs
+ xfail("linalg.norm"), # can accept vector inputs
+ xfail("linalg.norm", "subgradients_at_zero"), # can accept vector inputs
+ xfail("linalg.vander"), # can accept vector inputs
+ skip(
+ "linalg.multi_dot"
+ ), # accepts list of tensor inputs, has its own special test
+ xfail("linalg.vecdot"),
+ # throws in vmap on CUDA
+ # IndexError: Dimension out of range (expected to be in range of [-1, 0], but got -2)
+ # https://github.com/pytorch/pytorch/runs/8110653462?check_suite_focus=true
+ # but it passes locally
+ xfail("linalg.diagonal"),
+ skip("linalg.matrix_norm", ""),
+ skip("linalg.ldl_solve", ""),
+ },
+ )
def test_vmap_linalg_failure_1D_input(self, device, dtype, op):
for sample in op.sample_inputs(device, dtype, requires_grad=False):
if sample.input.dim() != 2 or sample.input.shape[0] == 0:
continue
- test_input = sample.input[0] # using the sample input avoids numerical inconsistency issues
+ test_input = sample.input[
+ 0
+ ] # using the sample input avoids numerical inconsistency issues
with self.assertRaisesRegex(RuntimeError, "dimension"):
op(test_input, *sample.args, **sample.kwargs)
@@ -4329,41 +5014,59 @@ class TestVmapOperatorsOpInfo(TestCase):
def f(x):
nonlocal escaped
escaped = x
- return x ** 2
+ return x**2
x = torch.randn([3, 3, 3, 3, 3])
vmap(f)(x)
- common_message = r"your tensor may have escaped from inside a function being vmapped.*{0}.*"
+ common_message = (
+ r"your tensor may have escaped from inside a function being vmapped.*{0}.*"
+ )
# Note: These are not a complete set of tests for all possible functions calling 'vmap_check_escaped'
- with self.assertRaisesRegex(RuntimeError, common_message.format("gen_vmap_plumbing")):
+ with self.assertRaisesRegex(
+ RuntimeError, common_message.format("gen_vmap_plumbing")
+ ):
escaped.sin()
- with self.assertRaisesRegex(RuntimeError, common_message.format("boxed_tensor_inputs_batch_rule")):
+ with self.assertRaisesRegex(
+ RuntimeError, common_message.format("boxed_tensor_inputs_batch_rule")
+ ):
escaped.sin_()
- with self.assertRaisesRegex(RuntimeError, common_message.format("gen_vmap_inplace_plumbing")):
+ with self.assertRaisesRegex(
+ RuntimeError, common_message.format("gen_vmap_inplace_plumbing")
+ ):
escaped.mul_(1)
- with self.assertRaisesRegex(RuntimeError, common_message.format("binary_cross_entropy_plumbing")):
+ with self.assertRaisesRegex(
+ RuntimeError, common_message.format("binary_cross_entropy_plumbing")
+ ):
torch.nn.functional.binary_cross_entropy(escaped, torch.zeros([3, 3, 3, 3]))
- with self.assertRaisesRegex(RuntimeError, common_message.format("boxed_existing_bdim_all_batch_rule")):
+ with self.assertRaisesRegex(
+ RuntimeError, common_message.format("boxed_existing_bdim_all_batch_rule")
+ ):
torch.nn.functional.adaptive_max_pool2d(escaped, output_size=(1, 1))
- with self.assertRaisesRegex(RuntimeError, common_message.format("boxed_reduction_batch_rule")):
+ with self.assertRaisesRegex(
+ RuntimeError, common_message.format("boxed_reduction_batch_rule")
+ ):
escaped.argmin()
a = torch.zeros([4, 4, 4, 4])
b = torch.zeros([4, 4, 4, 4], dtype=torch.long)
- with self.assertRaisesRegex(RuntimeError, common_message.format("boxed_all_tensors_have_optional_bdim")):
+ with self.assertRaisesRegex(
+ RuntimeError, common_message.format("boxed_all_tensors_have_optional_bdim")
+ ):
torch.ops.aten.adaptive_max_pool2d_backward(escaped, a, b)
vmap(f)(torch.tensor([[0, 0], [0, 0]], dtype=torch.int))
- with self.assertRaisesRegex(RuntimeError, common_message.format("gen_vmap_plumbing_no_returns")):
- torch.ops.aten._linalg_check_errors(escaped, 'linalg.inv', is_matrix=False)
+ with self.assertRaisesRegex(
+ RuntimeError, common_message.format("gen_vmap_plumbing_no_returns")
+ ):
+ torch.ops.aten._linalg_check_errors(escaped, "linalg.inv", is_matrix=False)
def test_vmap_with_anomaly_detection(self):
with torch.autograd.set_detect_anomaly(True):
@@ -4387,7 +5090,9 @@ class TestVmapOperatorsOpInfo(TestCase):
# Thus we test explicitly with different samples across a batch.
def test():
- boundaries = torch.tensor([[1, 4, 5, 7, 9], [1, 2, 6, 8, 10]], device=device)
+ boundaries = torch.tensor(
+ [[1, 4, 5, 7, 9], [1, 2, 6, 8, 10]], device=device
+ )
v = torch.tensor(3, device=device)
self.vmap_outplace_test(torch.searchsorted, (boundaries, v), {}, (0, None))
self.vmap_outplace_test(torch.bucketize, (v, boundaries), {}, (None, 0))
@@ -4398,10 +5103,15 @@ class TestVmapOperatorsOpInfo(TestCase):
test()
+
@markDynamoStrictTest
class TestRandomness(TestCase):
def _reset_random(self, generator, orig_state, use_generator, seed):
- return generator.set_state(orig_state) if use_generator else torch.manual_seed(seed)
+ return (
+ generator.set_state(orig_state)
+ if use_generator
+ else torch.manual_seed(seed)
+ )
def _get_image(self, batched_input, batch_size, device):
if batched_input == "first":
@@ -4417,26 +5127,33 @@ class TestRandomness(TestCase):
def _assert_all_slices_unique(self, tensor):
B0 = tensor.shape[0]
- slices_equal = vmap(vmap(lambda x, y: (x == y).all(), (0, None)), (None, 0))(tensor, tensor)
+ slices_equal = vmap(vmap(lambda x, y: (x == y).all(), (0, None)), (None, 0))(
+ tensor, tensor
+ )
assert slices_equal.shape == (B0, B0)
slices_equal.diagonal().zero_()
self.assertEqual(slices_equal, torch.zeros_like(slices_equal))
def _assert_throws_in_error_mode(self, fn, args, in_dims):
- with self.assertRaisesRegex(RuntimeError, r"called random operation while in randomness error mode"):
+ with self.assertRaisesRegex(
+ RuntimeError, r"called random operation while in randomness error mode"
+ ):
vmap(fn, in_dims=in_dims, randomness="error")(*args)
def _assert_throws_in_different_mode_inplace(self, fn, args, in_dims):
- with self.assertRaisesRegex(RuntimeError, r"different inplace randomness on an unbatched tensor"):
+ with self.assertRaisesRegex(
+ RuntimeError, r"different inplace randomness on an unbatched tensor"
+ ):
vmap(fn, in_dims=in_dims, randomness="different")(*args)
def _assert_throws_in_same_mode_batched(self, fn, args, in_dims):
- with self.assertRaisesRegex(RuntimeError,
- r"Vmap does not currently support same randomness with a batched tensor input"):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ r"Vmap does not currently support same randomness with a batched tensor input",
+ ):
vmap(fn, in_dims=in_dims, randomness="same")(*args)
def _in_dims(self, *batched_strings):
-
def get_in_dim(batched_string):
if batched_string == "first":
return 0
@@ -4445,22 +5162,27 @@ class TestRandomness(TestCase):
assert batched_string == "none"
return None
- batched_strings = batched_strings + ("first",) # for the always batched as first dim dummy argument
+ batched_strings = batched_strings + (
+ "first",
+ ) # for the always batched as first dim dummy argument
return tuple(get_in_dim(batched_string) for batched_string in batched_strings)
- @parametrize('randomness', ['same', 'different', 'error'])
- @parametrize('use_generator', [True, False])
+ @parametrize("randomness", ["same", "different", "error"])
+ @parametrize("use_generator", [True, False])
def test_factory_ops(self, device, randomness, use_generator):
-
generator = torch.Generator(device=device)
orig_state = generator.get_state()
- kwargs = {'device': device, 'generator': generator} if use_generator else {'device': device}
+ kwargs = (
+ {"device": device, "generator": generator}
+ if use_generator
+ else {"device": device}
+ )
ops = [
lambda _, shape: torch.randn(shape, **kwargs),
lambda _, shape: torch.rand(shape, **kwargs),
lambda _, shape: torch.randint(100, shape, **kwargs),
lambda _, shape: torch.randint(5, 100, shape, **kwargs),
- lambda _, shape: torch.normal(0., 1., shape, **kwargs),
+ lambda _, shape: torch.normal(0.0, 1.0, shape, **kwargs),
]
B0 = 4
shape = (3, 3)
@@ -4468,12 +5190,16 @@ class TestRandomness(TestCase):
for op in ops:
passed = torch.randn(B0, device=device)
- if randomness == 'error':
- self._assert_throws_in_error_mode(op, (passed, shape), in_dims=(0, None))
+ if randomness == "error":
+ self._assert_throws_in_error_mode(
+ op, (passed, shape), in_dims=(0, None)
+ )
return
generator = self._reset_random(generator, orig_state, use_generator, seed)
- vmap_result = vmap(op, in_dims=(0, None), randomness=randomness)(passed, shape)
+ vmap_result = vmap(op, in_dims=(0, None), randomness=randomness)(
+ passed, shape
+ )
generator = self._reset_random(generator, orig_state, use_generator, seed)
if randomness == "different":
@@ -4486,10 +5212,9 @@ class TestRandomness(TestCase):
for i in range(B0):
self.assertEqual(vmap_result[i], expected)
- @parametrize('randomness', ['same', 'different', 'error'])
- @parametrize('use_generator', [True, False])
+ @parametrize("randomness", ["same", "different", "error"])
+ @parametrize("use_generator", [True, False])
def test_randperm(self, device, randomness, use_generator):
-
# needs a special case because randperm doesn't take a batch size
B0 = 4
seed = 1234567
@@ -4499,37 +5224,46 @@ class TestRandomness(TestCase):
generator = torch.Generator(device=device)
orig_state = generator.get_state()
- kwargs = {'device': device, 'generator': generator} if use_generator else {'device': device}
+ kwargs = (
+ {"device": device, "generator": generator}
+ if use_generator
+ else {"device": device}
+ )
- if randomness == 'error':
- with self.assertRaisesRegex(RuntimeError, r"called random operation while in randomness error mode"):
- vmap(lambda _: torch.randperm(10, **kwargs), randomness=randomness)(passed)
+ if randomness == "error":
+ with self.assertRaisesRegex(
+ RuntimeError, r"called random operation while in randomness error mode"
+ ):
+ vmap(lambda _: torch.randperm(10, **kwargs), randomness=randomness)(
+ passed
+ )
return
- vmap_result = vmap(lambda _: torch.randperm(10, **kwargs), randomness=randomness)(passed)
+ vmap_result = vmap(
+ lambda _: torch.randperm(10, **kwargs), randomness=randomness
+ )(passed)
generator = generator.set_state(orig_state)
torch.manual_seed(seed)
- if randomness == 'different':
+ if randomness == "different":
for i in range(B0):
expected = torch.randperm(10, **kwargs)
# RNG differs between eager and via dynamo trace on CUDA
- if (TEST_WITH_TORCHDYNAMO and torch.device(device).type == 'cuda'):
+ if TEST_WITH_TORCHDYNAMO and torch.device(device).type == "cuda":
self._assert_all_slices_unique(vmap_result)
else:
self.assertEqual(vmap_result[i], expected)
else:
expected = torch.randperm(10, **kwargs)
# RNG differs between eager and via dynamo trace on CUDA
- if (TEST_WITH_TORCHDYNAMO and torch.device(device).type == 'cuda'):
+ if TEST_WITH_TORCHDYNAMO and torch.device(device).type == "cuda":
self._assert_all_slices_equal(vmap_result)
else:
for i in range(B0):
self.assertEqual(vmap_result[i], expected)
- @parametrize('randomness', ['error', 'same', 'different'])
- @parametrize('batched_input', ["first", "last", "none"])
+ @parametrize("randomness", ["error", "same", "different"])
+ @parametrize("batched_input", ["first", "last", "none"])
def test_dropout(self, device, randomness, batched_input):
-
def op(t, ignored):
return torch.nn.functional.dropout(torch.ones_like(t), training=True)
@@ -4538,12 +5272,16 @@ class TestRandomness(TestCase):
passed = self._get_image(batched_input, B0, device)
in_dims = self._in_dims(batched_input)
- if randomness == 'error':
- with self.assertRaisesRegex(RuntimeError, r"called random operation while in randomness error mode"):
+ if randomness == "error":
+ with self.assertRaisesRegex(
+ RuntimeError, r"called random operation while in randomness error mode"
+ ):
vmap(op, randomness=randomness, in_dims=in_dims)(passed, always_batched)
return
- vmap_result = vmap(op, randomness=randomness, in_dims=in_dims)(passed, always_batched)
+ vmap_result = vmap(op, randomness=randomness, in_dims=in_dims)(
+ passed, always_batched
+ )
# Check that the randomness is within bounds...
# ideally this is close to 0.5
@@ -4551,17 +5289,16 @@ class TestRandomness(TestCase):
self.assertTrue(p_estimate < 0.75)
self.assertTrue(p_estimate > 0.25)
- if randomness == 'different':
+ if randomness == "different":
self._assert_all_slices_unique(vmap_result)
return
- assert randomness == 'same'
+ assert randomness == "same"
self._assert_all_slices_equal(vmap_result)
- @parametrize('randomness', ['error', 'same', 'different'])
- @parametrize('batched_input', ["first", "last", "none"])
+ @parametrize("randomness", ["error", "same", "different"])
+ @parametrize("batched_input", ["first", "last", "none"])
def test_alpha_dropout(self, device, randomness, batched_input):
-
def op(t, ignored):
return torch.nn.functional.alpha_dropout(torch.ones_like(t), training=True)
@@ -4570,28 +5307,35 @@ class TestRandomness(TestCase):
passed = self._get_image(batched_input, B0, device)
in_dims = self._in_dims(batched_input)
- if randomness == 'error':
- with self.assertRaisesRegex(RuntimeError, r"called random operation while in randomness error mode"):
+ if randomness == "error":
+ with self.assertRaisesRegex(
+ RuntimeError, r"called random operation while in randomness error mode"
+ ):
vmap(op, randomness=randomness, in_dims=in_dims)(passed, always_batched)
return
# I have no clue how to actually test correctness of alpha dropout because the docs
# seem wrong: https://github.com/pytorch/pytorch/issues/74004
- vmap_result = vmap(op, randomness=randomness, in_dims=in_dims)(passed, always_batched)
- if randomness == 'different':
+ vmap_result = vmap(op, randomness=randomness, in_dims=in_dims)(
+ passed, always_batched
+ )
+ if randomness == "different":
self._assert_all_slices_unique(vmap_result)
return
- assert randomness == 'same'
+ assert randomness == "same"
self._assert_all_slices_equal(vmap_result)
- @parametrize('randomness', ['error', 'same', 'different'])
- @parametrize('batched_input', ["first", "last", "none"])
- @parametrize('dim', [2, 3])
+ @parametrize("randomness", ["error", "same", "different"])
+ @parametrize("batched_input", ["first", "last", "none"])
+ @parametrize("dim", [2, 3])
def test_feature_dropout(self, device, randomness, batched_input, dim):
-
def op(t, ignored):
- f = torch.nn.functional.dropout2d if dim == 2 else torch.nn.functional.dropout3d
+ f = (
+ torch.nn.functional.dropout2d
+ if dim == 2
+ else torch.nn.functional.dropout3d
+ )
return f(torch.ones_like(t), training=True)
B0 = 4
@@ -4602,33 +5346,42 @@ class TestRandomness(TestCase):
passed = passed.unsqueeze(unsqueeze_dim)
in_dims = self._in_dims(batched_input)
- if randomness == 'error':
- with self.assertRaisesRegex(RuntimeError, r"called random operation while in randomness error mode"):
+ if randomness == "error":
+ with self.assertRaisesRegex(
+ RuntimeError, r"called random operation while in randomness error mode"
+ ):
vmap(op, randomness=randomness, in_dims=in_dims)(passed, always_batched)
return
- vmap_result = vmap(op, randomness=randomness, in_dims=in_dims)(passed, always_batched)
+ vmap_result = vmap(op, randomness=randomness, in_dims=in_dims)(
+ passed, always_batched
+ )
# Check the "feature" pattern
dims = [-1, -2] if dim == 2 else [-1, -2, -3]
- planes_numel = 2 * vmap_result.numel() / (vmap_result.shape[0] * vmap_result.shape[1] * vmap_result.shape[2])
+ planes_numel = (
+ 2
+ * vmap_result.numel()
+ / (vmap_result.shape[0] * vmap_result.shape[1] * vmap_result.shape[2])
+ )
planes = vmap_result.sum(dims)
result = (planes == 0) ^ (planes == planes_numel)
self.assertEqual(result, torch.ones_like(result, dtype=torch.bool))
- if randomness == 'different':
+ if randomness == "different":
self._assert_all_slices_unique(vmap_result)
return
- assert randomness == 'same'
+ assert randomness == "same"
self._assert_all_slices_equal(vmap_result)
- @parametrize('randomness', ['error', 'same', 'different'])
- @parametrize('batched_input', ["first", "last", "none"])
+ @parametrize("randomness", ["error", "same", "different"])
+ @parametrize("batched_input", ["first", "last", "none"])
def test_feature_alpha_dropout(self, device, randomness, batched_input):
-
def op(t, ignored):
- return torch.nn.functional.feature_alpha_dropout(torch.ones_like(t), training=True)
+ return torch.nn.functional.feature_alpha_dropout(
+ torch.ones_like(t), training=True
+ )
B0 = 4
always_batched = torch.randn((B0,))
@@ -4637,12 +5390,16 @@ class TestRandomness(TestCase):
passed = passed.unsqueeze(unsqueeze_dim)
in_dims = self._in_dims(batched_input)
- if randomness == 'error':
- with self.assertRaisesRegex(RuntimeError, r"called random operation while in randomness error mode"):
+ if randomness == "error":
+ with self.assertRaisesRegex(
+ RuntimeError, r"called random operation while in randomness error mode"
+ ):
vmap(op, randomness=randomness, in_dims=in_dims)(passed, always_batched)
return
- vmap_result = vmap(op, randomness=randomness, in_dims=in_dims)(passed, always_batched)
+ vmap_result = vmap(op, randomness=randomness, in_dims=in_dims)(
+ passed, always_batched
+ )
# I have no clue how to actually test correctness of alpha dropout because the docs
# seem wrong: https://github.com/pytorch/pytorch/issues/74004
@@ -4655,17 +5412,16 @@ class TestRandomness(TestCase):
result = (planes == min_elt) ^ (planes == max_elt)
self.assertEqual(result, torch.ones_like(result, dtype=torch.bool))
- if randomness == 'different':
+ if randomness == "different":
self._assert_all_slices_unique(vmap_result)
return
- assert randomness == 'same'
+ assert randomness == "same"
self._assert_all_slices_equal(vmap_result)
- @parametrize('randomness', ['error', 'same', 'different'])
- @parametrize('batched_input', ["first", "last", "none"])
+ @parametrize("randomness", ["error", "same", "different"])
+ @parametrize("batched_input", ["first", "last", "none"])
def test_like_functions(self, device, randomness, batched_input):
-
seed = 1234567
supported_ops = [
lambda t, _: torch.randint_like(t, 20),
@@ -4680,46 +5436,55 @@ class TestRandomness(TestCase):
passed = self._get_image(batched_input, B0, device)
in_dims = self._in_dims(batched_input)
- if randomness == 'error':
- with self.assertRaisesRegex(RuntimeError, r"called random operation while in randomness error mode"):
- vmap(op, in_dims=in_dims, randomness=randomness)(passed, always_batched)
+ if randomness == "error":
+ with self.assertRaisesRegex(
+ RuntimeError,
+ r"called random operation while in randomness error mode",
+ ):
+ vmap(op, in_dims=in_dims, randomness=randomness)(
+ passed, always_batched
+ )
return
torch.manual_seed(seed)
- vmap_result = vmap(op, randomness=randomness, in_dims=in_dims)(passed, always_batched)
+ vmap_result = vmap(op, randomness=randomness, in_dims=in_dims)(
+ passed, always_batched
+ )
torch.manual_seed(seed)
if batched_input == "last":
passed = passed.movedim(-1, 0)
- if randomness == 'different':
+ if randomness == "different":
if batched_input == "none":
passed = passed.expand(B0, *passed.shape)
expected = op(passed, 0)
self._assert_all_slices_unique(vmap_result)
# RNG differs between eager and via dynamo trace on CUDA
- if not (TEST_WITH_TORCHDYNAMO and torch.device(device).type == 'cuda'):
+ if not (TEST_WITH_TORCHDYNAMO and torch.device(device).type == "cuda"):
self.assertEqual(expected, vmap_result)
return
- assert randomness == 'same'
+ assert randomness == "same"
if batched_input != "none":
passed = passed[0]
expected = op(passed, 0)
self._assert_all_slices_equal(vmap_result)
# RNG differs between eager and via dynamo trace on CUDA
- if not (TEST_WITH_TORCHDYNAMO and torch.device(device).type == 'cuda'):
+ if not (TEST_WITH_TORCHDYNAMO and torch.device(device).type == "cuda"):
for i in range(B0):
self.assertEqual(expected, vmap_result[i])
- @parametrize('use_generator', [True, False])
- @parametrize('randomness', ['error', 'same', 'different'])
- @parametrize('batched_input', ["first", "last", "none"])
- def test_random_unary_inplace(self, device, use_generator, randomness, batched_input):
+ @parametrize("use_generator", [True, False])
+ @parametrize("randomness", ["error", "same", "different"])
+ @parametrize("batched_input", ["first", "last", "none"])
+ def test_random_unary_inplace(
+ self, device, use_generator, randomness, batched_input
+ ):
generator = torch.Generator(device=device)
orig_state = generator.get_state()
- kwargs = {'generator': generator} if use_generator else {}
+ kwargs = {"generator": generator} if use_generator else {}
ops = [
lambda t, _: t.random_(**kwargs),
lambda t, _: t.random_(100, **kwargs),
@@ -4742,15 +5507,21 @@ class TestRandomness(TestCase):
passed = self._get_image(batched_input, B0, device)
passed_expected = passed.clone()
- if randomness == 'error':
- self._assert_throws_in_error_mode(op, (passed, always_batched), in_dims=in_dims)
+ if randomness == "error":
+ self._assert_throws_in_error_mode(
+ op, (passed, always_batched), in_dims=in_dims
+ )
return
- if randomness == 'different' and batched_input == "none":
- self._assert_throws_in_different_mode_inplace(op, (passed, always_batched), in_dims=in_dims)
+ if randomness == "different" and batched_input == "none":
+ self._assert_throws_in_different_mode_inplace(
+ op, (passed, always_batched), in_dims=in_dims
+ )
return
generator = self._reset_random(generator, orig_state, use_generator, seed)
- vmap_result = vmap(op, in_dims=in_dims, randomness=randomness)(passed, always_batched)
+ vmap_result = vmap(op, in_dims=in_dims, randomness=randomness)(
+ passed, always_batched
+ )
if batched_input == "last":
passed_expected = passed_expected.movedim(-1, 0)
@@ -4761,22 +5532,26 @@ class TestRandomness(TestCase):
self.assertEqual(vmap_result, expected)
else:
if batched_input != "none":
- passed_expected = passed_expected[0].clone() # bug in pytorch, normal_ on views doesn't work
+ passed_expected = passed_expected[
+ 0
+ ].clone() # bug in pytorch, normal_ on views doesn't work
expected = op(passed_expected, always_batched)
self._assert_all_slices_equal(vmap_result)
for i in range(B0):
self.assertEqual(vmap_result[i], expected)
- @parametrize('use_generator', [True, False])
- @parametrize('randomness', ['error', 'same', 'different'])
- @parametrize('batched_input', ["first", "last", "none"])
- @parametrize('batched_probability', ["first", "last", "none"])
- def test_bernoulli_in_place(self, device, use_generator, randomness, batched_input, batched_probability):
+ @parametrize("use_generator", [True, False])
+ @parametrize("randomness", ["error", "same", "different"])
+ @parametrize("batched_input", ["first", "last", "none"])
+ @parametrize("batched_probability", ["first", "last", "none"])
+ def test_bernoulli_in_place(
+ self, device, use_generator, randomness, batched_input, batched_probability
+ ):
B0 = 4
seed = 1234567
generator = torch.Generator(device=device)
orig_state = generator.get_state()
- kwargs = {'generator': generator} if use_generator else {}
+ kwargs = {"generator": generator} if use_generator else {}
in_dims = self._in_dims(batched_input, batched_probability)
def op(t, p, ignored):
@@ -4788,23 +5563,33 @@ class TestRandomness(TestCase):
input_expected = input.clone()
probability = self._get_image(batched_probability, B0, device) - 0.5
- if randomness == 'error':
- self._assert_throws_in_error_mode(op, (input, probability, always_batched), in_dims=in_dims)
+ if randomness == "error":
+ self._assert_throws_in_error_mode(
+ op, (input, probability, always_batched), in_dims=in_dims
+ )
return
- if randomness == 'same' and batched_probability != "none":
- self._assert_throws_in_same_mode_batched(op, (input, probability, always_batched), in_dims=in_dims)
+ if randomness == "same" and batched_probability != "none":
+ self._assert_throws_in_same_mode_batched(
+ op, (input, probability, always_batched), in_dims=in_dims
+ )
return
if batched_input == "none" and batched_probability != "none":
regex = r"there exists a Tensor `other` in extra_args that has more elements than `self`"
with self.assertRaisesRegex(RuntimeError, regex):
- vmap(op, in_dims=in_dims, randomness=randomness)(input, probability, always_batched)
+ vmap(op, in_dims=in_dims, randomness=randomness)(
+ input, probability, always_batched
+ )
return
- if randomness == 'different' and batched_input == "none":
- self._assert_throws_in_different_mode_inplace(op, (input, probability, always_batched), in_dims=in_dims)
+ if randomness == "different" and batched_input == "none":
+ self._assert_throws_in_different_mode_inplace(
+ op, (input, probability, always_batched), in_dims=in_dims
+ )
return
self._reset_random(generator, orig_state, use_generator, seed)
- vmap_result = vmap(op, in_dims=in_dims, randomness=randomness)(input, probability, always_batched)
+ vmap_result = vmap(op, in_dims=in_dims, randomness=randomness)(
+ input, probability, always_batched
+ )
self._reset_random(generator, orig_state, use_generator, seed)
if batched_input == "last":
@@ -4823,15 +5608,16 @@ class TestRandomness(TestCase):
for i in range(B0):
self.assertEqual(vmap_result[i], expected)
- @parametrize('use_generator', [True, False])
- @parametrize('randomness', ['error', 'same', 'different'])
- @parametrize('batched_input', ["first", "last", "none"])
- @parametrize('batched_other', ["first", "last", "none"])
- def test_random_binary_out_of_place(self, device, use_generator, randomness, batched_input, batched_other):
-
+ @parametrize("use_generator", [True, False])
+ @parametrize("randomness", ["error", "same", "different"])
+ @parametrize("batched_input", ["first", "last", "none"])
+ @parametrize("batched_other", ["first", "last", "none"])
+ def test_random_binary_out_of_place(
+ self, device, use_generator, randomness, batched_input, batched_other
+ ):
generator = torch.Generator(device=device)
orig_state = generator.get_state()
- kwargs = {'generator': generator} if use_generator else {}
+ kwargs = {"generator": generator} if use_generator else {}
ops = [
lambda t, o, _: torch.normal(t, o, **kwargs),
lambda t, o, _: torch.binomial(t, (o - 0.5), **kwargs),
@@ -4846,15 +5632,23 @@ class TestRandomness(TestCase):
input = self._get_image(batched_input, B0, device)
other = self._get_image(batched_other, B0, device)
- if randomness == 'error':
- self._assert_throws_in_error_mode(op, (input, other, always_batched), in_dims=in_dims)
+ if randomness == "error":
+ self._assert_throws_in_error_mode(
+ op, (input, other, always_batched), in_dims=in_dims
+ )
return
- if randomness == 'same' and (batched_input != "none" or batched_other != "none"):
- self._assert_throws_in_same_mode_batched(op, (input, other, always_batched), in_dims=in_dims)
+ if randomness == "same" and (
+ batched_input != "none" or batched_other != "none"
+ ):
+ self._assert_throws_in_same_mode_batched(
+ op, (input, other, always_batched), in_dims=in_dims
+ )
return
generator = self._reset_random(generator, orig_state, use_generator, seed)
- vmap_result = vmap(op, in_dims=in_dims, randomness=randomness)(input, other, always_batched)
+ vmap_result = vmap(op, in_dims=in_dims, randomness=randomness)(
+ input, other, always_batched
+ )
if batched_input == "last":
input = input.movedim(-1, 0)
@@ -4875,17 +5669,18 @@ class TestRandomness(TestCase):
for i in range(B0):
self.assertEqual(vmap_result[i], expected)
- @parametrize('use_generator', [True, False])
- @parametrize('randomness', ['error', 'same', 'different'])
- @parametrize('batched_input', ["first", "last", "none"])
- def test_random_unary_out_of_place(self, device, use_generator, randomness, batched_input):
-
+ @parametrize("use_generator", [True, False])
+ @parametrize("randomness", ["error", "same", "different"])
+ @parametrize("batched_input", ["first", "last", "none"])
+ def test_random_unary_out_of_place(
+ self, device, use_generator, randomness, batched_input
+ ):
generator = torch.Generator(device=device)
orig_state = generator.get_state()
- kwargs = {'generator': generator} if use_generator else {}
+ kwargs = {"generator": generator} if use_generator else {}
ops = [
- lambda t, _: torch.normal(0., torch.abs(t), **kwargs),
- lambda t, _: torch.normal(t, 1., **kwargs),
+ lambda t, _: torch.normal(0.0, torch.abs(t), **kwargs),
+ lambda t, _: torch.normal(t, 1.0, **kwargs),
lambda t, _: torch.bernoulli(t - 0.5, **kwargs),
lambda t, _: torch.bernoulli(t, 0.5, **kwargs),
lambda t, _: torch._standard_gamma(t, **kwargs),
@@ -4900,15 +5695,21 @@ class TestRandomness(TestCase):
for op in ops:
always_batched = torch.randn(B0, device=device)
passed = self._get_image(batched_input, B0, device)
- if randomness == 'error':
- self._assert_throws_in_error_mode(op, (passed, always_batched), in_dims=in_dims)
+ if randomness == "error":
+ self._assert_throws_in_error_mode(
+ op, (passed, always_batched), in_dims=in_dims
+ )
return
- if randomness == 'same' and batched_input != "none":
- self._assert_throws_in_same_mode_batched(op, (passed, always_batched), in_dims=in_dims)
+ if randomness == "same" and batched_input != "none":
+ self._assert_throws_in_same_mode_batched(
+ op, (passed, always_batched), in_dims=in_dims
+ )
return
generator = self._reset_random(generator, orig_state, use_generator, seed)
- vmap_result = vmap(op, in_dims=in_dims, randomness=randomness)(passed, always_batched)
+ vmap_result = vmap(op, in_dims=in_dims, randomness=randomness)(
+ passed, always_batched
+ )
generator = self._reset_random(generator, orig_state, use_generator, seed)
if randomness == "different":
@@ -4925,12 +5726,13 @@ class TestRandomness(TestCase):
for i in range(B0):
self.assertEqual(vmap_result[i], expected)
- @parametrize('use_generator', [True, False])
- @parametrize('randomness', ['error', 'same', 'different'])
- @parametrize('batched_call', [True, False])
- @parametrize('batched_input', ["first", "last", "none"])
- def test_multinomial(self, device, use_generator, randomness, batched_call, batched_input):
-
+ @parametrize("use_generator", [True, False])
+ @parametrize("randomness", ["error", "same", "different"])
+ @parametrize("batched_call", [True, False])
+ @parametrize("batched_input", ["first", "last", "none"])
+ def test_multinomial(
+ self, device, use_generator, randomness, batched_call, batched_input
+ ):
def flatten_input(input, batch_call, batch_location):
if batch_call and batch_location != "none":
final_size = 3 # [B0, B, N]
@@ -4943,7 +5745,9 @@ class TestRandomness(TestCase):
end_idx = -1
if batch_location == "last":
start_idx -= 1
- end_idx -= 1 # gets to correct final size because using negative indices
+ end_idx -= (
+                    1  # gets to the correct final size via negative indices
+ )
ret = input.flatten(start_idx, end_idx)
assert ret.dim() == final_size
@@ -4954,7 +5758,7 @@ class TestRandomness(TestCase):
generator = torch.Generator(device=device)
orig_state = generator.get_state()
- kwargs = {'generator': generator} if use_generator else {}
+ kwargs = {"generator": generator} if use_generator else {}
B0 = 4
seed = 1234567
@@ -4963,15 +5767,21 @@ class TestRandomness(TestCase):
always_batched = torch.randn(B0, device=device)
passed = self._get_image(batched_input, B0, device)
passed = flatten_input(passed, batched_call, batched_input)
- if randomness == 'error':
- self._assert_throws_in_error_mode(op, (passed, always_batched), in_dims=in_dims)
+ if randomness == "error":
+ self._assert_throws_in_error_mode(
+ op, (passed, always_batched), in_dims=in_dims
+ )
return
- if randomness == 'same' and batched_input != "none":
- self._assert_throws_in_same_mode_batched(op, (passed, always_batched), in_dims=in_dims)
+ if randomness == "same" and batched_input != "none":
+ self._assert_throws_in_same_mode_batched(
+ op, (passed, always_batched), in_dims=in_dims
+ )
return
generator = self._reset_random(generator, orig_state, use_generator, seed)
- vmap_result = vmap(op, in_dims=in_dims, randomness=randomness)(passed, always_batched)
+ vmap_result = vmap(op, in_dims=in_dims, randomness=randomness)(
+ passed, always_batched
+ )
generator = self._reset_random(generator, orig_state, use_generator, seed)
@@ -4997,22 +5807,27 @@ class TestRandomness(TestCase):
y = x.abs()
z = x.abs()
with self.assertRaisesRegex(RuntimeError, "calling out variants"):
+
def f(x):
return torch.randn(3, device=device, out=y)
- vmap(f, randomness='same')(x)
+
+ vmap(f, randomness="same")(x)
with self.assertRaisesRegex(RuntimeError, "calling out variants"):
+
def f(x0, x1):
return torch.normal(x, y, out=x)
- vmap(f, randomness='same')(z, z)
+
+ vmap(f, randomness="same")(z, z)
with self.assertRaisesRegex(RuntimeError, "do not yet support"):
+
def f(z):
return torch.rrelu(x)
- vmap(f, randomness='same')(z)
- @parametrize('in_dim', [0, 1, 2])
- @parametrize('out_dim', [0, 1, 2])
- def test_chunk_vmap(self, in_dim, out_dim):
+ vmap(f, randomness="same")(z)
+ @parametrize("in_dim", [0, 1, 2])
+ @parametrize("out_dim", [0, 1, 2])
+ def test_chunk_vmap(self, in_dim, out_dim):
randomness = "different"
x = torch.randn(4, 5, 6)
@@ -5023,14 +5838,17 @@ class TestRandomness(TestCase):
for chunks in [1, 2, 3, 4, 7, 10, 16]:
output = chunk_vmap(
- f, in_dims=in_dim, out_dims=out_dim, randomness=randomness, chunks=chunks
+ f,
+ in_dims=in_dim,
+ out_dims=out_dim,
+ randomness=randomness,
+ chunks=chunks,
)(x)
self._assert_all_slices_unique(output)
- @parametrize('in_dim', [0, 1, 2])
- @parametrize('out_dim', [0, 1, 2])
+ @parametrize("in_dim", [0, 1, 2])
+ @parametrize("out_dim", [0, 1, 2])
def test_vmap_chunksize(self, in_dim, out_dim):
-
randomness = "different"
x = torch.randn(4, 5, 6)
@@ -5041,26 +5859,30 @@ class TestRandomness(TestCase):
for chunk_size in [1, 2, 3, 4, 7, 10, 16, 100]:
output = vmap(
- f, in_dims=in_dim, out_dims=out_dim, randomness=randomness, chunk_size=chunk_size
+ f,
+ in_dims=in_dim,
+ out_dims=out_dim,
+ randomness=randomness,
+ chunk_size=chunk_size,
)(x)
self._assert_all_slices_unique(output)
-
def test_jacfwd_with_random(self):
# checks on behavior are above, this just checks that jacfwd respects
# the randomness param
x = torch.rand(3, 4)
- with self.assertRaisesRegex(RuntimeError, r"called random operation while in randomness error mode"):
+ with self.assertRaisesRegex(
+ RuntimeError, r"called random operation while in randomness error mode"
+ ):
jacfwd(torch.bernoulli)(x)
# x isn't batched so use bernoulli since it doesn't do inplace randomness
jacfwd(torch.bernoulli, randomness="same")(x)
jacfwd(torch.bernoulli, randomness="different")(x)
- @parametrize('randomness', ['error', 'same', 'different'])
+ @parametrize("randomness", ["error", "same", "different"])
def test_dropout_unbatched(self, device, randomness):
-
x = torch.randn(3, device=device)
y = torch.randn(1, 3, device=device)
@@ -5071,20 +5893,34 @@ class TestRandomness(TestCase):
# We just verify that this doesn't raise an error for
# `same` and `different` randomness.
# Ref: https://github.com/pytorch/pytorch/issues/92283
- context = self.assertRaises(RuntimeError) if randomness == 'error' else contextlib.nullcontext()
+ context = (
+ self.assertRaises(RuntimeError)
+ if randomness == "error"
+ else contextlib.nullcontext()
+ )
with context:
vmap(fn, in_dims=(0, None), randomness=randomness)(x, y)
+
@markDynamoStrictTest
class TestTransformFailure(TestCase):
@skipIfTorchDynamo()
- @parametrize('transform', ['vmap', 'grad', 'grad_and_value', 'vjp', 'jvp', 'jacrev', 'jacfwd'])
+ @parametrize(
+ "transform",
+ ["vmap", "grad", "grad_and_value", "vjp", "jvp", "jacrev", "jacfwd"],
+ )
def test_fails_with_autograd_function(self, device, transform):
- failed_build_envs = ('linux-focal-py3.8-clang10', 'linux-focal-py3.11-clang10')
- if (device == 'cpu' and transform in ['grad', 'vmap'] and
- TEST_WITH_TORCHDYNAMO and os.getenv('BUILD_ENVIRONMENT', '') in failed_build_envs):
- raise unittest.SkipTest("Unexpected successes on focal with dynamo," +
- " see https://github.com/pytorch/pytorch/issues/107173")
+ failed_build_envs = ("linux-focal-py3.8-clang10", "linux-focal-py3.11-clang10")
+ if (
+ device == "cpu"
+ and transform in ["grad", "vmap"]
+ and TEST_WITH_TORCHDYNAMO
+ and os.getenv("BUILD_ENVIRONMENT", "") in failed_build_envs
+ ):
+ raise unittest.SkipTest(
+ "Unexpected successes on focal with dynamo,"
+ + " see https://github.com/pytorch/pytorch/issues/107173"
+ )
class Test(torch.autograd.Function):
@staticmethod
@@ -5101,7 +5937,7 @@ class TestTransformFailure(TestCase):
return Test.apply(x)
if transform in (grad, grad_and_value):
- input = torch.tensor(4.)
+ input = torch.tensor(4.0)
else:
input = torch.randn(5)
@@ -5116,6 +5952,7 @@ class TestTransformFailure(TestCase):
with self.assertRaisesRegex(RuntimeError, "autograd.Function"):
transform(input)
+
@markDynamoStrictTest
class TestVmapDeviceType(Namespace.TestVmapBase):
def _vmap_test(self, *args, **kwargs):
@@ -5213,6 +6050,7 @@ class TestVmapDeviceType(Namespace.TestVmapBase):
check_vmap_fallback(self, test, torch._test_check_tensor)
+
@markDynamoStrictTest
class TestVmapNestedTensor(Namespace.TestVmapBase):
def _vmap_test(self, *args, **kwargs):
@@ -5222,12 +6060,15 @@ class TestVmapNestedTensor(Namespace.TestVmapBase):
# random ragged structure should be used
def _create_nt(self, dims, device):
sizes = [
- [d if d is not None else torch.randint(2, 10, size=(1,)).item() for d in dims[1:]]
+ [
+ d if d is not None else torch.randint(2, 10, size=(1,)).item()
+ for d in dims[1:]
+ ]
for d in range(dims[0])
]
- return torch.nested.nested_tensor([
- torch.randn(*size) for size in sizes
- ], device=device)
+ return torch.nested.nested_tensor(
+ [torch.randn(*size) for size in sizes], device=device
+ )
# Creates an NT matching another NT's number of components and
# shape / ragged structure for all dims specified to be -1.
@@ -5246,14 +6087,14 @@ class TestVmapNestedTensor(Namespace.TestVmapBase):
ret_size.append(d)
ret_sizes.append(ret_size)
- return torch.nested.nested_tensor([
- torch.randn(*size) for size in ret_sizes
- ], device=other.device)
+ return torch.nested.nested_tensor(
+ [torch.randn(*size) for size in ret_sizes], device=other.device
+ )
@allowVmapFallbackUsage
def test_fallback_unary(self, device):
def f(x):
- return x.sin() * 5. + 4.
+ return x.sin() * 5.0 + 4.0
nt = self._create_nt([4, None, 3], device=device)
self._vmap_test(f, (nt,))
@@ -5332,7 +6173,8 @@ class TestVmapNestedTensor(Namespace.TestVmapBase):
x = self._create_nt([3, None, 2], device=device)
with self.assertRaisesRegex(
- RuntimeError, "Nested tensors can only be vmapped over dim=0"):
+ RuntimeError, "Nested tensors can only be vmapped over dim=0"
+ ):
vmap(f, in_dims=2)(x)
def test_nt_with_nonzero_out_dim_raises(self, device):
@@ -5341,7 +6183,8 @@ class TestVmapNestedTensor(Namespace.TestVmapBase):
x = self._create_nt([3, None, 2], device=device)
with self.assertRaisesRegex(
- RuntimeError, "Nested tensors can only be vmapped over dim=0"):
+ RuntimeError, "Nested tensors can only be vmapped over dim=0"
+ ):
vmap(f, out_dims=2)(x)
def test_fallback_with_nt_and_batched_dense_with_nonzero_bdim_raises(self, device):
@@ -5353,22 +6196,27 @@ class TestVmapNestedTensor(Namespace.TestVmapBase):
with self.assertRaisesRegex(
RuntimeError,
- "Fallback not supported for mixed nested / non-nested arguments without bdim=0"
+ "Fallback not supported for mixed nested / non-nested arguments without bdim=0",
):
vmap(f, in_dims=(0, 1))(x, y)
def test_multilevel_vmap_raises(self, device):
def f(x):
- return x.sin() * 4. + 3.
+ return x.sin() * 4.0 + 3.0
x = self._create_nt([2, 2, 2, None], device=device)
- with self.assertRaisesRegex(RuntimeError, "Only one level of vmap is supported"):
+ with self.assertRaisesRegex(
+ RuntimeError, "Only one level of vmap is supported"
+ ):
vmap(vmap(f))(x)
- with self.assertRaisesRegex(RuntimeError, "Only one level of vmap is supported"):
+ with self.assertRaisesRegex(
+ RuntimeError, "Only one level of vmap is supported"
+ ):
vmap(vmap(vmap(f)))(x)
+
only_for = ("cpu", "cuda")
instantiate_device_type_tests(TestVmapOperatorsOpInfo, globals(), only_for=only_for)
@@ -5382,5 +6230,5 @@ instantiate_device_type_tests(TestRandomness, globals(), only_for=only_for)
instantiate_device_type_tests(TestVmapDeviceType, globals(), only_for=only_for)
instantiate_device_type_tests(TestVmapNestedTensor, globals(), only_for=only_for)
-if __name__ == '__main__':
+if __name__ == "__main__":
run_tests()
diff --git a/test/functorch/test_vmap_registrations.py b/test/functorch/test_vmap_registrations.py
index 4952f2745b..967152945a 100644
--- a/test/functorch/test_vmap_registrations.py
+++ b/test/functorch/test_vmap_registrations.py
@@ -2,16 +2,16 @@
import typing
import unittest
+from torch._C import (
+ _dispatch_get_registrations_for_dispatch_key as get_registrations_for_dispatch_key,
+)
+
from torch.testing._internal.common_utils import (
- TestCase,
- run_tests,
instantiate_parametrized_tests,
parametrize,
- subtest
-)
-
-from torch._C import (
- _dispatch_get_registrations_for_dispatch_key as get_registrations_for_dispatch_key,
+ run_tests,
+ subtest,
+ TestCase,
)
xfail_functorch_batched = {
@@ -226,17 +226,21 @@ xfail_not_implemented = {
"aten::var_mean.correction_names",
"aten::var_mean.names_dim",
"aten::where",
-
}
def dispatch_registrations(
- dispatch_key: str, xfails: set, filter_func: typing.Callable = lambda reg: True):
+ dispatch_key: str, xfails: set, filter_func: typing.Callable = lambda reg: True
+):
registrations = sorted(get_registrations_for_dispatch_key(dispatch_key))
subtests = [
- subtest(reg, name=f"[{reg}]",
- decorators=([unittest.expectedFailure] if reg in xfails else []))
- for reg in registrations if filter_func(reg)
+ subtest(
+ reg,
+ name=f"[{reg}]",
+ decorators=([unittest.expectedFailure] if reg in xfails else []),
+ )
+ for reg in registrations
+ if filter_func(reg)
]
return parametrize("registration", subtests)
@@ -262,17 +266,17 @@ def filter_vmap_implementable(reg):
return False
if reg.endswith("_out"):
return False
- if '.dimname' in reg:
+ if ".dimname" in reg:
return False
if "_dimname" in reg:
return False
- if 'fbgemm' in reg:
+ if "fbgemm" in reg:
return False
- if 'quantize' in reg:
+ if "quantize" in reg:
return False
- if 'sparse' in reg:
+ if "sparse" in reg:
return False
- if '::is_' in reg:
+ if "::is_" in reg:
return False
return True
diff --git a/test/functorch/xfail_suggester.py b/test/functorch/xfail_suggester.py
index cfe1460a01..5de2e0e3d8 100644
--- a/test/functorch/xfail_suggester.py
+++ b/test/functorch/xfail_suggester.py
@@ -1,4 +1,5 @@
import re
+
import torch
"""
@@ -8,11 +9,11 @@ Instructions:
2. python test/xfail_suggester.py
"""
-with open('result.txt') as f:
+with open("result.txt") as f:
lines = f.readlines()
-failed = [line for line in lines if line.startswith('FAILED')]
-p = re.compile('FAILED test/test_\w+.py::\w+::(\S+)') # noqa: W605
+failed = [line for line in lines if line.startswith("FAILED")]
+p = re.compile("FAILED test/test_\w+.py::\w+::(\S+)") # noqa: W605
def get_failed_test(line):
@@ -23,22 +24,22 @@ def get_failed_test(line):
base_names = {
- 'test_grad_',
- 'test_vjp_',
- 'test_vmapvjp_',
- 'test_vmapvjp_has_batch_rule_',
- 'test_vjpvmap_',
- 'test_jvp_',
- 'test_vmapjvp_',
- 'test_vmapjvpall_has_batch_rule_',
- 'test_vmapjvpall_',
- 'test_jvpvjp_',
- 'test_vjpvjp_',
- 'test_decomposition_',
- 'test_make_fx_exhaustive_',
- 'test_vmap_exhaustive_',
- 'test_op_has_batch_rule_',
- 'test_vmap_autograd_grad_',
+ "test_grad_",
+ "test_vjp_",
+ "test_vmapvjp_",
+ "test_vmapvjp_has_batch_rule_",
+ "test_vjpvmap_",
+ "test_jvp_",
+ "test_vmapjvp_",
+ "test_vmapjvpall_has_batch_rule_",
+ "test_vmapjvpall_",
+ "test_jvpvjp_",
+ "test_vjpvjp_",
+ "test_decomposition_",
+ "test_make_fx_exhaustive_",
+ "test_vmap_exhaustive_",
+ "test_op_has_batch_rule_",
+ "test_vmap_autograd_grad_",
}
failed_tests = [get_failed_test(line) for line in lines]
@@ -49,7 +50,7 @@ suggested_xfails = {}
def remove_device_dtype(test):
- return '_'.join(test.split('_')[:-2])
+ return "_".join(test.split("_")[:-2])
def belongs_to_base(test, base):
@@ -64,23 +65,23 @@ def belongs_to_base(test, base):
def parse_namespace(base):
mappings = {
- 'nn_functional_': 'nn.functional',
- 'fft_': 'fft',
- 'linalg_': 'linalg',
- '_masked_': '_masked',
- 'sparse_': 'sparse',
- 'special_': 'special',
+ "nn_functional_": "nn.functional",
+ "fft_": "fft",
+ "linalg_": "linalg",
+ "_masked_": "_masked",
+ "sparse_": "sparse",
+ "special_": "special",
}
for heading in mappings.keys():
if base.startswith(heading):
- return mappings[heading], base[len(heading):]
+ return mappings[heading], base[len(heading) :]
return None, base
def get_torch_module(namespace):
if namespace is None:
return torch
- if namespace == 'nn.functional':
+ if namespace == "nn.functional":
return torch.nn.functional
return getattr(torch, namespace)
@@ -92,11 +93,11 @@ def parse_base(base):
apis = sorted(apis, key=lambda x: -len(x))
api = rest
- variant = ''
+ variant = ""
for candidate in apis:
if rest.startswith(candidate):
api = candidate
- variant = rest[len(candidate) + 1:]
+ variant = rest[len(candidate) + 1 :]
break
print(base, namespace, api, variant)
return namespace, api, variant
@@ -111,19 +112,18 @@ def any_starts_with(strs, thing):
def get_suggested_xfails(base, tests):
result = []
- tests = [test[len(base):] for test in tests if
- belongs_to_base(test, base)]
+ tests = [test[len(base) :] for test in tests if belongs_to_base(test, base)]
base_tests = {remove_device_dtype(test) for test in tests}
tests = set(tests)
for base in base_tests:
- cpu_variant = base + '_cpu_float32'
- cuda_variant = base + '_cuda_float32'
+ cpu_variant = base + "_cpu_float32"
+ cuda_variant = base + "_cuda_float32"
namespace, api, variant = parse_base(base)
if namespace is None:
api = api
else:
- api = f'{namespace}.{api}'
+ api = f"{namespace}.{api}"
if cpu_variant in tests and cuda_variant in tests:
result.append(f"xfail('{api}', '{variant}'),")
continue
@@ -139,7 +139,7 @@ def get_suggested_xfails(base, tests):
result = {base: get_suggested_xfails(base, failed_tests) for base in base_names}
for k, v in result.items():
- print('=' * 50)
+ print("=" * 50)
print(k)
- print('=' * 50)
- print('\n'.join(v))
+ print("=" * 50)
+ print("\n".join(v))
|
2.41.0
|
cd7a7aa8e0942da627095b23b94dc89f5a54943
|
Mon, 15 Apr 2024 10:50:54 +0000
|
[PATCH 0150/1000] [xla hash update] update the pinned xla hash (#124042)
|
This PR is auto-generated nightly by [this action](https://github.com/pytorch/pytorch/blob/main/.github/workflows/nightly.yml). Update the pinned xla hash. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124042 Approved by: https://github.com/pytorchbot
|
diff --git a/.github/ci_commit_pins/xla.txt b/.github/ci_commit_pins/xla.txt
index 4b92ed5afe..259a97684d 100644
--- a/.github/ci_commit_pins/xla.txt
+++ b/.github/ci_commit_pins/xla.txt
@@ -1 +1 @@
-5c48be19e6ded305bb524b3d1231fd4ce4d46208
+58a412cb271a3f98ae2e01fd1d24bdbb66645d4e
|
2.41.0
|
f9a70723374acd26ee2ce1fb5b37d513c8e8a17
|
Sun, 14 Apr 2024 23:32:09 -0400
|
[PATCH 0151/1000] Use uv in lintrunner init when it is available. (#124033)
|
Before, a no-op lintrunner init takes 12s. After, it takes 1s; a full order of magnitude improvement. Signed-off-by: Edward Z. Yang <ezyang@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/124033 Approved by: https://github.com/cyyever, https://github.com/Skylion007
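A minimal sketch of the pattern this change relies on (the helper and package names below are illustrative, not code from the PR): prefer `uv pip install` when `uv` is on the PATH, otherwise fall back to regular `pip`.
```
import shutil
import subprocess
import sys


def pip_install(packages):
    """Illustrative helper: use uv's pip frontend when it is available."""
    if shutil.which("uv") is not None:
        # uv's `pip install` is a drop-in replacement and is much faster.
        cmd = ["uv", "pip", "install"]
    else:
        cmd = [sys.executable, "-m", "pip", "install"]
    subprocess.run([*cmd, *packages], check=True)


# Hypothetical usage:
# pip_install(["flake8==6.1.0", "black==23.12.1"])
```
The actual adapter keeps building a `pip_args` list, as the diff below shows.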
|
diff --git a/tools/linter/adapters/pip_init.py b/tools/linter/adapters/pip_init.py
index f177a920d0..0c1551f5e3 100644
--- a/tools/linter/adapters/pip_init.py
+++ b/tools/linter/adapters/pip_init.py
@@ -4,6 +4,7 @@ Initializer script that installs stuff to pip.
import argparse
import logging
import os
+import shutil
import subprocess
import sys
import time
@@ -50,7 +51,12 @@ if __name__ == "__main__":
stream=sys.stderr,
)
- pip_args = ["pip3", "install"]
+ uv_available = shutil.which("uv") is not None
+
+ if uv_available:
+ pip_args = ["uv", "pip", "install"]
+ else:
+ pip_args = ["pip", "install"]
# If we are in a global install, use `--user` to install so that you do not
# need root access in order to initialize linters.
|
2.41.0
|
ea1b99d89204989db64d0d63f5e46fce60d1962
|
Mon, 15 Apr 2024 15:36:55 +0000
|
[PATCH 0152/1000] Remove warning from LazyModuleMixin constructor. (#123968)
|
Remove warning from `LazyModuleMixin` about lazy modules being a new feature under heavy development. The last nontrivial change to the code happened more than three years ago. Fixes #123928. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123968 Approved by: https://github.com/mikaylagawarecki
|
diff --git a/torch/nn/modules/lazy.py b/torch/nn/modules/lazy.py
index 52784ae511..c4b7459c4a 100644
--- a/torch/nn/modules/lazy.py
+++ b/torch/nn/modules/lazy.py
@@ -1,5 +1,4 @@
import itertools
-import warnings
from typing import Protocol, Optional, Type, Any
import torch
@@ -178,8 +177,6 @@ class LazyModuleMixin:
super().__init__(*args, **kwargs) # type: ignore[misc]
self._load_hook = self._register_load_state_dict_pre_hook(self._lazy_load_hook)
self._initialize_hook = self.register_forward_pre_hook(self._infer_parameters, with_kwargs=True)
- warnings.warn('Lazy modules are a new feature under heavy development '
- 'so changes to the API or functionality can happen at any moment.')
def _save_to_state_dict(self: _LazyProtocol, destination, prefix, keep_vars):
# This should be ideally implemented as a hook,
|
2.41.0
|
c4fc5fa348eec9a93ec97f95971279797dfbf6f
|
Mon, 15 Apr 2024 16:51:40 +0000
|
[PATCH 0153/1000] [BE][Ez]: Fix minor potential perf regression from #123960 (#124013)
|
The `non_blocking` arg here is useless if the values are all eagerly consumed, so revert the change. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124013 Approved by: https://github.com/ezyang
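A small, hedged illustration of the evaluation-order difference at play (the tensors and device below are made up; this is not the GradScaler code itself): with a list comprehension every `.to(..., non_blocking=True)` copy is issued before `sum()` starts adding, whereas a generator issues each copy only as `sum()` consumes it.
```
import torch

# Stand-ins for the found_inf_per_device values.
tensors = [torch.zeros(1) for _ in range(4)]
device = "cuda" if torch.cuda.is_available() else "cpu"

# Generator: copies are issued lazily, interleaved with the running sum.
lazy_total = sum(t.to(device, non_blocking=True) for t in tensors)

# List comprehension: all copies are issued up front, then summed
# (hence the `noqa: C419` in the diff below).
eager_total = sum([t.to(device, non_blocking=True) for t in tensors])  # noqa: C419

assert torch.equal(lazy_total, eager_total)  # same result either way; only the issue order differs
```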
|
diff --git a/torch/amp/grad_scaler.py b/torch/amp/grad_scaler.py
index f2fae37142..a72c6246c9 100644
--- a/torch/amp/grad_scaler.py
+++ b/torch/amp/grad_scaler.py
@@ -426,8 +426,10 @@ class GradScaler:
found_inf = cast(
torch.Tensor,
sum(
- t.to(scaler.device, non_blocking=True)
- for t in optimizer_state["found_inf_per_device"].values()
+ [ # noqa: C419
+ t.to(scaler.device, non_blocking=True)
+ for t in optimizer_state["found_inf_per_device"].values()
+ ]
),
)
optimizer.grad_scale = ( # type: ignore[attr-defined]
|
2.41.0
|
3dcb5b0f2ef3578e81841fd8a2166e732c0ca99
|
Fri, 12 Apr 2024 06:23:04 -0700
|
[PATCH 0154/1000] make sure dynamo doesn't inline DTensor __new__ or __torch_dispatch__ (#123347)
|
Fixes https://github.com/pytorch/pytorch/issues/122459, https://github.com/pytorch/torchtrain/issues/61 Even with the previous PR ("support DTensor/subclass constructors directly in the graph"), I still see some errors when running the repro above, along with logs showing that dynamo is inlining `__new__`. I noticed that putting `@torch._dynamo.disable` on DTensor's `__new__` makes the entire repro pass. Why does having dynamo try to inline `Subclass.__new__` run into problems? Morally, dynamo probably shouldn't be inlining __new__ ("creating a subclass" is a blackbox operation that AOTAutograd can trace through anyway). But concretely, we can end up with a node in the dynamo FX graph that has a "partially initialized tensor subclass" as its example value, because the subclass has been created but its fields have not been assigned to yet. This breaks a bunch of invariants throughout dynamo: there are many places where if we have a tensor subclass node, we want to look at its inner tensors, to see if they are FakeTensors, what their FakeTensorMode is, and if they have dynamic shapes. One option is to decide that "uninitialized subclass" is a first-class thing that anyone looking at the FX node example values on the dynamo graph needs to handle, but this seems like a lot of work when in reality we don't need dynamo to trace the __new__ at all. Hence the `torch._dynamo.disable`. I still wasn't very satisfied, since it was unclear to me **why** dynamo was inlining the `__new__` call, instead of interposing on the `DTensor()` constructor directly. After a long chat with @anijain2305, he explained that with code like this:
```
@torch._dynamo.disable(recursive=False)
def f(x):
    out = SubclassConstructor(x)
```
Dynamo will never get the chance to interpose on the subclass constructor. Instead, what will happen is: (1) Dynamo hands back control to cpython to run `f()`, since we disabled that frame (2) `SubclassConstructor(x)` is run in eager mode (3) `SubclassConstructor(x)` eventually calls `SubclassConstructor.__new__` (4) this is a new frame, which cpython then allows dynamo to intercept and start compiling. So it looks like we are basically forced to handle the situation where dynamo might directly start compiling `Subclass.__new__`. All of the above does not explain the story for `__torch_dispatch__` though. Empirically, I have a repro in torchtrain where looking at the dynamo logs, we see dynamo try to inline `__torch_dispatch__`.
```
[rank0]:DEBUG: Skipping frame because no content in function call _prepare_output_fn /data/users/hirsheybar/b/pytorch/torch/distributed/tensor/parallel/style.py 318
[rank0]:DEBUG: torchdynamo start compiling __torch_dispatch__ /data/users/hirsheybar/b/pytorch/torch/distributed/_tensor/api.py:297, stack (elided 5 frames):
```
I haven't been able to create a smaller repro of the problem (even using `_dynamo.disable(recursive=False)`), although in theory, if there is a `torch.*` op that you were to inline (where one of the inputs is a subclass), the next frame would likely be `__torch_dispatch__`. Dynamo always treats `torch.*` operations as not-inlinable though, so in theory we shouldn't ever see dynamo inline `__torch_dispatch__`, but a `_dynamo.disable()` fixes the problem. I asked Animesh if we can have dynamo automatically apply this behavior to subclasses instead of needing it to be added explicitly.
He pointed out that for `disable(recursive=False)`, we can't really do this within dynamo. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123347 Approved by: https://github.com/zou3519 ghstack dependencies: #122502, #122751, #123348
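To make the frame bookkeeping above concrete, here is a minimal, hedged sketch (the subclass and function names are invented for illustration; this is not the DTensor code, and whether dynamo actually compiles the `__new__` frame depends on dynamo being active, e.g. inside a `torch.compile` region):
```
import torch


class MySubclass(torch.Tensor):
    @staticmethod
    def __new__(cls, elem):
        # If dynamo lands on this frame directly, it sees a partially
        # initialized subclass -- the broken-invariant situation described above.
        return torch.Tensor._make_subclass(cls, elem)


@torch._dynamo.disable(recursive=False)
def f(x):
    # recursive=False only skips f()'s own frame: the constructor call runs in
    # eager mode, and MySubclass.__new__ then shows up as a brand-new frame that
    # dynamo may still intercept -- unless __new__ is itself wrapped in
    # torch._dynamo.disable, which is what this PR does for DTensor.
    return MySubclass(x)


out = f(torch.randn(4))  # runs fine eagerly; the frame handling only matters under dynamo
```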
|
diff --git a/test/distributed/_tensor/test_dtensor_compile.py b/test/distributed/_tensor/test_dtensor_compile.py
index f9ad0278d7..5f98050e83 100644
--- a/test/distributed/_tensor/test_dtensor_compile.py
+++ b/test/distributed/_tensor/test_dtensor_compile.py
@@ -191,6 +191,47 @@ class TestDTensorCompile(torch._dynamo.test_case.TestCase):
res = opt_fn(x)
self.assertEqual(res, ref)
+ def test_dtensor_constructor_w_graph_break(self):
+ mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
+
+ # test passing in DTensor as inputs/outputs and run some tensor computation
+ def fn(x):
+ print("graph break!")
+ return DTensor(
+ x,
+ mesh,
+ (Replicate(), Shard(0)),
+ shape=[128, 32],
+ dtype=x.dtype,
+ requires_grad=x.requires_grad,
+ stride=[32, 1],
+ )
+
+ x = torch.randn(64, 32, requires_grad=True)
+ out = fn(x)
+ out2 = torch.compile(fn, backend="eager")(x)
+
+ def test_dtensor_constructor_w_dynamo_disable(self):
+ mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
+
+ @torch._dynamo.disable(recursive=False)
+ def fn(x):
+ print("foo")
+ return DTensor(
+ x,
+ mesh,
+ (Replicate(),),
+ shape=torch.Size([32]),
+ dtype=x.dtype,
+ requires_grad=x.requires_grad,
+ stride=(1,),
+ )
+
+ x = torch.randn(32, requires_grad=True)
+ out = fn(x)
+ out2 = torch.compile(fn, backend="eager")(x)
+ self.assertEqual(out, out2)
+
def test_dtensor_noncontiguous_output(self):
mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
diff --git a/torch/distributed/_tensor/api.py b/torch/distributed/_tensor/api.py
index 00079ef946..9ef008156c 100644
--- a/torch/distributed/_tensor/api.py
+++ b/torch/distributed/_tensor/api.py
@@ -198,6 +198,7 @@ class DTensor(torch.Tensor): # pyre-ignore[13]: pyre is bad at __new__
_op_dispatcher: op_dispatch.OpDispatcher = op_dispatch.OpDispatcher()
@staticmethod
+ @torch._dynamo.disable
def __new__(
cls,
local_tensor: torch.Tensor,
@@ -288,6 +289,7 @@ class DTensor(torch.Tensor): # pyre-ignore[13]: pyre is bad at __new__
)
@classmethod
+ @torch._dynamo.disable
# pyre-fixme[3]: Return type must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
|
2.41.0
|
ce29f1416f6a3852dc10bfe9326afc6cca4c2f0
|
Mon, 15 Apr 2024 17:46:54 +0000
|
[PATCH 0155/1000] Enable UFMT on `test/onnx_caffe2`, `test/optim`, `test/package` and `test/profiler` (#123901)
|
Part of: #123062

Ran lintrunner on:
- `test/onnx_caffe2`
- `test/optim`
- `test/package`
- `test/profiler`

Pull Request resolved: https://github.com/pytorch/pytorch/pull/123901 Approved by: https://github.com/ezyang
|
diff --git a/.lintrunner.toml b/.lintrunner.toml
index e223d1a069..73e225e9ee 100644
--- a/.lintrunner.toml
+++ b/.lintrunner.toml
@@ -1256,65 +1256,6 @@ exclude_patterns = [
'test/nn/test_parametrization.py',
'test/nn/test_pooling.py',
'test/nn/test_pruning.py',
- 'test/onnx_caffe2/export_onnx_tests_filter.py',
- 'test/onnx_caffe2/export_onnx_tests_generator.py',
- 'test/onnx_caffe2/test_caffe2_common.py',
- 'test/onnx_caffe2/test_custom_ops.py',
- 'test/onnx_caffe2/test_pytorch_helper.py',
- 'test/onnx_caffe2/test_pytorch_onnx_caffe2.py',
- 'test/onnx_caffe2/test_pytorch_onnx_caffe2_quantized.py',
- 'test/onnx_caffe2/test_verify.py',
- 'test/optim/test_lrscheduler.py',
- 'test/optim/test_optim.py',
- 'test/optim/test_swa_utils.py',
- 'test/package/__init__.py',
- 'test/package/common.py',
- 'test/package/generate_bc_packages.py',
- 'test/package/module_a.py',
- 'test/package/module_a_remapped_path.py',
- 'test/package/package_a/__init__.py',
- 'test/package/package_a/fake_interface.py',
- 'test/package/package_a/fake_script_class.py',
- 'test/package/package_a/long_name.py',
- 'test/package/package_a/std_sys_module_hacks.py',
- 'test/package/package_a/subpackage.py',
- 'test/package/package_a/test_all_leaf_modules_tracer.py',
- 'test/package/package_a/test_module.py',
- 'test/package/package_a/test_nn_module.py',
- 'test/package/package_a/use_dunder_package.py',
- 'test/package/package_a/use_torch_package_importer.py',
- 'test/package/package_b/__init__.py',
- 'test/package/package_b/subpackage_0/__init__.py',
- 'test/package/package_b/subpackage_0/subsubpackage_0/__init__.py',
- 'test/package/package_b/subpackage_1.py',
- 'test/package/package_b/subpackage_2.py',
- 'test/package/package_c/__init__.py',
- 'test/package/package_c/test_module.py',
- 'test/package/package_d/__init__.py',
- 'test/package/package_d/imports_directly.py',
- 'test/package/package_d/imports_indirectly.py',
- 'test/package/package_d/subpackage_0/__init__.py',
- 'test/package/package_d/subpackage_0/subsubpackage_0/__init__.py',
- 'test/package/test_analyze.py',
- 'test/package/test_dependency_api.py',
- 'test/package/test_dependency_hooks.py',
- 'test/package/test_digraph.py',
- 'test/package/test_directory_reader.py',
- 'test/package/test_glob_group.py',
- 'test/package/test_importer.py',
- 'test/package/test_load_bc_packages.py',
- 'test/package/test_mangling.py',
- 'test/package/test_misc.py',
- 'test/package/test_model.py',
- 'test/package/test_package_fx.py',
- 'test/package/test_package_script.py',
- 'test/package/test_repackage.py',
- 'test/package/test_resources.py',
- 'test/package/test_save_load.py',
- 'test/package/test_trace_dep/__init__.py',
- 'test/profiler/test_memory_profiler.py',
- 'test/profiler/test_profiler.py',
- 'test/profiler/test_profiler_tree.py',
'test/quantization/__init__.py',
'test/quantization/core/__init__.py',
'test/quantization/core/experimental/apot_fx_graph_mode_ptq.py',
diff --git a/test/onnx_caffe2/test_pytorch_onnx_caffe2.py b/test/onnx_caffe2/test_pytorch_onnx_caffe2.py
index a3b0d0656e..a3254995a2 100644
--- a/test/onnx_caffe2/test_pytorch_onnx_caffe2.py
+++ b/test/onnx_caffe2/test_pytorch_onnx_caffe2.py
@@ -3071,7 +3071,6 @@ def setup_rnn_tests():
variable_length_opts,
dropout_opts,
):
-
for base, name, extra_kwargs in (
("elman", "elman_relu", {"nonlinearity": "relu"}),
("elman", "elman_tanh", {"nonlinearity": "tanh"}),
diff --git a/test/optim/test_lrscheduler.py b/test/optim/test_lrscheduler.py
index 7b5687a8a7..cf455f6a6e 100644
--- a/test/optim/test_lrscheduler.py
+++ b/test/optim/test_lrscheduler.py
@@ -1,40 +1,40 @@
# Owner(s): ["module: optimizer", "module: LrScheduler" ]
-import types
-import warnings
import math
import pickle
+import types
+import warnings
from functools import partial
import torch
import torch.nn.functional as F
from torch.nn import Parameter
-from torch.optim import Adam, SGD, Rprop
+from torch.optim import Adam, Rprop, SGD
from torch.optim.lr_scheduler import (
- LambdaLR,
- MultiplicativeLR,
- SequentialLR,
- StepLR,
- MultiStepLR,
+ ChainedScheduler,
ConstantLR,
- LinearLR,
- ExponentialLR,
CosineAnnealingLR,
- ReduceLROnPlateau,
- LRScheduler,
- CyclicLR,
CosineAnnealingWarmRestarts,
+ CyclicLR,
+ EPOCH_DEPRECATION_WARNING,
+ ExponentialLR,
+ LambdaLR,
+ LinearLR,
+ LRScheduler,
+ MultiplicativeLR,
+ MultiStepLR,
OneCycleLR,
- ChainedScheduler,
PolynomialLR,
- EPOCH_DEPRECATION_WARNING,
+ ReduceLROnPlateau,
+ SequentialLR,
+ StepLR,
)
from torch.optim.swa_utils import SWALR
from torch.testing._internal.common_utils import (
- TestCase,
+ instantiate_parametrized_tests,
load_tests,
parametrize,
- instantiate_parametrized_tests,
- skipIfTorchDynamo
+ skipIfTorchDynamo,
+ TestCase,
)
# load_tests from common_utils is used to automatically filter tests for
@@ -52,7 +52,6 @@ class TestLRScheduler(TestCase):
def forward(self, x):
return self.conv2(F.relu(self.conv1(x)))
-
class LambdaLRTestObject:
def __init__(self, value):
self.value = value
@@ -65,6 +64,7 @@ class TestLRScheduler(TestCase):
return self.__dict__ == other.__dict__
else:
return False
+
exact_dtype = True
def setUp(self):
@@ -112,7 +112,9 @@ class TestLRScheduler(TestCase):
with self.assertRaises(TypeError):
scheduler = MultiStepLR(optimizer, gamma=1, milestones=[10, 20])
- @skipIfTorchDynamo("Torchdynamo keeps references to optim in the guards and the stack of the graph break frames")
+ @skipIfTorchDynamo(
+ "Torchdynamo keeps references to optim in the guards and the stack of the graph break frames"
+ )
def test_no_cyclic_references(self):
import gc
@@ -132,7 +134,9 @@ class TestLRScheduler(TestCase):
gc.collect(), 0, msg="Optimizer should be garbage-collected on __del__"
)
- @skipIfTorchDynamo("Torchdynamo keeps references to optim in the guards and the stack of the graph break frames")
+ @skipIfTorchDynamo(
+ "Torchdynamo keeps references to optim in the guards and the stack of the graph break frames"
+ )
def test_no_cyclic_references_in_step(self):
import gc
import weakref
@@ -347,9 +351,7 @@ class TestLRScheduler(TestCase):
from torch.nn import Parameter
epochs = 10
- optimizer = SGD(
- [Parameter(torch.randn(2, 2, requires_grad=True))], 0.1
- )
+ optimizer = SGD([Parameter(torch.randn(2, 2, requires_grad=True))], 0.1)
targets = [[0.1] * 3 + [0.01] * 3 + [0.001] * 3 + [0.0001]]
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 3, gamma=0.1)
self._test_get_last_lr(scheduler, targets, epochs)
@@ -692,7 +694,9 @@ class TestLRScheduler(TestCase):
scheduler = ReduceLROnPlateau(
self.opt,
)
- self.assertEqual(scheduler.get_last_lr(), [0.5 for param_group in self.opt.param_groups])
+ self.assertEqual(
+ scheduler.get_last_lr(), [0.5 for param_group in self.opt.param_groups]
+ )
def test_sequentiallr1(self):
epochs = 19
@@ -1555,7 +1559,9 @@ class TestLRScheduler(TestCase):
def scale_fn(_):
return 0.5
- scheduler = CyclicLR(adam_opt, base_lr=1, max_lr=5, cycle_momentum=False, scale_fn=scale_fn)
+ scheduler = CyclicLR(
+ adam_opt, base_lr=1, max_lr=5, cycle_momentum=False, scale_fn=scale_fn
+ )
state = scheduler.state_dict()
self.assertNotIn("_scale_fn_ref", state)
self.assertIs(state["_scale_fn_custom"], None)
@@ -1571,7 +1577,9 @@ class TestLRScheduler(TestCase):
scale_fn = ScaleFn()
- scheduler = CyclicLR(adam_opt, base_lr=1, max_lr=5, cycle_momentum=False, scale_fn=scale_fn)
+ scheduler = CyclicLR(
+ adam_opt, base_lr=1, max_lr=5, cycle_momentum=False, scale_fn=scale_fn
+ )
state = scheduler.state_dict()
self.assertNotIn("_scale_fn_ref", state)
self.assertEqual(state["_scale_fn_custom"], scale_fn.__dict__)
@@ -1581,11 +1589,17 @@ class TestLRScheduler(TestCase):
adam_opt = Adam(self.net.parameters())
# Case 1: Built-in mode
- scheduler = CyclicLR(adam_opt, base_lr=1, max_lr=5, cycle_momentum=False, mode="triangular2")
- restored_scheduler = CyclicLR(adam_opt, base_lr=1, max_lr=5, cycle_momentum=False)
+ scheduler = CyclicLR(
+ adam_opt, base_lr=1, max_lr=5, cycle_momentum=False, mode="triangular2"
+ )
+ restored_scheduler = CyclicLR(
+ adam_opt, base_lr=1, max_lr=5, cycle_momentum=False
+ )
restored_scheduler.load_state_dict(scheduler.state_dict())
self.assertTrue(restored_scheduler.mode == scheduler.mode == "triangular2")
- self.assertIsNotNone(restored_scheduler._scale_fn_ref) and self.assertIsNotNone(scheduler._scale_fn_ref)
+ self.assertIsNotNone(restored_scheduler._scale_fn_ref) and self.assertIsNotNone(
+ scheduler._scale_fn_ref
+ )
self.assertIs(restored_scheduler._scale_fn_custom, None)
self.assertIs(scheduler._scale_fn_custom, None)
@@ -1593,8 +1607,12 @@ class TestLRScheduler(TestCase):
def scale_fn(_):
return 0.5
- scheduler = CyclicLR(adam_opt, base_lr=1, max_lr=5, cycle_momentum=False, scale_fn=scale_fn)
- restored_scheduler = CyclicLR(adam_opt, base_lr=1, max_lr=5, cycle_momentum=False, scale_fn=scale_fn)
+ scheduler = CyclicLR(
+ adam_opt, base_lr=1, max_lr=5, cycle_momentum=False, scale_fn=scale_fn
+ )
+ restored_scheduler = CyclicLR(
+ adam_opt, base_lr=1, max_lr=5, cycle_momentum=False, scale_fn=scale_fn
+ )
restored_scheduler.load_state_dict(scheduler.state_dict())
self.assertIs(scheduler._scale_fn_custom, scale_fn)
self.assertIs(restored_scheduler._scale_fn_custom, scale_fn)
@@ -2253,30 +2271,40 @@ class TestLRScheduler(TestCase):
self.assertLessEqual(last_lr, max_lr)
-
- @parametrize("LRClass", [
- partial(LambdaLR, lr_lambda=lambda e: e // 10),
- partial(MultiplicativeLR, lr_lambda=lambda: 0.95),
- partial(StepLR, step_size=30),
- partial(MultiStepLR, milestones=[30, 80]),
- ConstantLR,
- LinearLR,
- partial(ExponentialLR, gamma=0.9),
- lambda opt, **kwargs: SequentialLR(
- opt, schedulers=[ConstantLR(opt), ConstantLR(opt)], milestones=[2], **kwargs),
- PolynomialLR,
- partial(CosineAnnealingLR, T_max=10),
- ReduceLROnPlateau,
- partial(CyclicLR, base_lr=0.01, max_lr=0.1),
- partial(CosineAnnealingWarmRestarts, T_0=20),
- partial(OneCycleLR, max_lr=0.01, total_steps=10),
- ])
+ @parametrize(
+ "LRClass",
+ [
+ partial(LambdaLR, lr_lambda=lambda e: e // 10),
+ partial(MultiplicativeLR, lr_lambda=lambda: 0.95),
+ partial(StepLR, step_size=30),
+ partial(MultiStepLR, milestones=[30, 80]),
+ ConstantLR,
+ LinearLR,
+ partial(ExponentialLR, gamma=0.9),
+ lambda opt, **kwargs: SequentialLR(
+ opt,
+ schedulers=[ConstantLR(opt), ConstantLR(opt)],
+ milestones=[2],
+ **kwargs,
+ ),
+ PolynomialLR,
+ partial(CosineAnnealingLR, T_max=10),
+ ReduceLROnPlateau,
+ partial(CyclicLR, base_lr=0.01, max_lr=0.1),
+ partial(CosineAnnealingWarmRestarts, T_0=20),
+ partial(OneCycleLR, max_lr=0.01, total_steps=10),
+ ],
+ )
def test_lr_scheduler_verbose_deprecation_warning(self, LRClass):
"""Check that a deprecating warning with verbose parameter."""
- with self.assertWarnsOnceRegex(UserWarning, "The verbose parameter is deprecated"):
+ with self.assertWarnsOnceRegex(
+ UserWarning, "The verbose parameter is deprecated"
+ ):
LRClass(self.opt, verbose=True)
- with self.assertWarnsOnceRegex(UserWarning, "The verbose parameter is deprecated"):
+ with self.assertWarnsOnceRegex(
+ UserWarning, "The verbose parameter is deprecated"
+ ):
LRClass(self.opt, verbose=False)
# No warning is raised when verbose is the default value.
diff --git a/test/optim/test_optim.py b/test/optim/test_optim.py
index 5f87f9b856..7738f0da41 100644
--- a/test/optim/test_optim.py
+++ b/test/optim/test_optim.py
@@ -2,17 +2,26 @@
import torch
from torch.optim import (
- Adadelta, Adagrad, Adam, Adamax, AdamW, ASGD, NAdam, RAdam, RMSprop, Rprop, SGD
+ Adadelta,
+ Adagrad,
+ Adam,
+ Adamax,
+ AdamW,
+ ASGD,
+ NAdam,
+ RAdam,
+ RMSprop,
+ Rprop,
+ SGD,
)
from torch.testing._internal.common_utils import (
- TestCase,
- load_tests,
gradcheck,
- skipIfTorchDynamo
+ load_tests,
+ skipIfTorchDynamo,
+ TestCase,
)
-
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
@@ -41,7 +50,6 @@ def _diff_fn(p, grad, opt_differentiable_state, opt_class, kwargs, *ignored):
@skipIfTorchDynamo("Differentiable optimizers not supported")
class TestDifferentiableOptimizer(TestCase):
-
def test_sgd(self):
p = torch.rand(10, requires_grad=True, dtype=torch.float64)
grad = torch.rand(10, requires_grad=True, dtype=torch.float64)
@@ -59,7 +67,6 @@ class TestDifferentiableOptimizer(TestCase):
),
)
-
def test_adam(self):
state = {}
p = torch.rand(10, requires_grad=True, dtype=torch.float64)
@@ -85,7 +92,6 @@ class TestDifferentiableOptimizer(TestCase):
),
)
-
def test_rmsprop(self):
state = {}
p = torch.rand(10, requires_grad=True, dtype=torch.float64)
@@ -118,7 +124,6 @@ class TestDifferentiableOptimizer(TestCase):
),
)
-
def test_adadelta(self):
state = {}
p = torch.rand(10, requires_grad=True, dtype=torch.float64)
@@ -140,7 +145,6 @@ class TestDifferentiableOptimizer(TestCase):
),
)
-
def test_adagrad(self):
state = {}
p = torch.rand(10, requires_grad=True, dtype=torch.float64)
@@ -161,7 +165,6 @@ class TestDifferentiableOptimizer(TestCase):
),
)
-
def test_adamax(self):
state = {}
p = torch.rand(10, requires_grad=True, dtype=torch.float64)
@@ -183,9 +186,10 @@ class TestDifferentiableOptimizer(TestCase):
),
)
-
- @skipIfTorchDynamo("The inplace mu update fails with dynamo, "
- "since this is only happening when differentiable is enabled, skipping for now")
+ @skipIfTorchDynamo(
+ "The inplace mu update fails with dynamo, "
+ "since this is only happening when differentiable is enabled, skipping for now"
+ )
def test_asgd(self):
state = {}
p = torch.rand(10, requires_grad=True, dtype=torch.float64)
@@ -319,7 +323,12 @@ class TestDifferentiableOptimizer(TestCase):
grad,
state,
RAdam,
- {"lr": 0.9, "weight_decay": 0.1, "decoupled_weight_decay": True, "differentiable": True},
+ {
+ "lr": 0.9,
+ "weight_decay": 0.1,
+ "decoupled_weight_decay": True,
+ "differentiable": True,
+ },
*state.values(),
),
)
diff --git a/test/optim/test_swa_utils.py b/test/optim/test_swa_utils.py
index aaacbf3cb9..edb900c6c0 100644
--- a/test/optim/test_swa_utils.py
+++ b/test/optim/test_swa_utils.py
@@ -4,12 +4,17 @@ import itertools
import pickle
import torch
-from torch.optim.swa_utils import AveragedModel, update_bn, get_swa_multi_avg_fn, get_ema_multi_avg_fn
+from torch.optim.swa_utils import (
+ AveragedModel,
+ get_ema_multi_avg_fn,
+ get_swa_multi_avg_fn,
+ update_bn,
+)
from torch.testing._internal.common_utils import (
- TestCase,
+ instantiate_parametrized_tests,
load_tests,
parametrize,
- instantiate_parametrized_tests,
+ TestCase,
)
# load_tests from common_utils is used to automatically filter tests for
@@ -75,9 +80,13 @@ class TestSWAUtils(TestCase):
def _run_averaged_steps(self, dnn, swa_device, ema):
ema_decay = 0.999
if ema:
- averaged_dnn = AveragedModel(dnn, device=swa_device, multi_avg_fn=get_ema_multi_avg_fn(ema_decay))
+ averaged_dnn = AveragedModel(
+ dnn, device=swa_device, multi_avg_fn=get_ema_multi_avg_fn(ema_decay)
+ )
else:
- averaged_dnn = AveragedModel(dnn, device=swa_device, multi_avg_fn=get_swa_multi_avg_fn())
+ averaged_dnn = AveragedModel(
+ dnn, device=swa_device, multi_avg_fn=get_swa_multi_avg_fn()
+ )
averaged_params = [torch.zeros_like(param) for param in dnn.parameters()]
@@ -86,7 +95,11 @@ class TestSWAUtils(TestCase):
for p, p_avg in zip(dnn.parameters(), averaged_params):
p.detach().add_(torch.randn_like(p))
if ema:
- p_avg += p.detach() * ema_decay ** (n_updates - i - 1) * ((1 - ema_decay) if i > 0 else 1.0)
+ p_avg += (
+ p.detach()
+ * ema_decay ** (n_updates - i - 1)
+ * ((1 - ema_decay) if i > 0 else 1.0)
+ )
else:
p_avg += p.detach() / n_updates
averaged_dnn.update_parameters(dnn)
@@ -157,8 +170,11 @@ class TestSWAUtils(TestCase):
decay = 0.9
if use_multi_avg_fn:
- averaged_dnn = AveragedModel(dnn, multi_avg_fn=get_ema_multi_avg_fn(decay), use_buffers=use_buffers)
+ averaged_dnn = AveragedModel(
+ dnn, multi_avg_fn=get_ema_multi_avg_fn(decay), use_buffers=use_buffers
+ )
else:
+
def avg_fn(p_avg, p, n_avg):
return decay * p_avg + (1 - decay) * p
@@ -206,7 +222,6 @@ class TestSWAUtils(TestCase):
self.assertEqual(b_avg, b_swa)
def _test_update_bn(self, dnn, dl_x, dl_xy, cuda):
-
preactivation_sum = torch.zeros(dnn.n_features)
preactivation_squared_sum = torch.zeros(dnn.n_features)
if cuda:
diff --git a/test/package/package_a/long_name.py b/test/package/package_a/long_name.py
index 2b4a2af849..dd315223e8 100644
--- a/test/package/package_a/long_name.py
+++ b/test/package/package_a/long_name.py
@@ -1,6 +1,9 @@
def add_function(d):
# noqa: B950
- d.append(function_with_a_long_name_256charsplus_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx)
+ d.append(
+ function_with_a_long_name_256charsplus_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+ )
+
def function_with_a_long_name_256charsplus_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx(): # noqa: B950
return 1337
diff --git a/test/package/package_d/imports_directly.py b/test/package/package_d/imports_directly.py
index 4bb96bf4c4..a7c93766d2 100644
--- a/test/package/package_d/imports_directly.py
+++ b/test/package/package_d/imports_directly.py
@@ -4,7 +4,6 @@ from .subpackage_0.subsubpackage_0 import important_string
class ImportsDirectlyFromSubSubPackage(torch.nn.Module):
-
key = important_string
def forward(self, inp):
diff --git a/test/package/package_d/imports_indirectly.py b/test/package/package_d/imports_indirectly.py
index e18434deaf..1ffd41f447 100644
--- a/test/package/package_d/imports_indirectly.py
+++ b/test/package/package_d/imports_indirectly.py
@@ -4,7 +4,6 @@ from .subpackage_0 import important_string
class ImportsIndirectlyFromSubPackage(torch.nn.Module):
-
key = important_string
def forward(self, inp):
diff --git a/test/package/test_directory_reader.py b/test/package/test_directory_reader.py
index 0f19b47895..f98289345d 100644
--- a/test/package/test_directory_reader.py
+++ b/test/package/test_directory_reader.py
@@ -44,7 +44,10 @@ class DirectoryReaderTest(PackageTestCase):
"""Tests use of DirectoryReader as accessor for opened packages."""
@skipIfNoTorchVision
- @skipIf(True, "Does not work with latest TorchVision, see https://github.com/pytorch/pytorch/issues/81115")
+ @skipIf(
+ True,
+ "Does not work with latest TorchVision, see https://github.com/pytorch/pytorch/issues/81115",
+ )
def test_loading_pickle(self):
"""
Test basic saving and loading of modules and pickles from a DirectoryReader.
diff --git a/test/package/test_load_bc_packages.py b/test/package/test_load_bc_packages.py
index af1c26cae9..527a65d190 100644
--- a/test/package/test_load_bc_packages.py
+++ b/test/package/test_load_bc_packages.py
@@ -32,7 +32,6 @@ class TestLoadBCPackages(PackageTestCase):
"Tests that use temporary files are disabled in fbcode",
)
def test_load_bc_packages_torchscript_module(self):
-
"""Tests for backwards compatible torchscript module"""
importer2 = PackageImporter(f"{packaging_directory}/test_torchscript_module.pt")
loaded2 = importer2.load_pickle("torchscript_module", "torchscript_module.pkl")
diff --git a/test/package/test_misc.py b/test/package/test_misc.py
index 5bc53f8b49..59b25ca2e6 100644
--- a/test/package/test_misc.py
+++ b/test/package/test_misc.py
@@ -11,7 +11,12 @@ from unittest import skipIf
from torch.package import is_from_package, PackageExporter, PackageImporter
from torch.package.package_exporter import PackagingError
-from torch.testing._internal.common_utils import IS_FBCODE, IS_SANDCASTLE, run_tests, skipIfTorchDynamo
+from torch.testing._internal.common_utils import (
+ IS_FBCODE,
+ IS_SANDCASTLE,
+ run_tests,
+ skipIfTorchDynamo,
+)
try:
from .common import PackageTestCase
@@ -118,7 +123,9 @@ class TestMisc(PackageTestCase):
def get_filename(self, name):
result = super().get_filename(name)
if name == "module_a":
- return os.path.join(os.path.dirname(result), "module_a_remapped_path.py")
+ return os.path.join(
+ os.path.dirname(result), "module_a_remapped_path.py"
+ )
else:
return result
@@ -139,7 +146,9 @@ class TestMisc(PackageTestCase):
if spec is not None:
break
assert spec is not None and isinstance(spec.loader, SourceFileLoader)
- spec.loader = LoaderThatRemapsModuleA(spec.loader.name, spec.loader.path)
+ spec.loader = LoaderThatRemapsModuleA(
+ spec.loader.name, spec.loader.path
+ )
return spec
sys.meta_path.insert(0, FinderThatRemapsModuleA())
@@ -154,7 +163,6 @@ class TestMisc(PackageTestCase):
he.intern("**")
he.save_module(module_a.__name__)
-
buffer.seek(0)
hi = PackageImporter(buffer)
self.assertTrue("remapped_path" in hi.get_source("module_a"))
diff --git a/test/package/test_model.py b/test/package/test_model.py
index 05da095411..65818a49d3 100644
--- a/test/package/test_model.py
+++ b/test/package/test_model.py
@@ -23,7 +23,10 @@ except ImportError:
from common import PackageTestCase
-@skipIf(True, "Does not work with recent torchvision, see https://github.com/pytorch/pytorch/issues/81115")
+@skipIf(
+ True,
+ "Does not work with recent torchvision, see https://github.com/pytorch/pytorch/issues/81115",
+)
@skipIfNoTorchVision
class ModelTest(PackageTestCase):
"""End-to-end tests packaging an entire model."""
@@ -88,7 +91,6 @@ class ModelTest(PackageTestCase):
@skipIfNoTorchVision
def test_model_save(self):
-
# This example shows how you might package a model
# so that the creator of the model has flexibility about
# how they want to save it but the 'server' can always
diff --git a/test/package/test_package_fx.py b/test/package/test_package_fx.py
index 2ff4b2efa6..93b599ef0c 100644
--- a/test/package/test_package_fx.py
+++ b/test/package/test_package_fx.py
@@ -22,6 +22,7 @@ torch.fx.wrap("len")
# Do it twice to make sure it doesn't affect anything
torch.fx.wrap("len")
+
class TestPackageFX(PackageTestCase):
"""Tests for compatibility with FX."""
@@ -186,6 +187,5 @@ class TestPackageFX(PackageTestCase):
self.assertEqual(loaded_traced(input), traced(input))
-
if __name__ == "__main__":
run_tests()
diff --git a/test/package/test_save_load.py b/test/package/test_save_load.py
index 112c43b4dc..44fbb448bb 100644
--- a/test/package/test_save_load.py
+++ b/test/package/test_save_load.py
@@ -163,7 +163,9 @@ class TestSaveLoad(PackageTestCase):
buffer = BytesIO()
with PackageExporter(buffer) as exporter:
exporter.intern("**")
- exporter.save_pickle("container", "container.pkl", container, pickle_protocol=4)
+ exporter.save_pickle(
+ "container", "container.pkl", container, pickle_protocol=4
+ )
buffer.seek(0)
importer = PackageImporter(buffer)
diff --git a/test/profiler/test_memory_profiler.py b/test/profiler/test_memory_profiler.py
index a4927305cf..6bc1f5d0af 100644
--- a/test/profiler/test_memory_profiler.py
+++ b/test/profiler/test_memory_profiler.py
@@ -103,7 +103,6 @@ class TestIdentifyGradients(TestCase):
grad_tensor: torch.Tensor,
parameter: Optional[torch.Tensor] = None,
) -> None:
-
# This is not an exhaustive check, but for the purpose of unit testing
# it is sufficient.
def key_matches_tensor(key, tensor) -> bool:
@@ -219,7 +218,6 @@ class TestIdentifyGradients(TestCase):
check(cold_start=False)
def _test_extract_gradients_from_optimizer(self, set_to_none: bool) -> None:
-
x = torch.ones((1,))
w0 = torch.ones((1,), requires_grad=True)
w1 = torch.ones((1,), requires_grad=True)
@@ -844,14 +842,19 @@ class TestMemoryProfilerE2E(TestCase):
if key.storage.allocation_id == max(ids | {-1})
}
- def _run_and_check_parameters_and_gradients(self, inner_fn, model, grads_none: bool = False):
-
+ def _run_and_check_parameters_and_gradients(
+ self, inner_fn, model, grads_none: bool = False
+ ):
with profile() as prof:
inner_fn()
memory_profile = prof._memory_profile()
- def assert_category(t: torch.Tensor, category: _memory_profiler.Category, should_be_none: bool = False):
+ def assert_category(
+ t: torch.Tensor,
+ category: _memory_profiler.Category,
+ should_be_none: bool = False,
+ ):
if should_be_none:
assert t is None, "tensor should be None but is not."
return
@@ -940,7 +943,9 @@ class TestMemoryProfilerE2E(TestCase):
# If we profile the first step then gradients will not have been
# created when we call `model.forward`, so if we don't call `.backward`
# then gradients are never created.
- self._run_and_check_parameters_and_gradients(inner_fn=fwd_only, model=model, grads_none=True)
+ self._run_and_check_parameters_and_gradients(
+ inner_fn=fwd_only, model=model, grads_none=True
+ )
# On the first step we must rely on `AccumulateGrad`, since gradients
# did not exist when `model.forward` was called.
@@ -1461,7 +1466,6 @@ class TestMemoryProfilerE2E(TestCase):
return f"{size / 1024:3.1f} kB"
return f"{size // 1024} kB"
-
# We generate sequential IDs for Tensors; however platforms vary
# slightly in the exact computation executed. If this results in
# tensor creation the IDs will be shifted and the unit test will fail.
@@ -1477,7 +1481,6 @@ class TestMemoryProfilerE2E(TestCase):
f"{action.name.lower():<25} {format_action(action, key, version):<25} "
f"{id_for_testing(key):>3}(v{version}) {format_size(size):>15}"
for _, action, (key, version), size in prof._memory_profile().timeline
-
# We generally don't care about tiny allocations during memory
# profiling and they add a lot of noise to the unit test.
if size > 1024
@@ -1547,7 +1550,8 @@ class TestMemoryProfilerE2E(TestCase):
destroy ??? 29(v1) 1024 kB
destroy GRADIENT 16(v0) 128 kB
destroy GRADIENT 17(v0) 2 kB
- destroy GRADIENT 13(v0) 1024 kB""")
+ destroy GRADIENT 13(v0) 1024 kB""",
+ )
def test_memory_timeline_no_id(self) -> None:
# On CPU the default behavior is to simply forward to malloc. That
@@ -1594,7 +1598,9 @@ class TestMemoryProfilerE2E(TestCase):
if not torch.cuda.is_available():
expected = expected[2:]
for event in expected:
- self.assertTrue(event in actual, f"event: {event} was not found in actual.")
+ self.assertTrue(
+ event in actual, f"event: {event} was not found in actual."
+ )
else:
self.assertEqual(
actual,
diff --git a/test/profiler/test_profiler.py b/test/profiler/test_profiler.py
index 5f367063c0..f46087c6f0 100644
--- a/test/profiler/test_profiler.py
+++ b/test/profiler/test_profiler.py
@@ -4,29 +4,29 @@ import gc
import json
import os
import re
+import subprocess
+import sys
import tempfile
import textwrap
import threading
import unittest
-from unittest.mock import patch
import weakref
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional
+from unittest.mock import patch
import expecttest
-import subprocess
-import sys
import torch
import torch.nn as nn
import torch.optim
import torch.utils.data
import torch.utils.data.datapipes as dp
+from torch._C._profiler import _TensorMetadata
from torch.autograd import (
_record_function_with_args_enter,
_record_function_with_args_exit,
)
-from torch.autograd.profiler import profile as _profile
-from torch.autograd.profiler import KinetoStepTracker
+from torch.autograd.profiler import KinetoStepTracker, profile as _profile
from torch.autograd.profiler_legacy import profile as _profile_legacy
from torch.profiler import (
_utils,
@@ -39,7 +39,6 @@ from torch.profiler import (
record_function,
supported_activities,
)
-from torch._C._profiler import _TensorMetadata
from torch.profiler._pattern_matcher import (
Conv2dBiasFollowedByBatchNorm2dPattern,
ExtraCUDACopyPattern,
@@ -56,18 +55,18 @@ from torch.profiler._pattern_matcher import (
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import skipCUDAVersionIn
from torch.testing._internal.common_utils import (
+ instantiate_parametrized_tests,
IS_JETSON,
IS_WINDOWS,
- instantiate_parametrized_tests,
parametrize,
run_tests,
+ skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_WITH_ASAN,
TEST_WITH_CROSSREF,
TEST_WITH_ROCM,
TestCase,
- skipIfTorchDynamo,
)
Json = Dict[str, Any]
@@ -88,11 +87,9 @@ from torch._C._profiler import _ExperimentalConfig, _ExtraFields_PyCall
@unittest.skipIf(IS_WINDOWS, "Test is flaky on Windows")
@unittest.skipIf(not torch.cuda.is_available(), "CUDA is required")
class TestProfilerCUDA(TestCase):
-
@skipCUDAVersionIn([(11, 5)]) # https://github.com/pytorch/pytorch/issues/69023
def test_mem_leak(self):
- """Checks that there's no memory leak when using profiler with CUDA
- """
+ """Checks that there's no memory leak when using profiler with CUDA"""
t = torch.rand(1, 1).cuda()
p = psutil.Process()
last_rss = collections.deque(maxlen=5)
@@ -108,12 +105,15 @@ class TestProfilerCUDA(TestCase):
# with CUDA events leaking the increase in memory was ~7 MB between
# profiler invocations above
is_increasing = all(
- last_rss[idx] > last_rss[idx - 1] for idx in range(1, len(last_rss)))
+ last_rss[idx] > last_rss[idx - 1] for idx in range(1, len(last_rss))
+ )
max_diff = -1
for idx in range(1, len(last_rss)):
max_diff = max(max_diff, last_rss[idx] - last_rss[idx - 1])
- self.assertTrue(not (is_increasing and max_diff > 100 * 1024),
- msg=f'memory usage is increasing, {str(last_rss)}')
+ self.assertTrue(
+ not (is_increasing and max_diff > 100 * 1024),
+ msg=f"memory usage is increasing, {str(last_rss)}",
+ )
def test_custom_module_input_op_ids(self):
class MyFunc(torch.autograd.Function):
@@ -124,7 +124,7 @@ class TestProfilerCUDA(TestCase):
@staticmethod
def backward(ctx, gO):
- x, = ctx.saved_tensors
+ (x,) = ctx.saved_tensors
return x
def custom_layer(input_ten):
@@ -147,7 +147,11 @@ class TestProfilerCUDA(TestCase):
# repro taken from #75504
# Launch in a separate process to catch hanging/illegal memory errors
# and to make sure CUPTI isn't already initialized.
- p = subprocess.check_call([sys.executable, "-c", """
+ p = subprocess.check_call(
+ [
+ sys.executable,
+ "-c",
+ """
import os
import torch
from torch.profiler import ProfilerActivity, profile
@@ -170,14 +174,17 @@ with profile(activities=[ProfilerActivity.CPU]):
with profile(activities=[ProfilerActivity.CUDA]):
add_one_graphed(zeros)
-"""], universal_newlines=True, timeout=60)
+""",
+ ],
+ universal_newlines=True,
+ timeout=60,
+ )
# ^ this will throw an exception if the script fails.
@unittest.skipIf(not torch.profiler.itt.is_available(), "ITT is required")
class TestProfilerITT(TestCase):
-
def test_custom_module_input_op_ids(self):
class MyFunc(torch.autograd.Function):
@staticmethod
@@ -187,7 +194,7 @@ class TestProfilerITT(TestCase):
@staticmethod
def backward(ctx, gO):
- x, = ctx.saved_tensors
+ (x,) = ctx.saved_tensors
return x
def custom_layer(input_ten):
@@ -203,12 +210,17 @@ class TestProfilerITT(TestCase):
q = s.sum()
q.backward()
+
class TestRecordFunction(TestCase):
def _record_function_with_param(self):
u = torch.randn(3, 4, 5, requires_grad=True)
- with _profile(with_stack=True, use_kineto=kineto_available(), record_shapes=True) as prof:
+ with _profile(
+ with_stack=True, use_kineto=kineto_available(), record_shapes=True
+ ) as prof:
with record_function("## TEST 1 ##", "1, 2, 3"):
- rf_handle = _record_function_with_args_enter("## TEST 2 ##", 1, False, 2.5, [u, u], "hello", u)
+ rf_handle = _record_function_with_args_enter(
+ "## TEST 2 ##", 1, False, 2.5, [u, u], "hello", u
+ )
_record_function_with_args_exit(rf_handle)
with record_function("## TEST 3 ##"):
rf_handle = _record_function_with_args_enter("## TEST 4 ##")
@@ -240,7 +252,9 @@ class TestRecordFunction(TestCase):
self.assertTrue(found_test_4)
def test_datapipe_with_record_function(self):
- with _profile(with_stack=True, use_kineto=kineto_available(), record_shapes=True) as prof:
+ with _profile(
+ with_stack=True, use_kineto=kineto_available(), record_shapes=True
+ ) as prof:
input_dp1 = dp.iter.IterableWrapper(range(4))
input_dp2 = dp.iter.IterableWrapper(range(4, 8))
input_dp3 = dp.iter.IterableWrapper(range(8, 12))
@@ -298,7 +312,9 @@ class TestRecordFunction(TestCase):
self.assertEqual(list(range(10)), list(it_dp2))
def test_datapipe_with_record_function_fork(self):
- with _profile(with_stack=True, use_kineto=kineto_available(), record_shapes=True) as prof:
+ with _profile(
+ with_stack=True, use_kineto=kineto_available(), record_shapes=True
+ ) as prof:
input_dp = dp.iter.IterableWrapper(range(10))
dp1, dp2, dp3 = input_dp.fork(num_instances=3)
output1 = list(dp1)
@@ -323,8 +339,19 @@ class TestExecutionTrace(TestCase):
inf_val = float("inf")
neg_inf_val = float("-inf")
nan_val = float("nan")
- rf_handle = _record_function_with_args_enter("## TEST 2 ##", 1, False, 2.5, [u, u], (u, u),
- "hello", u, inf_val, neg_inf_val, nan_val)
+ rf_handle = _record_function_with_args_enter(
+ "## TEST 2 ##",
+ 1,
+ False,
+ 2.5,
+ [u, u],
+ (u, u),
+ "hello",
+ u,
+ inf_val,
+ neg_inf_val,
+ nan_val,
+ )
x = torch.randn(10, 10, requires_grad=True)
if use_cuda:
x = x.cuda()
@@ -350,24 +377,30 @@ class TestExecutionTrace(TestCase):
def get_execution_trace_rf_ids(self, nodes: List[Json]) -> List[int]:
"""Returns a sorted list of rf_id (record function ids) in execution trace"""
+
def get_rf_id(node):
- attrs = node['attrs']
+ attrs = node["attrs"]
for a in attrs:
- if a['name'] == 'rf_id':
- return a['value']
+ if a["name"] == "rf_id":
+ return a["value"]
return None
+
rf_ids_ = (
- get_rf_id(n) for n in nodes
- if n['name'] != "[pytorch|profiler|execution_trace|process]"
- and n['name'] != "[pytorch|profiler|execution_trace|thread]")
+ get_rf_id(n)
+ for n in nodes
+ if n["name"] != "[pytorch|profiler|execution_trace|process]"
+ and n["name"] != "[pytorch|profiler|execution_trace|thread]"
+ )
return sorted(rf_id for rf_id in rf_ids_ if rf_id is not None)
-
def get_kineto_rf_ids(self, events: List[Json]) -> List[int]:
"""Returns a sorted list of Record function IDs for CPU operators and user annotations"""
- ops_and_annotations = (e for e in events if e.get("cat", "") in ['cpu_op', 'user_annotation'])
- return sorted(e.get("args", {}).get("Record function id", -1) for e in ops_and_annotations)
-
+ ops_and_annotations = (
+ e for e in events if e.get("cat", "") in ["cpu_op", "user_annotation"]
+ )
+ return sorted(
+ e.get("args", {}).get("Record function id", -1) for e in ops_and_annotations
+ )
@unittest.skipIf(not kineto_available(), "Kineto is required")
def test_execution_trace_with_kineto(self):
@@ -379,19 +412,18 @@ class TestExecutionTrace(TestCase):
use_cuda = torch.profiler.ProfilerActivity.CUDA in supported_activities()
# Create a temp file to save execution trace and kineto data.
- fp = tempfile.NamedTemporaryFile('w+t', suffix='.et.json', delete=False)
+ fp = tempfile.NamedTemporaryFile("w+t", suffix=".et.json", delete=False)
fp.close()
- kt = tempfile.NamedTemporaryFile(mode="w+t", suffix=".kineto.json", delete=False)
+ kt = tempfile.NamedTemporaryFile(
+ mode="w+t", suffix=".kineto.json", delete=False
+ )
kt.close()
with profile(
activities=supported_activities(),
schedule=torch.profiler.schedule(
- skip_first=3,
- wait=1,
- warmup=1,
- active=2,
- repeat=1),
+ skip_first=3, wait=1, warmup=1, active=2, repeat=1
+ ),
on_trace_ready=trace_handler,
execution_trace_observer=(
ExecutionTraceObserver().register_callback(fp.name)
@@ -401,10 +433,7 @@ class TestExecutionTrace(TestCase):
with record_function(f"## LOOP {idx} ##"):
self.payload(use_cuda=use_cuda)
p.step()
- self.assertEqual(
- fp.name,
- p.execution_trace_observer.get_output_file_path()
- )
+ self.assertEqual(fp.name, p.execution_trace_observer.get_output_file_path())
# Uncomment for debugging
# print("Output kineto = ", kt.name)
@@ -443,15 +472,14 @@ class TestExecutionTrace(TestCase):
rf_ids_et,
rf_ids_kineto,
msg=f"ET and kineto rf_id should exactly match\n"
- f" rf_ids_et = {rf_ids_et}\n"
- f" rf_ids_kineto = {rf_ids_kineto}\n"
+ f" rf_ids_et = {rf_ids_et}\n"
+ f" rf_ids_kineto = {rf_ids_kineto}\n",
)
-
def test_execution_trace_alone(self):
use_cuda = torch.profiler.ProfilerActivity.CUDA in supported_activities()
# Create a temp file to save execution trace data.
- fp = tempfile.NamedTemporaryFile('w+t', suffix='.et.json', delete=False)
+ fp = tempfile.NamedTemporaryFile("w+t", suffix=".et.json", delete=False)
fp.close()
expected_loop_events = 0
@@ -483,9 +511,8 @@ class TestExecutionTrace(TestCase):
assert found_root_node
assert loop_count == expected_loop_events
- @unittest.skipIf(IS_WINDOWS, 'torch.compile does not support WINDOWS')
+ @unittest.skipIf(IS_WINDOWS, "torch.compile does not support WINDOWS")
def test_execution_trace_with_pt2(self):
-
class ConvAndRelu(nn.Module):
def __init__(self) -> None:
super().__init__()
@@ -498,7 +525,7 @@ class TestExecutionTrace(TestCase):
return x
# Create a temp file to save execution trace data.
- fp = tempfile.NamedTemporaryFile('w+t', suffix='.et.json', delete=False)
+ fp = tempfile.NamedTemporaryFile("w+t", suffix=".et.json", delete=False)
fp.close()
test_module = torch.compile(ConvAndRelu())
@@ -524,7 +551,7 @@ class TestExecutionTrace(TestCase):
def test_execution_trace_start_stop(self):
use_cuda = torch.profiler.ProfilerActivity.CUDA in supported_activities()
# Create a temp file to save execution trace data.
- fp = tempfile.NamedTemporaryFile('w+t', suffix='.et.json', delete=False)
+ fp = tempfile.NamedTemporaryFile("w+t", suffix=".et.json", delete=False)
fp.close()
expected_loop_events = 0
et = ExecutionTraceObserver()
@@ -565,7 +592,7 @@ class TestExecutionTrace(TestCase):
for idx in range(10):
if idx in iter_list:
# Create a temp file to save execution trace data.
- fp = tempfile.NamedTemporaryFile('w+t', suffix='.et.json', delete=False)
+ fp = tempfile.NamedTemporaryFile("w+t", suffix=".et.json", delete=False)
fp.close()
output_files.append(fp.name)
et = ExecutionTraceObserver()
@@ -592,7 +619,7 @@ class TestExecutionTrace(TestCase):
assert event_count == expected_loop_events
def test_execution_trace_no_capture(self):
- fp = tempfile.NamedTemporaryFile('w+t', suffix='.et.json', delete=False)
+ fp = tempfile.NamedTemporaryFile("w+t", suffix=".et.json", delete=False)
fp.close()
et = ExecutionTraceObserver()
et.register_callback(fp.name)
@@ -634,11 +661,11 @@ class TestExecutionTrace(TestCase):
@instantiate_parametrized_tests
class TestProfiler(TestCase):
-
- @unittest.skipIf(TEST_WITH_CROSSREF, "crossref intercepts calls and changes the callsite.")
+ @unittest.skipIf(
+ TEST_WITH_CROSSREF, "crossref intercepts calls and changes the callsite."
+ )
def test_source(self):
- """Checks that source code attribution works for eager, TS and autograd mode
- """
+ """Checks that source code attribution works for eager, TS and autograd mode"""
# avoid automatic inlining
prev_opt = torch._C._get_graph_executor_optimize()
torch._C._set_graph_executor_optimize(False)
@@ -656,7 +683,9 @@ class TestProfiler(TestCase):
class DummyModule(nn.Module):
def __init__(self):
super().__init__()
- self.conv = torch.nn.Conv2d(3, 2, kernel_size=1, stride=2, padding=3, bias=False)
+ self.conv = torch.nn.Conv2d(
+ 3, 2, kernel_size=1, stride=2, padding=3, bias=False
+ )
def forward(self, x):
return self.conv(x)
@@ -666,7 +695,11 @@ class TestProfiler(TestCase):
def call_module(x):
return mod(x)
- with _profile(with_stack=True, use_kineto=kineto_available(), experimental_config=_ExperimentalConfig(verbose=True)) as p:
+ with _profile(
+ with_stack=True,
+ use_kineto=kineto_available(),
+ experimental_config=_ExperimentalConfig(verbose=True),
+ ) as p:
x = torch.randn(10, 10, requires_grad=True)
y = torch.randn(10, 10, requires_grad=True)
z = x + y
@@ -681,10 +714,16 @@ class TestProfiler(TestCase):
for e in p.function_events:
if "aten::add" in e.name or "AddBackward" in e.name:
self.assertTrue(any("test_profiler" in entry for entry in e.stack))
- self.assertTrue(any((
- "test_source" in entry or
- "ts_method_1" in entry or
- "ts_method_2" in entry) for entry in e.stack))
+ self.assertTrue(
+ any(
+ (
+ "test_source" in entry
+ or "ts_method_1" in entry
+ or "ts_method_2" in entry
+ )
+ for entry in e.stack
+ )
+ )
# TODO: https://github.com/pytorch/kineto/issues/617
if kineto_available() and not IS_WINDOWS:
@@ -695,12 +734,17 @@ class TestProfiler(TestCase):
def extract(pattern: str):
matches = [e for e in events if re.search(pattern, e["name"])]
- self.assertEqual(len(matches), 1, repr([e["name"] for e in matches]))
+ self.assertEqual(
+ len(matches), 1, repr([e["name"] for e in matches])
+ )
return matches[0]
module_event = extract(r"DummyModule_0")
wrapper_event = extract(r"call_module")
- self.assertEqual(module_event["args"]["Python parent id"], wrapper_event["args"]["Python id"])
+ self.assertEqual(
+ module_event["args"]["Python parent id"],
+ wrapper_event["args"]["Python id"],
+ )
torch._C._set_graph_executor_optimize(prev_opt)
@@ -708,7 +752,7 @@ class TestProfiler(TestCase):
"name,thread_spec",
{
"basic": ((False, False),),
- "multiple_preexisting": ((False, False), ) * 2,
+ "multiple_preexisting": ((False, False),) * 2,
"open_in_scope": ((True, False),),
"close_in_scope": ((False, True),),
"complex": (
@@ -717,18 +761,15 @@ class TestProfiler(TestCase):
(False, False),
(False, False),
(False, False),
-
# some of which finish during profiling
(False, True),
(False, True),
-
# And the profiled section is also multithreaded
(True, False),
(True, True),
-
),
}.items(),
- name_fn=lambda name, thread_spec: name
+ name_fn=lambda name, thread_spec: name,
)
@parametrize("work_in_main_thread", [True, False])
def test_source_multithreaded(self, name, thread_spec, work_in_main_thread):
@@ -746,7 +787,6 @@ class TestProfiler(TestCase):
end_barrier = threading.Barrier(num_threads, timeout=timeout)
class Task(threading.Thread):
-
def __init__(self):
self._end_gate = threading.Event()
super().__init__(daemon=True)
@@ -761,7 +801,6 @@ class TestProfiler(TestCase):
@staticmethod
def _run(end_gate=None):
-
def known_preexisting_function():
start_barrier.wait()
@@ -837,25 +876,39 @@ class TestProfiler(TestCase):
self.assertFalse(t.is_alive())
roots = prof.profiler.kineto_results.experimental_event_tree()
- nodes = [node for node in _utils.traverse_dfs(roots) if isinstance(node.extra_fields, _ExtraFields_PyCall)]
+ nodes = [
+ node
+ for node in _utils.traverse_dfs(roots)
+ if isinstance(node.extra_fields, _ExtraFields_PyCall)
+ ]
tid_counts = collections.Counter([node.start_tid for node in nodes])
- prior_threads = sum(not start_under_profiler for start_under_profiler, _ in thread_spec)
+ prior_threads = sum(
+ not start_under_profiler for start_under_profiler, _ in thread_spec
+ )
expected_threads = prior_threads + 1
- self.assertEqual(len(tid_counts), expected_threads, f"{expected_threads}, {tid_counts}")
+ self.assertEqual(
+ len(tid_counts), expected_threads, f"{expected_threads}, {tid_counts}"
+ )
self.assertEqual(len(nodes), sum(tid_counts.values()))
# Profiler uses uint64_t max as a placeholder until TID can be determined.
- no_tid = 2 ** 64 - 1
+ no_tid = 2**64 - 1
self.assertFalse(no_tid in tid_counts)
worker_threads = prior_threads + (1 if work_in_main_thread else 0)
- observed_preexisting = [node.start_tid for node in nodes if "known_preexisting_function" in node.name]
+ observed_preexisting = [
+ node.start_tid
+ for node in nodes
+ if "known_preexisting_function" in node.name
+ ]
self.assertEqual(len(observed_preexisting), worker_threads)
self.assertEqual(len(observed_preexisting), len(set(observed_preexisting)))
- observed_during_run = [node.start_tid for node in nodes if "invoked_during_run" in node.name]
+ observed_during_run = [
+ node.start_tid for node in nodes if "invoked_during_run" in node.name
+ ]
self.assertEqual(len(observed_during_run), worker_threads)
self.assertEqual(len(observed_during_run), len(set(observed_during_run)))
@@ -878,7 +931,9 @@ class TestProfiler(TestCase):
self.assertGreater(profiler_stats.profiler_enable_call_duration_us, 0)
self.assertGreater(profiler_stats.profiler_disable_call_duration_us, 0)
self.assertGreater(profiler_stats.parse_kineto_call_duration_us, 0)
- self.assertGreater(profiler_stats.function_events_build_tree_call_duration_us, 0)
+ self.assertGreater(
+ profiler_stats.function_events_build_tree_call_duration_us, 0
+ )
@unittest.skipIf(not kineto_available(), "Kineto is required")
def test_kineto(self):
@@ -890,7 +945,9 @@ class TestProfiler(TestCase):
with _profile(use_cuda=use_cuda, use_kineto=True) as p:
self.payload(use_cuda=use_cuda)
output = p.key_averages().table(
- sort_by="self_cuda_time_total" if use_cuda else "self_cpu_time_total", row_limit=-1)
+ sort_by="self_cuda_time_total" if use_cuda else "self_cpu_time_total",
+ row_limit=-1,
+ )
# print(output)
found_gemm = False
found_memcpy = False
@@ -914,10 +971,7 @@ class TestProfiler(TestCase):
@unittest.skipIf(not TEST_MULTIGPU, "Multiple GPUs needed")
@unittest.skipIf(TEST_WITH_ROCM, "Not supported on ROCm")
def test_kineto_multigpu(self):
- with profile(
- activities=[
- ProfilerActivity.CPU,
- ProfilerActivity.CUDA]) as prof:
+ with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA]) as prof:
for gpu_id in [0, 1]:
x = torch.randn(10, 10).cuda(gpu_id)
y = torch.randn(10, 10).cuda(gpu_id)
@@ -943,7 +997,9 @@ class TestProfiler(TestCase):
def test_memory_profiler(self):
def run_profiler(tensor_creation_fn):
# collecting allocs / deallocs
- with _profile(profile_memory=True, record_shapes=True, use_kineto=kineto_available()) as prof:
+ with _profile(
+ profile_memory=True, record_shapes=True, use_kineto=kineto_available()
+ ) as prof:
x = None
with record_function("test_user_scope_alloc"):
x = tensor_creation_fn()
@@ -984,7 +1040,7 @@ class TestProfiler(TestCase):
],
deallocs=[
"test_user_scope_dealloc",
- ]
+ ],
)
if kineto_available():
@@ -1029,7 +1085,7 @@ class TestProfiler(TestCase):
],
deallocs=[
"test_user_scope_dealloc",
- ]
+ ],
)
check_metrics(
stats,
@@ -1037,7 +1093,7 @@ class TestProfiler(TestCase):
allocs=[
"aten::rand",
"aten::empty",
- ]
+ ],
)
if torch.backends.mkldnn.is_available():
@@ -1054,7 +1110,7 @@ class TestProfiler(TestCase):
],
deallocs=[
"test_user_scope_dealloc",
- ]
+ ],
)
# check top-level memory events
@@ -1069,24 +1125,15 @@ class TestProfiler(TestCase):
check_metrics(
stats,
"cpu_memory_usage",
- allocs=[
- "aten::rand",
- "aten::empty"
- ],
- deallocs=[
- "[memory]"
- ]
+ allocs=["aten::rand", "aten::empty"],
+ deallocs=["[memory]"],
)
if torch.cuda.is_available():
- check_metrics(
- stats,
- "cuda_memory_usage",
- deallocs=[
- "[memory]"
- ]
- )
+ check_metrics(stats, "cuda_memory_usage", deallocs=["[memory]"])
- @unittest.skipIf(IS_JETSON, "Jetson has a guard against OOM since host and gpu memory are shared")
+ @unittest.skipIf(
+ IS_JETSON, "Jetson has a guard against OOM since host and gpu memory are shared"
+ )
def test_oom_tracing(self):
def run_profiler(tensor_creation_fn):
with _profile(profile_memory=True, record_shapes=True) as prof:
@@ -1124,9 +1171,6 @@ class TestProfiler(TestCase):
prof = run_profiler(create_cuda_tensor_oom)
check_trace(fname)
-
-
-
@unittest.skipIf(not kineto_available(), "Kineto is required")
def test_module_hierarchy(self):
class A(nn.Module):
@@ -1163,12 +1207,18 @@ class TestProfiler(TestCase):
op_to_module_hierarchy = {}
op_to_module_hierarchy["aten::sub"] = ["TOP(C)::forward.A0(A)::forward."]
op_to_module_hierarchy["aten::mul"] = [
- "TOP(C)::forward.A0(A)::forward.SELF(A)::forward_impl_.SELF(A)::my_new_method."]
+ "TOP(C)::forward.A0(A)::forward.SELF(A)::forward_impl_.SELF(A)::my_new_method."
+ ]
op_to_module_hierarchy["aten::add"] = [
"TOP(C)::forward.A0(A)::forward.SELF(A)::forward_impl_.",
- "TOP(C)::forward.SELF(C)::call_b.B0(B)::forward.", "TOP(C)::forward."]
+ "TOP(C)::forward.SELF(C)::call_b.B0(B)::forward.",
+ "TOP(C)::forward.",
+ ]
with TemporaryFileName(mode="w+") as fname:
- with profile(activities=[torch.profiler.ProfilerActivity.CPU], with_modules=True,) as prof:
+ with profile(
+ activities=[torch.profiler.ProfilerActivity.CPU],
+ with_modules=True,
+ ) as prof:
model(input_a, input_b)
prof.export_chrome_trace(fname)
with open(fname) as f:
@@ -1186,8 +1236,8 @@ class TestProfiler(TestCase):
assert hierarchy in op_to_module_hierarchy[op_name]
def test_high_level_trace(self):
- """Checks that python side high level events are recorded.
- """
+ """Checks that python side high level events are recorded."""
+
class RepeatedDataset(torch.utils.data.Dataset):
def __init__(self, N, D_in, D_out):
self.N = N
@@ -1226,7 +1276,7 @@ class TestProfiler(TestCase):
N, D_in, H, D_out = 8, 10, 5, 2
model = TwoLayerNet(D_in, H, D_out)
- criterion = torch.nn.MSELoss(reduction='sum')
+ criterion = torch.nn.MSELoss(reduction="sum")
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)
ds = RepeatedDataset(N, D_in, D_out)
dataloader = torch.utils.data.DataLoader(ds, batch_size=1)
@@ -1247,9 +1297,14 @@ class TestProfiler(TestCase):
if "#" in e.name:
key = e.name
if key in expected_event_count.keys():
- actual_event_count[key] = actual_event_count.setdefault(key, 0) + 1
+ actual_event_count[key] = (
+ actual_event_count.setdefault(key, 0) + 1
+ )
for key, count in expected_event_count.items():
- self.assertTrue((key in actual_event_count.keys()) and (count == actual_event_count[key]))
+ self.assertTrue(
+ (key in actual_event_count.keys())
+ and (count == actual_event_count[key])
+ )
with _profile(use_kineto=kineto_available()) as prof:
train()
@@ -1257,7 +1312,7 @@ class TestProfiler(TestCase):
# "+1" because the final iteration will enter __next__ but skip the loop body.
"enumerate(DataLoader)#_SingleProcessDataLoaderIter.__next__": (N + 1),
"Optimizer.step#SGD.step": N,
- "Optimizer.zero_grad#SGD.zero_grad": N
+ "Optimizer.zero_grad#SGD.zero_grad": N,
}
judge(expected_event_count, prof)
@@ -1274,7 +1329,7 @@ class TestProfiler(TestCase):
expected_event_count = {
"enumerate(DataLoader)#_SingleProcessDataLoaderIter.__next__": (N + 1),
"Optimizer.step#CustomSGD.step": N,
- "Optimizer.zero_grad#CustomSGD.zero_grad": N
+ "Optimizer.zero_grad#CustomSGD.zero_grad": N,
}
judge(expected_event_count, prof)
@@ -1325,7 +1380,9 @@ class TestProfiler(TestCase):
def trace_handler(p):
output = p.key_averages().table(
- sort_by="self_cuda_time_total" if use_cuda else "self_cpu_time_total", row_limit=-1)
+ sort_by="self_cuda_time_total" if use_cuda else "self_cpu_time_total",
+ row_limit=-1,
+ )
# print(output)
# p.export_chrome_trace("/tmp/test_trace_" + str(called_num[0]) + ".json")
called_num[0] += 1
@@ -1334,11 +1391,8 @@ class TestProfiler(TestCase):
with profile(
activities=supported_activities(),
- schedule=torch.profiler.schedule(
- wait=1,
- warmup=1,
- active=2),
- on_trace_ready=trace_handler
+ schedule=torch.profiler.schedule(wait=1, warmup=1, active=2),
+ on_trace_ready=trace_handler,
) as p:
for idx in range(8):
self.payload(use_cuda=use_cuda)
@@ -1348,21 +1402,18 @@ class TestProfiler(TestCase):
self.assertEqual(KinetoStepTracker.current_step(), initial_step + 8)
# case without schedule
- with profile(
- activities=supported_activities()
- ) as p:
+ with profile(activities=supported_activities()) as p:
self.payload(use_cuda=use_cuda)
self.payload(use_cuda=use_cuda)
output = p.key_averages().table(
- sort_by="self_cuda_time_total" if use_cuda else "self_cpu_time_total", row_limit=-1)
+ sort_by="self_cuda_time_total" if use_cuda else "self_cpu_time_total",
+ row_limit=-1,
+ )
# print(output)
test_schedule = torch.profiler.schedule(
- skip_first=2,
- wait=1,
- warmup=1,
- active=2,
- repeat=2)
+ skip_first=2, wait=1, warmup=1, active=2, repeat=2
+ )
test_schedule_expected_outputs = [
ProfilerAction.NONE,
ProfilerAction.NONE,
@@ -1414,10 +1465,7 @@ class TestProfiler(TestCase):
with profile(
activities=supported_activities(),
- schedule=torch.profiler.schedule(
- wait=1,
- warmup=1,
- active=2),
+ schedule=torch.profiler.schedule(wait=1, warmup=1, active=2),
) as p:
for idx in range(niters):
run_batch()
@@ -1426,7 +1474,11 @@ class TestProfiler(TestCase):
self.assertEqual(KinetoStepTracker.current_step(), initial_step + 2 * niters)
def test_export_stacks(self):
- with _profile(with_stack=True, use_kineto=kineto_available(), experimental_config=_ExperimentalConfig(verbose=True)) as p:
+ with _profile(
+ with_stack=True,
+ use_kineto=kineto_available(),
+ experimental_config=_ExperimentalConfig(verbose=True),
+ ) as p:
x = torch.randn(10, 10)
y = torch.randn(10, 10)
z = torch.mm(x, y)
@@ -1454,17 +1506,10 @@ class TestProfiler(TestCase):
with TemporaryDirectoryName() as dname:
with profile(
- activities=[
- torch.profiler.ProfilerActivity.CPU
- ] + ([
- torch.profiler.ProfilerActivity.CUDA
- ] if use_cuda else []),
- schedule=torch.profiler.schedule(
- wait=1,
- warmup=1,
- active=2,
- repeat=3),
- on_trace_ready=torch.profiler.tensorboard_trace_handler(dname)
+ activities=[torch.profiler.ProfilerActivity.CPU]
+ + ([torch.profiler.ProfilerActivity.CUDA] if use_cuda else []),
+ schedule=torch.profiler.schedule(wait=1, warmup=1, active=2, repeat=3),
+ on_trace_ready=torch.profiler.tensorboard_trace_handler(dname),
) as p:
for _ in range(18):
self.payload(use_cuda=use_cuda)
@@ -1473,27 +1518,25 @@ class TestProfiler(TestCase):
self.assertTrue(os.path.exists(dname))
file_num = 0
for file_name in os.listdir(dname):
- parts = file_name.split('.')
+ parts = file_name.split(".")
self.assertTrue(len(parts) > 4)
- self.assertTrue(parts[-4].isdigit() and int(parts[-4]) > 0, "Wrong tracing file name pattern")
- self.assertEqual(parts[-3:], ['pt', 'trace', 'json'])
+ self.assertTrue(
+ parts[-4].isdigit() and int(parts[-4]) > 0,
+ "Wrong tracing file name pattern",
+ )
+ self.assertEqual(parts[-3:], ["pt", "trace", "json"])
file_num += 1
self.assertEqual(file_num, 3)
# test case for gzip file format
with TemporaryDirectoryName() as dname:
p = profile(
- activities=[
- torch.profiler.ProfilerActivity.CPU
- ] + ([
- torch.profiler.ProfilerActivity.CUDA
- ] if use_cuda else []),
- schedule=torch.profiler.schedule(
- wait=1,
- warmup=1,
- active=2,
- repeat=3),
- on_trace_ready=torch.profiler.tensorboard_trace_handler(dname, use_gzip=True)
+ activities=[torch.profiler.ProfilerActivity.CPU]
+ + ([torch.profiler.ProfilerActivity.CUDA] if use_cuda else []),
+ schedule=torch.profiler.schedule(wait=1, warmup=1, active=2, repeat=3),
+ on_trace_ready=torch.profiler.tensorboard_trace_handler(
+ dname, use_gzip=True
+ ),
)
p.start()
for _ in range(18):
@@ -1504,10 +1547,13 @@ class TestProfiler(TestCase):
self.assertTrue(os.path.exists(dname))
file_num = 0
for file_name in os.listdir(dname):
- parts = file_name.split('.')
+ parts = file_name.split(".")
self.assertTrue(len(parts) > 4)
- self.assertTrue(parts[-5].isdigit() and int(parts[-5]) > 0, "Wrong tracing file name pattern")
- self.assertEqual(parts[-4:], ['pt', 'trace', 'json', 'gz'])
+ self.assertTrue(
+ parts[-5].isdigit() and int(parts[-5]) > 0,
+ "Wrong tracing file name pattern",
+ )
+ self.assertEqual(parts[-4:], ["pt", "trace", "json", "gz"])
file_num += 1
self.assertEqual(file_num, 3)
@@ -1574,11 +1620,12 @@ class TestProfiler(TestCase):
a = torch.ones((64, 32), dtype=torch.float32)
c = torch.cat([a, a]).sin()
with TemporaryFileName(mode="w+") as fname:
-
prof.export_chrome_trace(fname)
with open(fname) as f:
j = json.load(f)
- op_events = [e for e in j["traceEvents"] if e.get("cat", "") == "cpu_op"]
+ op_events = [
+ e for e in j["traceEvents"] if e.get("cat", "") == "cpu_op"
+ ]
for e in op_events:
args = e["args"]
if e["name"] == "aten::ones":
@@ -1598,13 +1645,14 @@ class TestProfiler(TestCase):
self.assertGreaterEqual(
args.get("Record function id", -1),
0,
- f"Failed finding record funciont for op = {e}"
+                    f"Failed finding record function for op = {e}",
)
-
def test_profiler_fwd_bwd_link(self):
with _profile(use_kineto=True) as prof:
- t1, t2 = torch.ones(1, requires_grad=True), torch.ones(1, requires_grad=True)
+ t1, t2 = torch.ones(1, requires_grad=True), torch.ones(
+ 1, requires_grad=True
+ )
z = torch.add(t1, t2)
y = torch.ones(1)
loss = torch.nn.functional.binary_cross_entropy_with_logits(z, y)
@@ -1620,7 +1668,12 @@ class TestProfiler(TestCase):
for e in events:
if e["ph"] == "X":
ts_to_name[e["ts"]] = e["name"]
- if "cat" in e and "name" in e and e["cat"] == "fwdbwd" and e["name"] == "fwdbwd":
+ if (
+ "cat" in e
+ and "name" in e
+ and e["cat"] == "fwdbwd"
+ and e["name"] == "fwdbwd"
+ ):
if e["ph"] == "s":
flow_s_to_ts[e["id"]] = e["ts"]
elif e["ph"] == "f":
@@ -1636,8 +1689,15 @@ class TestProfiler(TestCase):
f_ts_1 = flow_f_to_ts[1]
s_ts_2 = flow_s_to_ts[2]
f_ts_2 = flow_f_to_ts[2]
- self.assertTrue(all(ts in ts_to_name.keys() for ts in [s_ts_1, f_ts_1, s_ts_2, f_ts_2]))
- self.assertTrue(ts_to_name[s_ts_1] == "aten::binary_cross_entropy_with_logits")
+ self.assertTrue(
+ all(
+ ts in ts_to_name.keys()
+ for ts in [s_ts_1, f_ts_1, s_ts_2, f_ts_2]
+ )
+ )
+ self.assertTrue(
+ ts_to_name[s_ts_1] == "aten::binary_cross_entropy_with_logits"
+ )
self.assertTrue(ts_to_name[s_ts_2] == "aten::add")
def test_profiler_disable_fwd_bwd_link(self):
@@ -1645,7 +1705,9 @@ class TestProfiler(TestCase):
torch._C._profiler._set_fwd_bwd_enabled_val(False)
with _profile(use_kineto=True) as prof:
- t1, t2 = torch.ones(1, requires_grad=True), torch.ones(1, requires_grad=True)
+ t1, t2 = torch.ones(1, requires_grad=True), torch.ones(
+ 1, requires_grad=True
+ )
z = torch.add(t1, t2)
y = torch.ones(1)
loss = torch.nn.functional.binary_cross_entropy_with_logits(z, y)
@@ -1678,9 +1740,11 @@ class TestProfiler(TestCase):
torch.add(t1, t2)
def trace_and_check(exp_config: Optional[_ExperimentalConfig]) -> None:
- with _profile(use_kineto=True, use_cuda=True,
- experimental_config=exp_config,
- ) as prof:
+ with _profile(
+ use_kineto=True,
+ use_cuda=True,
+ experimental_config=exp_config,
+ ) as prof:
workload()
with TemporaryFileName(mode="w+") as fname:
@@ -1689,8 +1753,10 @@ class TestProfiler(TestCase):
with open(fname) as f:
j = json.load(f)
cats = {e.get("cat", None) for e in j["traceEvents"]}
- self.assertTrue("cuda_sync" in cats, "Expected to find cuda_sync event"
- f" found = {cats}")
+ self.assertTrue(
+ "cuda_sync" in cats,
+ "Expected to find cuda_sync event" f" found = {cats}",
+ )
print("Testing enable_cuda_sync_events in _ExperimentalConfig")
trace_and_check(exp_config=_ExperimentalConfig(enable_cuda_sync_events=True))
@@ -1716,10 +1782,10 @@ class TestProfiler(TestCase):
self.assertEqual(profiler_type(), ActiveProfilerType.KINETO)
def test_profiler_correlation_id(self):
- '''
+ """
        We expect the correlation_id to be unique across multiple invocations of the profiler,
        so we will reuse id_uniqueness_set.
- '''
+ """
id_uniqueness_set = set()
model = torch.nn.Sequential(
nn.Conv2d(16, 33, 18),
@@ -1819,12 +1885,15 @@ assert KinetoStepTracker.current_step() == initial_step + 2 * niters
"""
try:
subprocess.check_output(
- [sys.executable, '-W', 'all', '-c', script],
- cwd=os.path.dirname(os.path.realpath(__file__))
+ [sys.executable, "-W", "all", "-c", script],
+ cwd=os.path.dirname(os.path.realpath(__file__)),
)
except subprocess.CalledProcessError as e:
if e.returncode != 0:
- self.assertTrue(False, "Kineto is not working properly with the Dynolog environment variable")
+ self.assertTrue(
+ False,
+ "Kineto is not working properly with the Dynolog environment variable",
+ )
def test_concrete_inputs_profiling(self):
x = torch.rand(2, 6)
@@ -1845,7 +1914,7 @@ assert KinetoStepTracker.current_step() == initial_step + 2 * niters
def test_concrete_inputs_profiling_toggling(self):
try:
- for (before, after) in [(True, False), (False, True)]:
+ for before, after in [(True, False), (False, True)]:
x = torch.rand(2, 6)
torch._C._profiler._set_record_concrete_inputs_enabled_val(before)
with profile(record_shapes=True) as p:
@@ -1870,25 +1939,33 @@ assert KinetoStepTracker.current_step() == initial_step + 2 * niters
with torch._C._profiler._RecordFunctionFast("add_test_fast_rf1"):
x.add(y)
- self.assertGreaterEqual(len([e for e in p.events() if e.name == "add_test_fast_rf1"]), 4)
+ self.assertGreaterEqual(
+ len([e for e in p.events() if e.name == "add_test_fast_rf1"]), 4
+ )
for e in p.events():
if e.name == "add_test_fast_rf1":
self.assertTrue(e.input_shapes == [])
with profile(record_shapes=True) as p:
# add optional args
- cm = torch._C._profiler._RecordFunctionFast("add_test_fast_rf2", [x, y], {"stream" : 0, "grid" : "lambda x : x + 1"})
+ cm = torch._C._profiler._RecordFunctionFast(
+ "add_test_fast_rf2", [x, y], {"stream": 0, "grid": "lambda x : x + 1"}
+ )
for _ in range(4):
with cm:
x.add(y)
- self.assertGreaterEqual(len([e for e in p.events() if e.name == "add_test_fast_rf2"]), 4)
+ self.assertGreaterEqual(
+ len([e for e in p.events() if e.name == "add_test_fast_rf2"]), 4
+ )
for e in p.events():
if e.name == "add_test_fast_rf2":
self.assertTrue(e.input_shapes == [[4, 4], [4, 4]])
with profile(record_shapes=True) as p:
- cm = torch._C._profiler._RecordFunctionFast("add_test_fast_rf3", input_values=["hi"], keyword_values={"hi" : "hello"})
+ cm = torch._C._profiler._RecordFunctionFast(
+ "add_test_fast_rf3", input_values=["hi"], keyword_values={"hi": "hello"}
+ )
for _ in range(4):
try:
with cm:
@@ -1898,44 +1975,57 @@ assert KinetoStepTracker.current_step() == initial_step + 2 * niters
except ValueError:
pass
- self.assertGreaterEqual(len([e for e in p.events() if e.name == "add_test_fast_rf3"]), 4)
+ self.assertGreaterEqual(
+ len([e for e in p.events() if e.name == "add_test_fast_rf3"]), 4
+ )
self.assertFalse(any((e.name and "relu" in e.name) for e in p.events()))
for e in p.events():
if e.name == "add_test_fast_rf3":
self.assertTrue(e.input_shapes == [[]])
-
with profile() as p:
for _ in range(4):
- with torch._C._profiler._RecordFunctionFast("add_test_fast_rf4", [x, y]):
+ with torch._C._profiler._RecordFunctionFast(
+ "add_test_fast_rf4", [x, y]
+ ):
x.add(y)
with torch._C._profiler._RecordFunctionFast("add_test_fast_rf5"):
x.relu()
- self.assertGreaterEqual(len([e for e in p.events() if e.name == "add_test_fast_rf4"]), 4)
+ self.assertGreaterEqual(
+ len([e for e in p.events() if e.name == "add_test_fast_rf4"]), 4
+ )
for e in p.events():
if e.name == "add_test_fast_rf4":
self.assertTrue(e.input_shapes == [])
-
- self.assertGreaterEqual(len([e for e in p.events() if e.name == "add_test_fast_rf5"]), 4)
+ self.assertGreaterEqual(
+ len([e for e in p.events() if e.name == "add_test_fast_rf5"]), 4
+ )
with profile(record_shapes=True) as p:
# test optional args with tuple
- cm = torch._C._profiler._RecordFunctionFast("add_test_fast_rf6", (x, y,))
+ cm = torch._C._profiler._RecordFunctionFast(
+ "add_test_fast_rf6",
+ (
+ x,
+ y,
+ ),
+ )
for _ in range(4):
with cm:
x.add(y)
- self.assertGreaterEqual(len([e for e in p.events() if e.name == "add_test_fast_rf6"]), 4)
+ self.assertGreaterEqual(
+ len([e for e in p.events() if e.name == "add_test_fast_rf6"]), 4
+ )
for e in p.events():
if e.name == "add_test_fast_rf6":
self.assertTrue(e.input_shapes == [[4, 4], [4, 4]])
-
def test_is_profiler_enabled(self):
self.assertFalse(torch.autograd.profiler._is_profiler_enabled)
@@ -1961,7 +2051,9 @@ assert KinetoStepTracker.current_step() == initial_step + 2 * niters
else:
x.add(y)
- self.assertGreaterEqual(len([e for e in p.events() if e.name == "guarded_rff"]), 4)
+ self.assertGreaterEqual(
+ len([e for e in p.events() if e.name == "guarded_rff"]), 4
+ )
@unittest.skipIf(not torch.cuda.is_available(), "CUDA is required")
def test_event_list(self):
@@ -1988,6 +2080,7 @@ def find_node_with_name(nodes, name):
if node.name == name:
return node
+
def find_node_with_regex(nodes, pattern):
for node in _utils.traverse_dfs(nodes):
if re.search(pattern, node.name):
@@ -2005,12 +2098,11 @@ class SimpleNet(nn.Module):
class TestTorchTidyProfiler(TestCase):
-
def _get_tensor_fields(self, node, index):
self.assertIsNotNone(node)
self.assertIsInstance(
- node.extra_fields,
- torch._C._profiler._ExtraFields_TorchOp)
+ node.extra_fields, torch._C._profiler._ExtraFields_TorchOp
+ )
tensor_info = node.extra_fields.inputs[index]
self.assertIsInstance(tensor_info, _TensorMetadata)
self.assertIsNotNone(tensor_info.impl_ptr)
@@ -2044,9 +2136,7 @@ class TestTorchTidyProfiler(TestCase):
nodes = p.profiler.kineto_results.experimental_event_tree()
def get_fields(op_name, index):
- return self._get_tensor_fields(
- find_node_with_name(nodes, op_name),
- index)
+ return self._get_tensor_fields(find_node_with_name(nodes, op_name), index)
a_impl, a_storage_data, a_id = get_fields("aten::add", 0)
b_impl, b_storage_data, b_id = get_fields("aten::mul", 0)
@@ -2089,15 +2179,20 @@ class TestTorchTidyProfiler(TestCase):
allocations = tuple(
event.extra_fields
for event in events
- if isinstance(event.extra_fields, torch._C._profiler._ExtraFields_Allocation)
+ if isinstance(
+ event.extra_fields, torch._C._profiler._ExtraFields_Allocation
+ )
)
- return textwrap.indent("\n".join(
- f"{repr(i.id):>5}{' ' * 6}"
- f"{repr(i.allocation_id):>5}{' ' * 6}"
- f"{'Allocation' if i.alloc_size > 0 else 'Free'}"
- for i in allocations
- ), " " * 12)
+ return textwrap.indent(
+ "\n".join(
+ f"{repr(i.id):>5}{' ' * 6}"
+ f"{repr(i.allocation_id):>5}{' ' * 6}"
+ f"{'Allocation' if i.alloc_size > 0 else 'Free'}"
+ for i in allocations
+ ),
+ " " * 12,
+ )
def test_tensorimpl_invalidation_set(self) -> None:
def profiled_code(add_empty_set: bool):
@@ -2117,7 +2212,7 @@ class TestTorchTidyProfiler(TestCase):
0 1 Allocation
0 2 Allocation
0 1 Free
- 0 2 Free"""
+ 0 2 Free""",
)
self.assertExpectedInline(
@@ -2126,7 +2221,7 @@ class TestTorchTidyProfiler(TestCase):
0 1 Allocation
0 1 Free
0 2 Allocation
- 0 2 Free"""
+ 0 2 Free""",
)
def test_tensorimpl_invalidation_keep_alive(self) -> None:
@@ -2175,7 +2270,7 @@ class TestTorchTidyProfiler(TestCase):
0 6 Free
0 8 Allocation
0 7 Free
- 0 8 Free"""
+ 0 8 Free""",
)
self.assertExpectedInline(
@@ -2194,7 +2289,7 @@ class TestTorchTidyProfiler(TestCase):
0 6 Free
0 8 Allocation
0 7 Free
- 0 8 Free"""
+ 0 8 Free""",
)
def test_tensorimpl_invalidation_full(self) -> None:
@@ -2251,7 +2346,7 @@ class TestTorchTidyProfiler(TestCase):
0 12 Free
0 14 Allocation
0 13 Free
- 0 14 Free"""
+ 0 14 Free""",
)
def test_tensorimpl_invalidation_scalar_args(self) -> None:
@@ -2305,8 +2400,8 @@ class TestTorchTidyProfiler(TestCase):
20 21 Allocation
20 21 Free
19 20 Free
- 0 1 Free""")
-
+ 0 1 Free""",
+ )
def test_module_and_optimizer_ids(self) -> None:
model = torch.nn.Linear(2, 1, bias=True)
@@ -2318,15 +2413,17 @@ class TestTorchTidyProfiler(TestCase):
_ = x.sin() # Mark `x`
model(x).backward()
optimizer.step()
- _ = optimizer.state[model.weight]["momentum_buffer"].cos() # Mark weight momentum
+ _ = optimizer.state[model.weight][
+ "momentum_buffer"
+ ].cos() # Mark weight momentum
_ = model.weight.grad.tan() # Mark weight gradient
nodes = p.profiler.kineto_results.experimental_event_tree()
def get_fields(op_name, index):
return self._get_tensor_fields(
- find_node_with_name(nodes, op_name),
- index)
+ find_node_with_name(nodes, op_name), index
+ )
# Marked Tensors act as ground truth for python tracer IDs.
_, _, x_id = get_fields("aten::sin", 0)
@@ -2405,7 +2502,9 @@ class TestTorchTidyProfiler(TestCase):
self.assertIsNotNone(out[-1], name)
return out
- allocation = find_chain(["aten::rand", "aten::empty", "[memory]"])[-1].extra_fields
+ allocation = find_chain(["aten::rand", "aten::empty", "[memory]"])[
+ -1
+ ].extra_fields
_, uniform_node = find_chain(["aten::rand", "aten::uniform_"])
x_impl, x_storage_data, x_id = self._get_tensor_fields(uniform_node, 0)
@@ -2428,7 +2527,9 @@ class TestTorchTidyProfiler(TestCase):
self.assertNotEqual(allocate_new.ptr, allocation.ptr)
# Deletion when `x` goes out of scope.
- free_new = [i for i in nodes if i.tag == torch._C._profiler._EventType.Allocation][-1].extra_fields
+ free_new = [
+ i for i in nodes if i.tag == torch._C._profiler._EventType.Allocation
+ ][-1].extra_fields
self.assertIsInstance(free_new, torch._C._profiler._ExtraFields_Allocation)
self.assertEqual(free_new.id, allocate_new.id)
self.assertEqual(free_new.ptr, allocate_new.ptr)
@@ -2439,8 +2540,7 @@ class TestTorchTidyProfiler(TestCase):
def test_allocation_ids_with_other_ops(self) -> None:
x = torch.ones((1,))
self._test_allocation_ids(
- lambda: (x + 1).relu_(),
- lambda: torch.zeros((1,)).cos()
+ lambda: (x + 1).relu_(), lambda: torch.zeros((1,)).cos()
)
def test_impl_reuse(self) -> None:
@@ -2472,7 +2572,11 @@ class TestTorchTidyProfiler(TestCase):
for e in _utils.traverse_dfs(roots):
fields = e.extra_fields
if isinstance(fields, torch._C._profiler._ExtraFields_TorchOp):
- id_set |= {t.allocation_id for t in fields.inputs if isinstance(t, _TensorMetadata)}
+ id_set |= {
+ t.allocation_id
+ for t in fields.inputs
+ if isinstance(t, _TensorMetadata)
+ }
elif isinstance(fields, torch._C._profiler._ExtraFields_Allocation):
id_set.add(fields.allocation_id)
@@ -2489,18 +2593,19 @@ class TestTorchTidyProfiler(TestCase):
self.assertIsNotNone(node)
self.assertIsInstance(
- node.extra_fields,
- torch._C._profiler._ExtraFields_TorchOp)
+ node.extra_fields, torch._C._profiler._ExtraFields_TorchOp
+ )
self.assertIsInstance(
- node.parent.extra_fields,
- torch._C._profiler._ExtraFields_PyCCall)
+ node.parent.extra_fields, torch._C._profiler._ExtraFields_PyCCall
+ )
self.assertEqual(node.children[0].name, "aten::empty")
self.assertEqual(node.children[0].children[0].name, "[memory]")
self.assertIsInstance(
node.children[0].children[0].extra_fields,
- torch._C._profiler._ExtraFields_Allocation)
+ torch._C._profiler._ExtraFields_Allocation,
+ )
def test_tensor_properties(self):
x = torch.ones(10, 10).as_strided([4, 4], [12, 3])
@@ -2515,24 +2620,31 @@ class TestTorchTidyProfiler(TestCase):
self.assertIsNotNone(node)
self.assertIsInstance(
- node.extra_fields,
- torch._C._profiler._ExtraFields_TorchOp)
+ node.extra_fields, torch._C._profiler._ExtraFields_TorchOp
+ )
def getattr_inputs(name, default):
return [getattr(i, name, default) for i in node.extra_fields.inputs]
self.assertEqual(getattr_inputs("sizes", []), [[4, 4], [4, 1], []])
self.assertEqual(getattr_inputs("strides", []), [[12, 3], [1, 1], []])
- self.assertEqual(getattr_inputs("layout", None), [torch.strided, torch.strided, None])
- self.assertEqual(getattr_inputs("device", None), [torch.device("cpu"), torch.device("cpu"), None])
- self.assertEqual(getattr_inputs("dtype", None), [torch.float32, torch.float32, None])
+ self.assertEqual(
+ getattr_inputs("layout", None), [torch.strided, torch.strided, None]
+ )
+ self.assertEqual(
+ getattr_inputs("device", None),
+ [torch.device("cpu"), torch.device("cpu"), None],
+ )
+ self.assertEqual(
+ getattr_inputs("dtype", None), [torch.float32, torch.float32, None]
+ )
self.assertEqual(node.extra_fields.scope, torch.profiler.RecordScope.FUNCTION)
mul_node = find_node_with_name(nodes, "aten::mul")
self.assertIsNotNone(mul_node)
self.assertEqual(
- node.extra_fields.sequence_number + 1,
- mul_node.extra_fields.sequence_number)
+ node.extra_fields.sequence_number + 1, mul_node.extra_fields.sequence_number
+ )
def test_sparse_tensors(self):
i = [[0, 1, 1], [2, 0, 2]]
@@ -2547,18 +2659,25 @@ class TestTorchTidyProfiler(TestCase):
self.assertIsNotNone(node)
self.assertIsInstance(
- node.extra_fields,
- torch._C._profiler._ExtraFields_TorchOp)
+ node.extra_fields, torch._C._profiler._ExtraFields_TorchOp
+ )
def getattr_inputs(name, default):
return [getattr(i, name, default) for i in node.extra_fields.inputs]
self.assertEqual(getattr_inputs("sizes", []), [[2, 3], [2, 3], []])
self.assertEqual(getattr_inputs("strides", []), [[], [], []])
- self.assertEqual(getattr_inputs("layout", None), [torch.sparse_coo, torch.sparse_coo, None])
- self.assertEqual(getattr_inputs("device", None), [torch.device("cpu"), torch.device("cpu"), None])
+ self.assertEqual(
+ getattr_inputs("layout", None), [torch.sparse_coo, torch.sparse_coo, None]
+ )
+ self.assertEqual(
+ getattr_inputs("device", None),
+ [torch.device("cpu"), torch.device("cpu"), None],
+ )
- @unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled")
+ @unittest.skipIf(
+ not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled"
+ )
def test_mkldnn_tensors(self):
x = torch.ones(4, 3).to_mkldnn()
@@ -2570,16 +2689,21 @@ class TestTorchTidyProfiler(TestCase):
self.assertIsNotNone(node)
self.assertIsInstance(
- node.extra_fields,
- torch._C._profiler._ExtraFields_TorchOp)
+ node.extra_fields, torch._C._profiler._ExtraFields_TorchOp
+ )
def getattr_inputs(name, default):
return [getattr(i, name, default) for i in node.extra_fields.inputs]
self.assertEqual(getattr_inputs("sizes", []), [[4, 3], [4, 3], []])
self.assertEqual(getattr_inputs("strides", []), [[], [], []])
- self.assertEqual(getattr_inputs("layout", None), [torch._mkldnn, torch._mkldnn, None])
- self.assertEqual(getattr_inputs("device", None), [torch.device("cpu"), torch.device("cpu"), None])
+ self.assertEqual(
+ getattr_inputs("layout", None), [torch._mkldnn, torch._mkldnn, None]
+ )
+ self.assertEqual(
+ getattr_inputs("device", None),
+ [torch.device("cpu"), torch.device("cpu"), None],
+ )
def test_scalar_ins(self):
x = torch.ones(5, 5)
@@ -2596,7 +2720,9 @@ class TestTorchTidyProfiler(TestCase):
return [getattr(i, name, default) for i in node.extra_fields.inputs]
        # The second argument to the add gets promoted to a zerodim Tensor
- self.assertEqual(getattr_inputs("dtype", None), [torch.float32, torch.float64, None])
+ self.assertEqual(
+ getattr_inputs("dtype", None), [torch.float32, torch.float64, None]
+ )
self.assertEqual(getattr_inputs("sizes", []), [[5, 5], [], []])
self.assertEqual(node.extra_fields.inputs[2], alpha)
@@ -2615,14 +2741,15 @@ class TestTorchTidyProfiler(TestCase):
self.assertEqual(x.storage().data_ptr(), inputs[0][0].storage_data_ptr)
self.assertEqual(y.storage().data_ptr(), inputs[0][1].storage_data_ptr)
-
def test_nnmodule_params(self):
-
def flat_out_extrafields(nodes, out=None):
if out is None:
out = []
for node in nodes:
- if isinstance(node.extra_fields, _ExtraFields_PyCall) and node.extra_fields.module:
+ if (
+ isinstance(node.extra_fields, _ExtraFields_PyCall)
+ and node.extra_fields.module
+ ):
if node.extra_fields.module.parameters:
out.append(node.extra_fields.module)
flat_out_extrafields(node.children, out)
@@ -2635,20 +2762,37 @@ class TestTorchTidyProfiler(TestCase):
with torch.profiler.profile(with_stack=True, profile_memory=True) as p:
_ = net(inputs)
- modules = flat_out_extrafields(p.profiler.kineto_results.experimental_event_tree())
- self.assertEqual(len(modules), 2, f"Expected two parameter list, but got {len(modules)}")
+ modules = flat_out_extrafields(
+ p.profiler.kineto_results.experimental_event_tree()
+ )
+ self.assertEqual(
+            len(modules), 2, f"Expected two parameter lists, but got {len(modules)}"
+ )
- params = [(n, p.storage_data_ptr, g.storage_data_ptr) for module in modules for (n, p, g) in module.parameters]
- expected = [(name, val.storage().data_ptr(), val.grad.storage().data_ptr()) for name, val in net.fc1._parameters.items()]
- expected += [(name, val.storage().data_ptr(), val.grad.storage().data_ptr()) for name, val in net.fc2._parameters.items()]
+ params = [
+ (n, p.storage_data_ptr, g.storage_data_ptr)
+ for module in modules
+ for (n, p, g) in module.parameters
+ ]
+ expected = [
+ (name, val.storage().data_ptr(), val.grad.storage().data_ptr())
+ for name, val in net.fc1._parameters.items()
+ ]
+ expected += [
+ (name, val.storage().data_ptr(), val.grad.storage().data_ptr())
+ for name, val in net.fc2._parameters.items()
+ ]
self.assertEqual(expected, params, f"{expected} vs. {params}")
def _flat_out_extrafields(self, nodes, out=None):
if out is None:
out = []
for node in nodes:
- if (isinstance(node.extra_fields, _ExtraFields_PyCall) and
- node.extra_fields.optimizer and node.extra_fields.optimizer.parameters):
+ if (
+ isinstance(node.extra_fields, _ExtraFields_PyCall)
+ and node.extra_fields.optimizer
+ and node.extra_fields.optimizer.parameters
+ ):
# avoiding OptInfo duplicates from iterations
addr = node.extra_fields.optimizer.parameters[0][0].storage_data_ptr
if not [o for o in out if addr == o.parameters[0][0].storage_data_ptr]:
@@ -2658,13 +2802,17 @@ class TestTorchTidyProfiler(TestCase):
def _check_results(self, opt, opts, check_items=False):
self.assertEqual(len(opts), 1, f"Expected 1 optimizer: len(opts): {len(opts)}")
- self.assertEqual(id(opt), opts[0].self_ptr, f"Optimizer addr ({id(opt)}) vs. profiled addr ({opts[0].self_ptr})")
+ self.assertEqual(
+ id(opt),
+ opts[0].self_ptr,
+ f"Optimizer addr ({id(opt)}) vs. profiled addr ({opts[0].self_ptr})",
+ )
if check_items:
self.assertEqual(len(opt.param_groups), len(opts))
for group, opt_ in zip(opt.param_groups, opts):
self.assertEqual(
[(v.storage().data_ptr()) for v in group.get("params", [])],
- [(o.storage_data_ptr) for (o, _, _) in opt_.parameters]
+ [(o.storage_data_ptr) for (o, _, _) in opt_.parameters],
)
for opt_ in opts:
observed_state = {
@@ -2676,8 +2824,11 @@ class TestTorchTidyProfiler(TestCase):
# that the address recorded by the profiler is correct.
for parameter, parameter_state in opt.state.items():
self.assertEqual(
- {name: value.storage().data_ptr() for name, value in parameter_state.items()},
- observed_state.get(parameter.storage().data_ptr(), [])
+ {
+ name: value.storage().data_ptr()
+ for name, value in parameter_state.items()
+ },
+ observed_state.get(parameter.storage().data_ptr(), []),
)
def test_optimizer(self):
@@ -2691,7 +2842,13 @@ class TestTorchTidyProfiler(TestCase):
loss = torch.nn.functional.cross_entropy(out, torch.rand(2))
loss.backward()
opt.step()
- self._check_results(opt, self._flat_out_extrafields(p.profiler.kineto_results.experimental_event_tree()), False)
+ self._check_results(
+ opt,
+ self._flat_out_extrafields(
+ p.profiler.kineto_results.experimental_event_tree()
+ ),
+ False,
+ )
def _test_optimizer_parameters(self, optimizer_factory):
inputs = torch.rand(10)
@@ -2704,13 +2861,23 @@ class TestTorchTidyProfiler(TestCase):
loss = torch.nn.functional.cross_entropy(out, torch.rand(2))
loss.backward()
opt.step()
- self._check_results(opt, self._flat_out_extrafields(p.profiler.kineto_results.experimental_event_tree()), True)
+ self._check_results(
+ opt,
+ self._flat_out_extrafields(
+ p.profiler.kineto_results.experimental_event_tree()
+ ),
+ True,
+ )
def test_optimizer_parameters_sgd(self):
- self._test_optimizer_parameters(lambda params: torch.optim.SGD(params, lr=0.01, momentum=0.9))
+ self._test_optimizer_parameters(
+ lambda params: torch.optim.SGD(params, lr=0.01, momentum=0.9)
+ )
def test_optimizer_parameters_adam(self):
- self._test_optimizer_parameters(lambda params: torch.optim.Adam(params, foreach=True))
+ self._test_optimizer_parameters(
+ lambda params: torch.optim.Adam(params, foreach=True)
+ )
def test_allocations(self):
gc.collect()
@@ -2742,10 +2909,11 @@ class TestTorchTidyProfiler(TestCase):
self.assertEqual(node.extra_fields.ptr, ptr)
self.assertEqual(node.extra_fields.alloc_size, -alloc_size)
self.assertEqual(node.extra_fields.device, torch.device("cpu"))
- self.assertEqual(node.extra_fields.total_allocated, total_allocated - alloc_size)
+ self.assertEqual(
+ node.extra_fields.total_allocated, total_allocated - alloc_size
+ )
def test_refcounts(self):
-
class Sentinel:
pass
@@ -2760,7 +2928,6 @@ class TestTorchTidyProfiler(TestCase):
def inner():
_ = inner_sentinel
-
with profile(with_stack=True):
inner()
@@ -2830,6 +2997,7 @@ class MockProfilerEvent:
object.__setattr__(self, "parent", parent)
object.__setattr__(self, "children", children)
+
class MockNode:
def __init__(self, name, children) -> None:
self.name = name
@@ -2837,13 +3005,10 @@ class MockNode:
class TestExperimentalUtils(TestCase):
-
def make_tree(self) -> List[MockNode]:
tree = {
"root_0": {
- "1": {
- "2": {}
- },
+ "1": {"2": {}},
"3": {
"4": {},
"5": {},
@@ -2853,9 +3018,7 @@ class TestExperimentalUtils(TestCase):
"6": {},
"7": {},
"8": {
- "9": {
- "10": {}
- },
+ "9": {"10": {}},
},
},
}
@@ -2864,12 +3027,14 @@ class TestExperimentalUtils(TestCase):
def test_dfs(self) -> None:
self.assertEqual(
" ".join(i.name for i in _utils.traverse_dfs(self.make_tree())),
- "root_0 1 2 3 4 5 root_1 6 7 8 9 10")
+ "root_0 1 2 3 4 5 root_1 6 7 8 9 10",
+ )
def test_bfs(self) -> None:
self.assertEqual(
" ".join(i.name for i in _utils.traverse_bfs(self.make_tree())),
- "root_0 root_1 1 3 6 7 8 2 4 5 9 10")
+ "root_0 root_1 1 3 6 7 8 2 4 5 9 10",
+ )
@staticmethod
def generate_mock_profile():
@@ -2885,24 +3050,17 @@ class TestExperimentalUtils(TestCase):
MockKinetoEvent("GPU", 1100, 100, 3, 1),
MockKinetoEvent("GPU", 1200, 100, 4, 1),
MockKinetoEvent("GPU", 1300, 100, 5, 1),
- MockKinetoEvent("GPU", 1700, 100, 6, 1)
+ MockKinetoEvent("GPU", 1700, 100, 6, 1),
]
cpu_events = [
MockProfilerEvent("CPU (Before cudaLaunchKernel)", 1, 0, 100000),
- MockProfilerEvent("CPU (Before cudaLaunchKernel)", 2, 100000,
- 100000),
- MockProfilerEvent("CPU (Before cudaLaunchKernel)", 3, 200000,
- 100000),
- MockProfilerEvent("CPU (Before cudaLaunchKernel)", 4, 300000,
- 100000),
- MockProfilerEvent("CPU (After cudaLaunchKernel)", 5, 400000,
- 100000),
- MockProfilerEvent("CPU (After cudaLaunchKernel)", 6, 500000,
- 100000),
- MockProfilerEvent("CPU (After cudaLaunchKernel)", 7, 600000,
- 100000),
- MockProfilerEvent("CPU (After cudaLaunchKernel)", 8, 700000,
- 100000),
+ MockProfilerEvent("CPU (Before cudaLaunchKernel)", 2, 100000, 100000),
+ MockProfilerEvent("CPU (Before cudaLaunchKernel)", 3, 200000, 100000),
+ MockProfilerEvent("CPU (Before cudaLaunchKernel)", 4, 300000, 100000),
+ MockProfilerEvent("CPU (After cudaLaunchKernel)", 5, 400000, 100000),
+ MockProfilerEvent("CPU (After cudaLaunchKernel)", 6, 500000, 100000),
+ MockProfilerEvent("CPU (After cudaLaunchKernel)", 7, 600000, 100000),
+ MockProfilerEvent("CPU (After cudaLaunchKernel)", 8, 700000, 100000),
MockProfilerEvent("CPU (After GPU)", 9, 800000, 100000),
MockProfilerEvent("CPU (After GPU)", 10, 900000, 100000),
MockProfilerEvent("CPU (After GPU)", 11, 1100000, 100000),
@@ -2911,10 +3069,10 @@ class TestExperimentalUtils(TestCase):
profiler = unittest.mock.Mock()
profiler.kineto_results = unittest.mock.Mock()
- profiler.kineto_results.events = unittest.mock.Mock(
- return_value=cuda_events)
+ profiler.kineto_results.events = unittest.mock.Mock(return_value=cuda_events)
profiler.kineto_results.experimental_event_tree = unittest.mock.Mock(
- return_value=cpu_events)
+ return_value=cpu_events
+ )
return profiler
@staticmethod
@@ -2922,7 +3080,8 @@ class TestExperimentalUtils(TestCase):
accept = expecttest.ACCEPT
json_file_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
- "profiler_utils_mock_events.json")
+ "profiler_utils_mock_events.json",
+ )
if accept and torch.cuda.is_available():
def garbage_code(x):
@@ -2932,30 +3091,30 @@ class TestExperimentalUtils(TestCase):
x = torch.ones((4096, 4096), device="cuda")
x = x @ x
with profile(
- activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA],
- record_shapes=True,
- with_stack=True) as prof:
+ activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA],
+ record_shapes=True,
+ with_stack=True,
+ ) as prof:
for _ in range(5):
x = x @ x
garbage_code(x)
for _ in range(5):
x = x @ x
- kineto_events = [{
- '_name':
- e.name,
- '_start_ns':
- e.start_ns(),
- '_duration_ns':
- e.duration_ns(),
- '_linked_correlation_id':
- e.linked_correlation_id(),
- '_device_type':
- 1 if e.device_type() == DeviceType.CUDA else 0
- } for e in prof.profiler.kineto_results.events()]
+ kineto_events = [
+ {
+ "_name": e.name,
+ "_start_ns": e.start_ns(),
+ "_duration_ns": e.duration_ns(),
+ "_linked_correlation_id": e.linked_correlation_id(),
+ "_device_type": 1 if e.device_type() == DeviceType.CUDA else 0,
+ }
+ for e in prof.profiler.kineto_results.events()
+ ]
def EventTreeDFS(event_tree):
from collections import deque
+
stack = deque(event_tree)
while stack:
curr_event = stack.pop()
@@ -2963,27 +3122,29 @@ class TestExperimentalUtils(TestCase):
for child_event in curr_event.children:
stack.append(child_event)
- profiler_events = [{
- '_name': e.name,
- 'id': e.id,
- 'start_time_ns': e.start_time_ns,
- 'duration_time_ns': e.duration_time_ns,
- 'correlation_id': e.correlation_id,
- 'children': [child.id for child in e.children],
- 'parent': e.parent.id if e.parent else None
- } for e in EventTreeDFS(
- prof.profiler.kineto_results.experimental_event_tree())]
+ profiler_events = [
+ {
+ "_name": e.name,
+ "id": e.id,
+ "start_time_ns": e.start_time_ns,
+ "duration_time_ns": e.duration_time_ns,
+ "correlation_id": e.correlation_id,
+ "children": [child.id for child in e.children],
+ "parent": e.parent.id if e.parent else None,
+ }
+ for e in EventTreeDFS(
+ prof.profiler.kineto_results.experimental_event_tree()
+ )
+ ]
with open(json_file_path, "w") as f:
json.dump([kineto_events, profiler_events], f)
- assert (os.path.exists(json_file_path))
+ assert os.path.exists(json_file_path)
with open(json_file_path) as f:
kineto_events, profiler_events = json.load(f)
- cuda_events = [
- MockKinetoEvent(*event.values()) for event in kineto_events
- ]
+ cuda_events = [MockKinetoEvent(*event.values()) for event in kineto_events]
cpu_events = []
id_map = {}
for e in profiler_events:
@@ -2997,16 +3158,17 @@ class TestExperimentalUtils(TestCase):
cpu_events = [event for event in cpu_events if event.parent is None]
profiler = unittest.mock.Mock()
profiler.kineto_results = unittest.mock.Mock()
- profiler.kineto_results.events = unittest.mock.Mock(
- return_value=cuda_events)
+ profiler.kineto_results.events = unittest.mock.Mock(return_value=cuda_events)
profiler.kineto_results.experimental_event_tree = unittest.mock.Mock(
- return_value=cpu_events)
+ return_value=cpu_events
+ )
return profiler
def test_utils_compute_self_time(self):
with profile() as prof:
t1, t2 = torch.ones(1, requires_grad=True), torch.ones(
- 1, requires_grad=True)
+ 1, requires_grad=True
+ )
z = torch.add(t1, t2)
y = torch.ones(1)
loss = torch.nn.functional.binary_cross_entropy_with_logits(z, y)
@@ -3017,9 +3179,9 @@ class TestExperimentalUtils(TestCase):
for event_key, event_metrics in metrics.items():
self.assertEqual(
event_metrics.self_time_ns,
- event_key.event.duration_time_ns - sum(
- child.duration_time_ns
- for child in event_key.event.children))
+ event_key.event.duration_time_ns
+ - sum(child.duration_time_ns for child in event_key.event.children),
+ )
def test_utils_intervals_overlap(self):
event = _utils.EventKey(MockProfilerEvent("Event 1", 1, 5, 5))
@@ -3035,7 +3197,6 @@ class TestExperimentalUtils(TestCase):
self.assertEqual(event.intervals_overlap(intervals), 5)
def test_utils_compute_queue_depth(self):
-
def format_queue_depth(queue_depth_list, events):
res = ""
for data, event in zip(queue_depth_list, events):
@@ -3046,8 +3207,10 @@ class TestExperimentalUtils(TestCase):
profiler = self.generate_mock_profile()
basic_evaluation = _utils.BasicEvaluation(profiler)
self.assertExpectedInline(
- format_queue_depth(basic_evaluation.queue_depth_list,
- basic_evaluation.cuda_events), """\
+ format_queue_depth(
+ basic_evaluation.queue_depth_list, basic_evaluation.cuda_events
+ ),
+ """\
1 [cudaLaunchKernel]
2 [cudaLaunchKernel]
3 [cudaLaunchKernel]
@@ -3060,12 +3223,14 @@ class TestExperimentalUtils(TestCase):
0 [GPU]
1 [cudaLaunchKernel]
0 [GPU]
-""")
+""",
+ )
self.assertExpectedInline(
- format_queue_depth([
- basic_evaluation.metrics[k]
- for k in basic_evaluation.event_keys
- ], basic_evaluation.events), """\
+ format_queue_depth(
+ [basic_evaluation.metrics[k] for k in basic_evaluation.event_keys],
+ basic_evaluation.events,
+ ),
+ """\
0 [CPU (Before cudaLaunchKernel)]
0 [CPU (Before cudaLaunchKernel)]
0 [CPU (Before cudaLaunchKernel)]
@@ -3078,7 +3243,8 @@ class TestExperimentalUtils(TestCase):
4 [CPU (After GPU)]
2 [CPU (After GPU)]
1 [CPU (After GPU)]
-""")
+""",
+ )
def test_utils_compute_queue_depth_when_no_cuda_events(self):
# For traces with only cpu events, we expect empty queue depth list
@@ -3092,12 +3258,15 @@ class TestExperimentalUtils(TestCase):
def test_utils_compute_idle_time(self):
profiler = self.generate_mock_profile()
basic_evaluation = _utils.BasicEvaluation(profiler)
- expected_output = "\n".join([
- f"{basic_evaluation.metrics[event_key].idle_time_ns} [{event_key.event.name}]"
- for event_key in basic_evaluation.event_keys
- ])
+ expected_output = "\n".join(
+ [
+ f"{basic_evaluation.metrics[event_key].idle_time_ns} [{event_key.event.name}]"
+ for event_key in basic_evaluation.event_keys
+ ]
+ )
self.assertExpectedInline(
- expected_output, """\
+ expected_output,
+ """\
100000 [CPU (Before cudaLaunchKernel)]
100000 [CPU (Before cudaLaunchKernel)]
100000 [CPU (Before cudaLaunchKernel)]
@@ -3109,19 +3278,24 @@ class TestExperimentalUtils(TestCase):
0 [CPU (After GPU)]
0 [CPU (After GPU)]
0 [CPU (After GPU)]
-100000 [CPU (After GPU)]""")
+100000 [CPU (After GPU)]""",
+ )
@unittest.skipIf(IS_JETSON, "JSON not behaving as expected on Jetson")
def test_utils_get_optimizable_events(self):
basic_evaluation = _utils.BasicEvaluation(self.load_mock_profile())
optimizable_events = basic_evaluation.get_optimizable_events(
- 2, print_enable=False)
+ 2, print_enable=False
+ )
expected_output = "\n".join(
- [f"{event_key.event.name}" for event_key in optimizable_events])
+ [f"{event_key.event.name}" for event_key in optimizable_events]
+ )
self.assertExpectedInline(
- expected_output, """\
+ expected_output,
+ """\
<built-in function _cuda_synchronize>
-aten::copy_""")
+aten::copy_""",
+ )
def test_profiler_name_pattern(self):
x = torch.ones((4096, 4096))
@@ -3131,12 +3305,15 @@ aten::copy_""")
x = x + x
matched_events = NamePattern(prof, "aten::mm").matched_events()
output = "\n".join([f"{event.name}" for event in matched_events])
- self.assertExpectedInline(output, """\
+ self.assertExpectedInline(
+ output,
+ """\
aten::mm
aten::mm
aten::mm
aten::mm
-aten::mm""")
+aten::mm""",
+ )
# TODO: Add logic for CUDA version of test
@unittest.skipIf(torch.cuda.is_available(), "Test not working for CUDA")
@@ -3153,13 +3330,16 @@ aten::mm""")
child_nodes = event_tree[0].children
self.assertEqual([], pattern.siblings_of(child_nodes[0])[0])
self.assertEqual(child_nodes[1:], pattern.siblings_of(child_nodes[0])[1])
- self.assertEqual(event_tree[0],
- pattern.root_of(event_tree[0].children[0].children[0]))
+ self.assertEqual(
+ event_tree[0], pattern.root_of(event_tree[0].children[0].children[0])
+ )
self.assertEqual(None, pattern.next_of(event_tree[-1]))
self.assertEqual(event_tree[1], pattern.next_of(event_tree[0]))
self.assertEqual(event_tree[0], pattern.prev_of(event_tree[1]))
- @unittest.skipIf(TEST_WITH_CROSSREF, "crossref intercepts calls and changes the callsite.")
+ @unittest.skipIf(
+ TEST_WITH_CROSSREF, "crossref intercepts calls and changes the callsite."
+ )
@unittest.skipIf(not torch.cuda.is_available(), "CUDA is required")
def test_profiler_extra_cuda_copy_pattern(self):
cases = (
@@ -3185,8 +3365,9 @@ aten::mm""")
num_matched.append(len(pattern.matched_events()))
self.assertEqual(num_matched, [i for i, _ in cases])
- @unittest.skipIf(TEST_WITH_CROSSREF,
- "crossref intercepts calls and changes the callsite.")
+ @unittest.skipIf(
+ TEST_WITH_CROSSREF, "crossref intercepts calls and changes the callsite."
+ )
def test_profiler_for_loop_indexing_pattern(self):
x = torch.ones((100, 100))
@@ -3222,7 +3403,6 @@ aten::mm""")
num_matched.append(len(pattern.matched_events()))
self.assertEqual(num_matched, [i for i, _ in cases])
-
@unittest.skipIf(not torch.cuda.is_available(), "CUDA is required")
def test_profiler_fp32_matmul_pattern(self):
x = torch.ones((100, 100), device="cuda")
@@ -3233,7 +3413,6 @@ aten::mm""")
num_matched = len(pattern.matched_events())
self.assertEqual(num_matched, has_tf32)
-
@unittest.skipIf(not torch.cuda.is_available(), "CUDA is required")
def test_profiler_extra_cuda_copy_pattern_benchmark(self):
with profile(with_stack=True, record_shapes=True) as prof:
@@ -3264,7 +3443,9 @@ aten::mm""")
optimizer = fn()
optimizer.zero_grad()
y_hat = model(x)
- loss = torch.nn.functional.cross_entropy(y_hat, torch.randint(0, 10, (100,)))
+ loss = torch.nn.functional.cross_entropy(
+ y_hat, torch.randint(0, 10, (100,))
+ )
loss.backward()
optimizer.step()
pattern = OptimizerSingleTensorPattern(prof)
@@ -3274,7 +3455,9 @@ aten::mm""")
def test_profiler_synchronized_dataloader_pattern(self):
dataset = torch.rand((100, 100))
sync_dataloader = torch.utils.data.DataLoader(dataset, batch_size=10)
- async_dataloader = torch.utils.data.DataLoader(dataset, batch_size=10, num_workers=4)
+ async_dataloader = torch.utils.data.DataLoader(
+ dataset, batch_size=10, num_workers=4
+ )
with profile(with_stack=True) as prof:
next(iter(sync_dataloader))
next(iter(async_dataloader))
@@ -3282,7 +3465,9 @@ aten::mm""")
num_matched = len(pattern.matched_events())
self.assertEqual(num_matched, 1)
- @skipIfTorchDynamo("pattern checks for aten::_zero op which might not be there with torch.compile'd graph")
+ @skipIfTorchDynamo(
+ "pattern checks for aten::_zero op which might not be there with torch.compile'd graph"
+ )
def test_profiler_grad_not_set_to_none_pattern(self):
x = torch.ones((100, 100))
model = nn.Sequential(
@@ -3295,13 +3480,15 @@ aten::mm""")
(0, lambda: optimizer.zero_grad()),
(0, lambda: model.zero_grad()),
(1, lambda: optimizer.zero_grad(set_to_none=False)),
- (1, lambda: model.zero_grad(set_to_none=False))
+ (1, lambda: model.zero_grad(set_to_none=False)),
)
num_matched = []
for _, fn in cases:
with profile(with_stack=True) as prof:
y_hat = model(x)
- loss = torch.nn.functional.cross_entropy(y_hat, torch.randint(0, 10, (100,)))
+ loss = torch.nn.functional.cross_entropy(
+ y_hat, torch.randint(0, 10, (100,))
+ )
loss.backward()
optimizer.step()
fn()
@@ -3314,7 +3501,7 @@ aten::mm""")
cases = (
(1, nn.Sequential(nn.Conv2d(3, 3, 3, 1, 1), nn.BatchNorm2d(3))),
(0, nn.Sequential(nn.Conv2d(3, 3, 3, 1, 1, bias=False), nn.BatchNorm2d(3))),
- (0, nn.Sequential(nn.Conv2d(3, 3, 3, 1, 1)))
+ (0, nn.Sequential(nn.Conv2d(3, 3, 3, 1, 1))),
)
num_matched = []
for _, model in cases:
@@ -3324,14 +3511,13 @@ aten::mm""")
num_matched.append(len(pattern.matched_events()))
self.assertEqual(num_matched, [i for i, _ in cases])
-
@unittest.skipIf(not torch.cuda.is_available(), "CUDA is required")
def test_profiler_matmul_dim_fp16_pattern(self):
cases = (
- (1, torch.randn((201, 201), device='cuda', dtype=torch.float16)),
- (1, torch.randn((3, 97, 97), device='cuda', dtype=torch.float16)),
- (0, torch.randn((200, 200), device='cuda', dtype=torch.float16)),
- (0, torch.randn((3, 200, 200), device='cuda', dtype=torch.float16))
+ (1, torch.randn((201, 201), device="cuda", dtype=torch.float16)),
+ (1, torch.randn((3, 97, 97), device="cuda", dtype=torch.float16)),
+ (0, torch.randn((200, 200), device="cuda", dtype=torch.float16)),
+ (0, torch.randn((3, 200, 200), device="cuda", dtype=torch.float16)),
)
num_matched = []
for _, x in cases:
@@ -3351,7 +3537,9 @@ aten::mm""")
optimizer = torch.optim.Adam(model.parameters())
with profile(with_stack=True, record_shapes=True) as prof:
y_hat = model(x)
- loss = torch.nn.functional.cross_entropy(y_hat, torch.randint(0, 10, (100,)))
+ loss = torch.nn.functional.cross_entropy(
+ y_hat, torch.randint(0, 10, (100,))
+ )
loss.backward()
optimizer.step()
optimizer.zero_grad()
@@ -3373,5 +3561,6 @@ aten::mm""")
finally:
os.remove("torchtidy_report.json")
-if __name__ == '__main__':
+
+if __name__ == "__main__":
run_tests()
diff --git a/test/profiler/test_profiler_tree.py b/test/profiler/test_profiler_tree.py
index df9bfd7f2d..b5448025d1 100644
--- a/test/profiler/test_profiler_tree.py
+++ b/test/profiler/test_profiler_tree.py
@@ -12,7 +12,13 @@ import expecttest
import torch
from torch._C._profiler import _ExtraFields_PyCall, _ExtraFields_PyCCall
from torch.testing._internal.common_utils import (
- TestCase, run_tests, IS_WINDOWS, TEST_WITH_CROSSREF, IS_ARM64, skipIfTorchDynamo)
+ IS_ARM64,
+ IS_WINDOWS,
+ run_tests,
+ skipIfTorchDynamo,
+ TEST_WITH_CROSSREF,
+ TestCase,
+)
from torch.utils._pytree import tree_map
# These functions can vary from based on platform and build (e.g. with CUDA)
@@ -28,7 +34,6 @@ PRUNE_FUNCTIONS = {
"torch/profiler/profiler.py(...): _transit_action": KEEP_ELLIPSES,
"<built-in method __exit__ of torch._C.DisableTorchFunctionSubclass object at 0xXXXXXXXXXXXX>": PRUNE_ALL,
"cudaStreamIsCapturing": PRUNE_ALL,
-
# These show up only on CUDA, prune them so the CUDA and CPU expected results can be the same
"cudaGetDeviceCount": PRUNE_ALL,
"cudaGetDeviceProperties_v2": PRUNE_ALL,
@@ -46,14 +51,12 @@ ALLOW_CUDA_FAILURE = (torch.version.hip is not None) or IS_WINDOWS
class TorchFunctionTensor(torch.Tensor):
-
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
return super().__torch_function__(func, types, args, kwargs)
class TorchDispatchTensor(torch.Tensor):
-
@staticmethod
def __new__(cls, elem):
t = torch.Tensor._make_subclass(cls, elem, elem.requires_grad)
@@ -62,7 +65,6 @@ class TorchDispatchTensor(torch.Tensor):
@classmethod
def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
-
def unwrap(x):
return x.elem if isinstance(x, TorchDispatchTensor) else x
@@ -76,7 +78,6 @@ class TorchDispatchTensor(torch.Tensor):
class ProfilerTree:
-
@staticmethod
def test(f):
"""Mark unit test that will be using ProfilerTree to test traces.
@@ -99,11 +100,11 @@ class ProfilerTree:
return out
finally:
delattr(self, "tree_replicate")
+
return begin_unit_test_marker
@classmethod
def format(cls, profiler, indent: int = 0):
-
def flatten(nodes, depth=0, out=None):
if out is None:
out = []
@@ -140,10 +141,19 @@ class ProfilerTree:
if flat_nodes and flat_nodes[-1][1] == "hipDeviceSynchronize":
flat_nodes = flat_nodes[:-1]
- min_depth = min([d + 1 for d, name in flat_nodes if "begin_unit_test_marker" in name] or [0])
+ min_depth = min(
+ [d + 1 for d, name in flat_nodes if "begin_unit_test_marker" in name] or [0]
+ )
return textwrap.indent(
- "\n".join([f"{' ' * (d - min_depth)}{name.rstrip()}" for d, name in flat_nodes if d >= min_depth]),
- " " * indent)
+ "\n".join(
+ [
+ f"{' ' * (d - min_depth)}{name.rstrip()}"
+ for d, name in flat_nodes
+ if d >= min_depth
+ ]
+ ),
+ " " * indent,
+ )
@staticmethod
def fmt_name(name: str) -> str:
@@ -172,18 +182,15 @@ class ProfilerTree:
"void at::native::reduce_kernel",
"void at::native::vectorized_elementwise_kernel",
"void at::native::unrolled_elementwise_kernel",
-
r"void [a-zA-Z0-9]+_kernel", # Nvidia kernels.
):
name = re.sub(
rf"{kernel_pattern}<.+>\(.+\)$",
f"{kernel_pattern.replace('[a-zA-Z0-9]+', '...')}<...>(...)",
- name)
+ name,
+ )
- return re.sub(
- "object at 0x[0-9a-fA-F]+>",
- "object at 0xXXXXXXXXXXXX>",
- name)
+ return re.sub("object at 0x[0-9a-fA-F]+>", "object at 0xXXXXXXXXXXXX>", name)
@classmethod
def validate_node(cls, node):
@@ -205,6 +212,7 @@ class ProfilerTree:
caller_name = to_string(extra_fields.caller)
assert parent_name == caller_name, f"{parent_name} vs. {caller_name}"
+
@unittest.skipIf(IS_ARM64, "Not working on ARM")
class TestProfilerTree(TestCase):
def assertTreesMatch(self, actual: str, expected: str, allow_failure: bool = False):
@@ -228,7 +236,9 @@ class TestProfilerTree(TestCase):
self.maxDiff = None
replicate = getattr(self, "tree_replicate", None)
- self.assertIsNotNone(replicate, "Please annotate test with `@ProfilerTree.test`")
+ self.assertIsNotNone(
+ replicate, "Please annotate test with `@ProfilerTree.test`"
+ )
# The profiler should produce deterministic results and should return
# to a clean state after each run. As a result, only the first
@@ -299,7 +309,7 @@ class TestProfilerTree(TestCase):
autograd::engine::evaluate_function: torch::autograd::AccumulateGrad
torch::autograd::AccumulateGrad
aten::detach
- detach"""
+ detach""",
)
# TODO: Add logic for CUDA version of test
@@ -313,7 +323,9 @@ class TestProfilerTree(TestCase):
# Check that we correctly handle the case when a user
# annotation does not call `__exit__`.
- _ = torch.autograd.profiler.record_function("Second Annotation").__enter__()
+ _ = torch.autograd.profiler.record_function(
+ "Second Annotation"
+ ).__enter__()
y = x + 1
with torch.autograd.profiler.record_function("Third Annotation"):
@@ -347,7 +359,7 @@ class TestProfilerTree(TestCase):
torch::autograd::AccumulateGrad
aten::new_empty_strided
aten::empty_strided
- aten::copy_"""
+ aten::copy_""",
)
# TODO: Add logic for CUDA version of test
@@ -421,10 +433,12 @@ class TestProfilerTree(TestCase):
torch::autograd::AccumulateGrad
aten::detach
detach
- [memory]"""
+ [memory]""",
)
- @unittest.skipIf(TEST_WITH_CROSSREF, "crossref intercepts calls and changes the callsite.")
+ @unittest.skipIf(
+ TEST_WITH_CROSSREF, "crossref intercepts calls and changes the callsite."
+ )
@ProfilerTree.test
def test_profiler_experimental_tree_with_memory_and_stack(self):
t1, t2 = torch.ones(1, requires_grad=True), torch.ones(1, requires_grad=True)
@@ -519,11 +533,13 @@ class TestProfilerTree(TestCase):
[memory]
torch/profiler/profiler.py(...): __exit__
torch/profiler/profiler.py(...): stop
- ..."""
+ ...""",
)
@skipIfTorchDynamo("too slow")
- @unittest.skipIf(TEST_WITH_CROSSREF, "crossref intercepts calls and changes the callsite.")
+ @unittest.skipIf(
+ TEST_WITH_CROSSREF, "crossref intercepts calls and changes the callsite."
+ )
@ProfilerTree.test
def test_profiler_experimental_tree_with_stack_and_modules(self):
class MyModule(torch.nn.Module):
@@ -647,10 +663,12 @@ class TestProfilerTree(TestCase):
aten::clamp_min
torch/profiler/profiler.py(...): __exit__
torch/profiler/profiler.py(...): stop
- ..."""
+ ...""",
)
- @unittest.skipIf(TEST_WITH_CROSSREF, "crossref intercepts calls and changes the callsite.")
+ @unittest.skipIf(
+ TEST_WITH_CROSSREF, "crossref intercepts calls and changes the callsite."
+ )
@ProfilerTree.test
def test_profiler_experimental_tree_with_stack_and_torch_function(self):
x = TorchFunctionTensor(torch.ones((1,)))
@@ -686,10 +704,12 @@ class TestProfilerTree(TestCase):
<built-in function isinstance>
torch/profiler/profiler.py(...): __exit__
torch/profiler/profiler.py(...): stop
- ..."""
+ ...""",
)
- @unittest.skipIf(TEST_WITH_CROSSREF, "crossref intercepts calls and changes the callsite.")
+ @unittest.skipIf(
+ TEST_WITH_CROSSREF, "crossref intercepts calls and changes the callsite."
+ )
@ProfilerTree.test
def test_profiler_experimental_tree_with_stack_and_torch_dispatch(self):
x = TorchDispatchTensor(torch.ones((1,)))
@@ -717,7 +737,8 @@ class TestProfilerTree(TestCase):
...
torch/profiler/profiler.py(...): __exit__
torch/profiler/profiler.py(...): stop
- ...""")
+ ...""",
+ )
@unittest.skip("https://github.com/pytorch/pytorch/issues/83606")
@unittest.skipIf(not torch.cuda.is_available(), "CUDA is required")
@@ -875,7 +896,9 @@ class TestProfilerTree(TestCase):
)
@unittest.skip("https://github.com/pytorch/pytorch/issues/83606")
- @unittest.skipIf(TEST_WITH_CROSSREF, "crossref intercepts calls and changes the callsite.")
+ @unittest.skipIf(
+ TEST_WITH_CROSSREF, "crossref intercepts calls and changes the callsite."
+ )
@unittest.skipIf(not torch.cuda.is_available(), "CUDA is required")
@ProfilerTree.test
def test_profiler_experimental_tree_cuda_detailed(self):
@@ -1071,5 +1094,5 @@ class TestProfilerTree(TestCase):
)
-if __name__ == '__main__':
+if __name__ == "__main__":
run_tests()
|
2.41.0
|
21c397e2ecc14c09bf57892eb55dd6f7dca80b8
|
Sun, 14 Apr 2024 08:20:04 -0700
|
[PATCH 0156/1000] Use NEON to speedup `int8pack_mm` on aarch64 (#124023)
|
Just vectorizing the inner loop as follows:
```cpp
float32x4_t c_val = vdupq_n_f32(0.0);
for (int k = 0; k < K; k += 8) {
  float16x8_t a_val = vld1q_f16(reinterpret_cast<const float16_t *>(A) + m * lda + k);
  int16x8_t b_val = vmovl_s8(vld1_s8(B + n * ldb + k));
  auto a_val_low = vcvt_f32_f16(vget_low_f16(a_val));
  auto a_val_high = vcvt_f32_f16(vget_high_f16(a_val));
  auto b_val_low = vcvtq_f32_s32(vmovl_s16(vget_low_s16(b_val)));
  auto b_val_high = vcvtq_f32_s32(vmovl_s16(vget_high_s16(b_val)));
  c_val = vaddq_f32(c_val, vmulq_f32(a_val_low, b_val_low));
  c_val = vaddq_f32(c_val, vmulq_f32(a_val_high, b_val_high));
}
float scale_val = static_cast<float>(scales[n]);
C[m * ldc + n] = reduce(c_val) * scale_val;
```
This bumps perf from 35 to 58 tokens per second (a 65% gain). Unrolling both the inner and outer loops bumps perf to 64 tokens per second (i.e. another 10% gain).

Before/after performance running stories110M on M2Pro:

| eager (before) | eager (after) | compile (before) | compile (after) |
| ---- | --- | -- | -- |
| 35 | 64 | 56 | 132 |

Pull Request resolved: https://github.com/pytorch/pytorch/pull/124023
Approved by: https://github.com/mikekgfb
ghstack dependencies: #124022
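For context, a rough eager-mode reference of what this int8 weight-only matmul computes (an illustrative sketch only; the helper name `int8pack_mm_ref` is mine and not part of the patch):
```python
import torch

# Illustrative reference: fp16 activations A (M x K), int8 weights B (N x K),
# per-output-channel scales (N,). Each output element is the dot product of a
# row of A with a row of B, scaled by scales[n] -- the same accumulation the
# NEON loop above performs eight elements at a time.
def int8pack_mm_ref(A: torch.Tensor, B: torch.Tensor, scales: torch.Tensor) -> torch.Tensor:
    return (A.float() @ B.float().t()) * scales.float()

A = torch.randn(4, 8, dtype=torch.float16)
B = torch.randint(-128, 127, (3, 8), dtype=torch.int8)
scales = torch.rand(3, dtype=torch.float16)
print(int8pack_mm_ref(A, B, scales).shape)  # torch.Size([4, 3])
```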
|
diff --git a/aten/src/ATen/native/cpu/int8mm_kernel.cpp b/aten/src/ATen/native/cpu/int8mm_kernel.cpp
index 3645bae3a6..37495a08f4 100644
--- a/aten/src/ATen/native/cpu/int8mm_kernel.cpp
+++ b/aten/src/ATen/native/cpu/int8mm_kernel.cpp
@@ -182,6 +182,51 @@ inline void tinygemm_kernel(
#endif
+#if !defined(C10_MOBILE) && defined(__aarch64__)
+#include <arm_neon.h>
+
+static inline float reduce(float32x4_t x) {
+ auto sum = vpaddq_f32(x, x);
+ return vgetq_lane_f32(vpaddq_f32(sum, sum), 0);
+}
+
+template <int BLOCK_M, int BLOCK_N>
+inline void tinygemm_kernel(
+ const Half* RESTRICT A,
+ const int8_t* RESTRICT B,
+ const Half* RESTRICT scales,
+ Half* RESTRICT C,
+ int lda,
+ int ldb,
+ int ldc,
+ int K) {
+
+ for (const auto m : c10::irange(BLOCK_M)) {
+ float32x4_t c_val[BLOCK_N];
+ c10::ForcedUnroll<BLOCK_N>{}([&](auto i) {
+ c_val[i] = vdupq_n_f32(0.0);
+ });
+ for (int k = 0; k < K; k += 8) {
+ float16x8_t a_val = vld1q_f16(reinterpret_cast<const float16_t *>(A) + m * lda + k);
+ auto a_val_low = vcvt_f32_f16(vget_low_f16(a_val));
+ auto a_val_high = vcvt_f32_f16(vget_high_f16(a_val));
+ c10::ForcedUnroll<BLOCK_N>{}([&](auto i) {
+ int16x8_t b_val = vmovl_s8(vld1_s8(B + i * ldb + k));
+ auto b_val_low = vcvtq_f32_s32(vmovl_s16(vget_low_s16(b_val)));
+ auto b_val_high = vcvtq_f32_s32(vmovl_s16(vget_high_s16(b_val)));
+ c_val[i] = vfmaq_f32(c_val[i], a_val_high, b_val_high);
+ c_val[i] = vfmaq_f32(c_val[i], a_val_low, b_val_low);
+ });
+ }
+
+ float32x4_t scale_val = vcvt_f32_f16(vld1_f16(reinterpret_cast<const float16_t *>(scales)));
+ c10::ForcedUnroll<BLOCK_N>{}([&](auto i) {
+ C[m * ldc + i] = reduce(c_val[i]) * vgetq_lane_f32(scale_val, i);
+ });
+ }
+}
+#endif
+
// non-vectorized version
template <int BLOCK_M, int BLOCK_N, typename T>
inline void tinygemm_kernel(
|
2.41.0
|
b6f6270d6b77726ef0d17c01486593058d12453
|
Sun, 14 Apr 2024 23:13:28 -0700
|
[PATCH 0157/1000] [inductor] comprehensive padding (#120758)
|
This PR adds the ability to pad tensor strides during lowering. The goal is to make sure (if possible) that tensors with bad shapes get aligned strides so the GPU can access the memory more efficiently. Testing BlenderbotSmallForConditionalGeneration, I already see a 2.5ms speedup.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/120758
Approved by: https://github.com/jansel
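To illustrate the padding rule being added, here is a standalone sketch (not the code added to torch/_inductor/ir.py; the 128-byte alignment and the 1024-element threshold are taken from the diff below, the rest is simplified):
```python
import math

def pad_strides_sketch(sizes, strides, itemsize, threshold=1024):
    """Illustrative only: pad any stride larger than `threshold` elements up to
    a multiple of align = 128 bytes / itemsize so warp-level loads stay
    coalesced. Assumes static shapes and skips the special cases the real pass
    handles (channels-last layouts, nodes that dislike padding, ...)."""
    align = 128 // itemsize
    # Walk dimensions from the innermost (smallest stride) outwards,
    # recomputing each stride from the already-padded inner dimensions.
    order = sorted(range(len(strides)), key=lambda i: strides[i])
    new_strides = [0] * len(strides)
    new_strides[order[0]] = 1
    for rank in range(1, len(order)):
        prev, cur = order[rank - 1], order[rank]
        stride = new_strides[prev] * sizes[prev]
        if stride > threshold and stride % align:
            stride = math.ceil(stride / align) * align  # pad up to the alignment
        new_strides[cur] = stride
    return new_strides

# A (2, 16, 2047) fp32 tensor: strides (2047*16, 2047, 1) become (2048*16, 2048, 1).
print(pad_strides_sketch([2, 16, 2047], [2047 * 16, 2047, 1], itemsize=4))
```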
|
diff --git a/test/inductor/test_padding.py b/test/inductor/test_padding.py
new file mode 100644
index 0000000000..2270c33291
--- /dev/null
+++ b/test/inductor/test_padding.py
@@ -0,0 +1,639 @@
+# Owner(s): ["module: inductor"]
+import copy
+
+import functools
+import os
+import unittest
+
+import torch
+from torch import nn, Tensor
+from torch._dynamo.test_case import run_tests, TestCase
+from torch._dynamo.testing import rand_strided, reduce_to_scalar_loss
+from torch._dynamo.utils import maybe_cprofile
+from torch._inductor import config, ir, metrics
+from torch._inductor.fx_passes import pad_mm as pad_mm_pass
+from torch._inductor.utils import do_bench, run_and_get_code
+from torch.testing._internal.inductor_utils import HAS_CUDA
+
+DO_PERF_TEST = os.environ.get("DO_PERF_TEST") == "1"
+DO_ACC_TEST = os.environ.get("DO_ACC_TEST", "1") == "1"
+WITH_STACK = os.environ.get("WITH_STACK") == "1"
+USE_CUDA_GRAPHS = os.environ.get("USE_CUDA_GRAPHS", "1") == "1"
+
+try:
+ import transformers # noqa: F401
+
+ HAS_TRANSFORMER = True
+except ImportError:
+ HAS_TRANSFORMER = False
+
+
+def get_optim(m):
+ return torch.optim.Adam(m.parameters(), lr=0.01, capturable=True, foreach=True)
+
+
+def gen_transformer_inputs(vocab_size, bs, seq_length):
+ def geninp():
+ return torch.randint(
+ 0, vocab_size, (bs, seq_length), dtype=torch.int64, requires_grad=False
+ )
+
+ input_dict = {"input_ids": geninp(), "labels": geninp()}
+ return input_dict
+
+
+class LinearAndSoftmax(nn.Module):
+ """
+ It's very common that a transformer model will do a matmul and then
+ softmax/log_softmax in the end.
+
+ Creating this toy model to capture the pattern and make sure we do
+ proper padding.
+ """
+
+ def __init__(self, vocab_size=30523, bias=True):
+ """
+ The default vocab size for BertForMaskedLM is 30522.
+ We run a few test cases with good or bad vocab_size around Bert's
+ default value.
+ """
+ super().__init__()
+ self.vocab_size = vocab_size
+ self.linear = nn.Linear(768, vocab_size, bias=bias)
+ self.ce = nn.CrossEntropyLoss()
+
+ def forward(self, x, label):
+ x = self.linear(x)
+ return self.ce(x.view(-1, self.vocab_size), label.view(-1))
+
+ def get_example_inputs(self, batch_size=16):
+ return torch.randn(batch_size, 512, 768), torch.randint(
+ 0, self.vocab_size, (batch_size, 512)
+ )
+
+
+def forward_and_backward_pass(m, inputs):
+ m(*inputs).sum().backward()
+
+
+@config.patch(
+ {
+ "benchmark_kernel": True,
+ "triton.unique_kernel_names": True,
+ "triton.cudagraphs": USE_CUDA_GRAPHS,
+ }
+)
+class TestCaseBase(TestCase):
+ def check_close(self, ref, act, tol=1e-3):
+ if type(ref).__name__ == "LongformerMaskedLMOutput":
+ ref = ref.loss
+ act = act.loss
+ if type(ref).__name__ == "SequenceClassifierOutput":
+ ref = ref.logits
+ act = act.logits
+ if isinstance(ref, dict) and "loss" in ref:
+ ref = ref["loss"]
+ act = act["loss"]
+ self.assertTrue(
+ torch.allclose(ref, act, atol=tol, rtol=tol), f"ref:\n{ref}\nact:\n{act}"
+ )
+
+ def common_numeric_check(self, f, *args, tol=1e-3, **kwargs):
+ ref = f(*args, **kwargs)
+ opt_f = torch.compile(f)
+ act = opt_f(*args, **kwargs)
+ self.check_close(ref, act, tol)
+
+ def do_profiling(
+ self,
+ f_lhs,
+ f_rhs,
+ tag_lhs="With padding",
+ tag_rhs="Without padding",
+ args=(),
+ kwargs=None,
+ ):
+ if kwargs is None:
+ kwargs = {}
+ torch.cuda.synchronize()
+ with torch.profiler.profile(with_stack=WITH_STACK) as p:
+ niter = 3
+ for _ in range(niter):
+ with torch.profiler.record_function(tag_lhs):
+ f_lhs(*args, **kwargs)
+
+ with torch.profiler.record_function(tag_rhs):
+ f_rhs(*args, **kwargs)
+ torch.cuda.synchronize()
+
+ profile_path = "/tmp/chrome.json"
+ p.export_chrome_trace(profile_path)
+ print(f"Chrome trace is written to {profile_path}")
+
+
+class PerfTestBetweenGoodAndBadShape(TestCaseBase):
+ @unittest.skipIf(not DO_PERF_TEST, "Perf test not enabled")
+ def test_nobias_LinearAndSoftmax_both_shapes(self):
+ self.test_LinearAndSoftmax_both_shapes(bias=False)
+
+ @unittest.skipIf(not DO_PERF_TEST, "Perf test not enabled")
+ def test_LinearAndSoftmax_both_shapes(self, bias=True):
+ """
+ Compare the perf with good and bad shape.
+ """
+ m_bad_shape = LinearAndSoftmax(vocab_size=30523, bias=bias)
+ inptus_bad_shape = m_bad_shape.get_example_inputs()
+ m_good_shape = LinearAndSoftmax(vocab_size=30528, bias=bias)
+ inputs_good_shape = m_good_shape.get_example_inputs()
+
+ m_bad_shape_opt = torch.compile(m_bad_shape)
+ m_good_shape_opt = torch.compile(m_good_shape)
+
+ latency_good_shape = do_bench(
+ lambda: forward_and_backward_pass(m_good_shape_opt, inputs_good_shape)
+ )
+ latency_bad_shape = do_bench(
+ lambda: forward_and_backward_pass(m_bad_shape_opt, inptus_bad_shape)
+ )
+ print(
+ f"Latency for good shape v.s. bad shape: {latency_good_shape:.3f}ms v.s. {latency_bad_shape:.3f}ms"
+ )
+
+ @unittest.skipIf(not DO_PERF_TEST or not HAS_TRANSFORMER, "Perf test not enabled")
+ def test_BertForMaskedLM(self, num_layers=1):
+ """
+ Compare the perf between doing padding and good shape.
+ """
+ from transformers import BertForMaskedLM
+
+ config_cls = BertForMaskedLM.config_class
+ bs = 16
+ seq_length = 512
+
+ def create_model(vocab_size):
+ config = config_cls()
+ config.num_hidden_layers = num_layers
+ config.vocab_size = vocab_size
+ inputs = gen_transformer_inputs(config.vocab_size, bs, seq_length)
+ model = BertForMaskedLM(config)
+
+ optim = get_optim(model)
+
+ def f(**inputs):
+ optim.zero_grad(True)
+ with torch.cuda.amp.autocast():
+ pred = model(**inputs)
+ loss = pred[0]
+ loss.backward()
+ optim.step()
+
+ return torch.compile(f), inputs
+
+ f_good_shape, inputs_good_shape = create_model(30528)
+ f_bad_shape, inputs_bad_shape = create_model(30522)
+
+ print("benchmark for good shape")
+ latency_good_shape = do_bench(lambda: f_good_shape(**inputs_good_shape))
+ print("benchmark for bad shape")
+ latency_bad_shape = do_bench(lambda: f_bad_shape(**inputs_bad_shape))
+ print(
+ f"Latency with good and bad shape: {latency_good_shape:.3f} v.s. {latency_bad_shape:.3f}"
+ )
+
+ self.do_profiling(
+ lambda: f_good_shape(**inputs_good_shape),
+ lambda: f_bad_shape(**inputs_bad_shape),
+ tag_lhs="With good shape",
+ tag_rhs="With bad shape",
+ )
+
+
+class PerfTestWithAndWithoutPadding(TestCaseBase):
+ @maybe_cprofile
+ def run_acc_and_perf_test(self, model, inputs, perf_inputs=None, tol=1e-3):
+ """
+ Run accuracy test.
+
+ Also compare the perf with and without the comprehensive padding if
+ DO_PERF_TEST is true.
+ """
+ if perf_inputs is None:
+ perf_inputs = inputs
+
+ def _process_inputs(x):
+ """
+ return args and kwargs
+ """
+ if isinstance(x, dict):
+ return [], x
+
+ if not isinstance(inputs, (tuple, list)):
+ x = [x]
+
+ return x, {}
+
+ args, kwargs = _process_inputs(inputs)
+ perf_args, perf_kwargs = _process_inputs(perf_inputs)
+
+ if DO_ACC_TEST:
+ model.eval()
+ self.common_numeric_check(model, *args, **kwargs, tol=tol)
+ else:
+ print("Accuracy test skipped")
+
+ model.train()
+
+ if DO_PERF_TEST:
+ print("Do performance test")
+
+ def get_f(m, optim):
+ def f(*args, **kwargs):
+ optim.zero_grad(True)
+ with torch.cuda.amp.autocast():
+ pred = m(*args, **kwargs)
+ loss = reduce_to_scalar_loss(pred)
+ loss.backward()
+ optim.step()
+
+ return f
+
+ latency_with_padding = None
+ print("Benchmark with padding")
+ with config.patch(comprehensive_padding=True):
+ m_copy_with_padding = copy.deepcopy(model)
+ optim_with_padding = get_optim(m_copy_with_padding)
+ opt_f_with_padding = torch.compile(
+ get_f(m_copy_with_padding, optim_with_padding)
+ )
+ latency_with_padding = do_bench(
+ lambda: opt_f_with_padding(*perf_args, **perf_kwargs)
+ )
+ latency_without_padding = None
+ print("bencmark without padding")
+ with config.patch(comprehensive_padding=False):
+ m_copy_without_padding = copy.deepcopy(model)
+ optim_without_padding = get_optim(m_copy_without_padding)
+ opt_f_without_padding = torch.compile(
+ get_f(m_copy_without_padding, optim_without_padding)
+ )
+ latency_without_padding = do_bench(
+ lambda: opt_f_without_padding(*perf_args, **perf_kwargs)
+ )
+ print(
+ f"Latency with and without padding: {latency_with_padding:.3f} v.s. {latency_without_padding:.3f}"
+ )
+
+ # profiling
+ self.do_profiling(
+ opt_f_with_padding,
+ opt_f_without_padding,
+ args=perf_args,
+ kwargs=perf_kwargs,
+ )
+
+ def test_nvidia_deeprecommender(self):
+ """
+ Compared the perf with and without comprehensive padding.
+ """
+ layer_sizes = [197951, 512, 512, 1024, 512, 512, 197951]
+ x = torch.randn(4, layer_sizes[0])
+
+ class Model(nn.Module):
+ def __init__(self):
+ super().__init__()
+ mod_list = []
+ for i in range(len(layer_sizes) - 1):
+ mod_list.append(nn.Linear(layer_sizes[i], layer_sizes[i + 1]))
+ mod_list.append(nn.SELU())
+
+ if i == 2:
+ mod_list.append(nn.Dropout(0.8))
+ self.seq = nn.Sequential(*mod_list)
+
+ def forward(self, x):
+ return self.seq(x)
+
+ m = Model()
+ perf_inputs = torch.randn(256, layer_sizes[0])
+ self.run_acc_and_perf_test(m, x, perf_inputs)
+
+ @unittest.skipIf(not DO_PERF_TEST or not HAS_TRANSFORMER, "Perf test not enabled")
+ def test_longformer(self, bs=4):
+ from transformers import AutoConfig, AutoModelForMaskedLM
+
+ config = AutoConfig.from_pretrained("allenai/longformer-base-4096")
+ model = AutoModelForMaskedLM.from_config(config)
+
+ vocab_size = model.config.vocab_size
+ seq_length = 1024
+ input_dict = gen_transformer_inputs(vocab_size, bs, seq_length)
+
+ self.run_acc_and_perf_test(model, input_dict)
+
+ @unittest.skipIf(not DO_PERF_TEST or not HAS_TRANSFORMER, "Perf test not enabled")
+ def test_longformer_small_bs(self):
+ """
+        The model exists in both HF and TB. In TB it uses a smaller batch size.
+ """
+ self.test_longformer(bs=2)
+
+
+class PaddingTest(TestCaseBase):
+ @unittest.skipIf(not DO_PERF_TEST, "Perf test not enabled")
+ def test_mm_padding_perf(self):
+ def naive_mm(a, b):
+ return a @ b
+
+ def _compute_padding(s, align):
+ return (s + align - 1) // align * align - s
+
+ @torch.compile
+ def pad_mm(a, b, align=16):
+ """
+ NOTE: this function only pad a single dimension which is good
+ enough for testing.
+ """
+ m_padding = _compute_padding(a.size(0), align)
+ k_padding = _compute_padding(a.size(1), align)
+ n_padding = _compute_padding(b.size(1), align)
+ return pad_mm_pass.pad_mm(a, b, m_padding, k_padding, n_padding)
+
+ for M, K, N, f in (
+ (8192, 768, 30523, naive_mm),
+ (8192, 768, 30523, pad_mm),
+ (8192, 768, 30528, naive_mm),
+ (30523, 8192, 768, naive_mm),
+ (30528, 8192, 768, naive_mm),
+ ):
+ a = torch.randn(M, K)
+ b = torch.randn(K, N)
+ ms = do_bench(lambda: f(a, b))
+ print(f"MxKxN {M}x{K}x{N} {f.__name__}: {ms:.3f}ms")
+
+ @unittest.skipIf(not DO_PERF_TEST, "Perf test not enabled")
+ def test_padmm(self):
+ """
+        Latency between original matmul and padded matmul: 2.717 v.s. 2.356
+ """
+ mat1_pad = torch.randn(8192, 30522, dtype=torch.float16)
+ mat2_pad = torch.randn(30522, 768, dtype=torch.float16)
+
+ def f():
+ return mat1_pad @ mat2_pad
+
+ def pad_dim(x: Tensor, padded_length: int, dim: int) -> Tensor:
+ pad = x.new_zeros(*x.shape[:dim], padded_length, *x.shape[dim + 1 :])
+ return torch.cat([x, pad], dim=dim)
+
+ @torch.compile(fullgraph=True, options={"triton.cudagraphs": False})
+ def g():
+ mat1 = mat1_pad
+ mat2 = mat2_pad
+ mat1 = pad_dim(mat1, 6, 1)
+ mat2 = pad_dim(mat2, 6, 0)
+ return torch.ops.aten.mm(mat1, mat2)
+
+ ori_time = do_bench(f)
+ pad_time = do_bench(g)
+
+ print(
+ f"Latency between origional matmul and padded matmul: {ori_time:.3f} v.s. {pad_time:.3f}"
+ )
+ self.do_profiling(f, g, "No MM Padding", "With mm padding")
+
+ @unittest.skipIf(not DO_PERF_TEST, "Perf test not enabled")
+ def test_matmul(self):
+ """
+ Latency with good and bad shapes: 1.705 v.s. 2.625
+ """
+ x_good_shape = torch.randn(8192, 30528, dtype=torch.float16)
+ weight_good_shape = torch.randn(30528, 768, dtype=torch.float16)
+ out_good_shape = torch.randn(8192, 768, dtype=torch.float16)
+
+ # Using stride (30522, 1) does not make a difference here.
+ x_bad_shape = rand_strided(
+ (8192, 30522), (30528, 1), device="cuda", dtype=torch.float16
+ )
+ weight_bad_shape = torch.randn(30522, 768, dtype=torch.float16)
+ out_bad_shape = torch.randn(8192, 768, dtype=torch.float16)
+
+ def f(x, weight, out):
+ torch.mm(x, weight, out=out)
+ return out
+
+ f1 = torch.compile(
+ functools.partial(f, x_good_shape, weight_good_shape, out_good_shape)
+ )
+ f2 = torch.compile(
+ functools.partial(f, x_bad_shape, weight_bad_shape, out_bad_shape)
+ )
+ latency_good_shape = do_bench(f1)
+ latency_bad_shape = do_bench(f2)
+ print(
+ f"Latency with good and bad shapes: {latency_good_shape:.3f} v.s. {latency_bad_shape:.3f}"
+ )
+ self.do_profiling(f1, f2)
+
+ def test_nobias_LinearAndSoftmax_codegen(self):
+ self.test_LinearAndSoftmax_codegen(bias=False)
+
+ def test_LinearAndSoftmax_codegen(self, bias=True):
+ m_bad_shape = LinearAndSoftmax(vocab_size=30523, bias=bias)
+ inputs_bad_shape = m_bad_shape.get_example_inputs()
+ m_bad_shape_opt = torch.compile(copy.deepcopy(m_bad_shape))
+
+ _, wrapper_codes = run_and_get_code(
+ forward_and_backward_pass, m_bad_shape_opt, inputs_bad_shape
+ )
+ forward_and_backward_pass(m_bad_shape, inputs_bad_shape)
+ self.assertTrue(
+ torch.allclose(
+ m_bad_shape.linear.weight.grad, m_bad_shape_opt.linear.weight.grad
+ )
+ )
+        self.assertTrue(len(wrapper_codes) == 2)  # one for forward and one for backward
+ forward_wrapper = wrapper_codes[0]
+
+ # make sure the load for softmax is aligned
+ self.assertTrue(
+ "tl.load(in_ptr0 + (r1 + (30528*x0))" in forward_wrapper,
+ f"forward_wrapper: {forward_wrapper}",
+ )
+
+ if DO_PERF_TEST:
+ latency = do_bench(
+ lambda: forward_and_backward_pass(m_bad_shape_opt, inputs_bad_shape)
+ )
+ print(f"latency: {latency:.3f}ms")
+
+ @config.patch(pattern_matcher=False)
+ def test_attention(self):
+ batch_size, seq_len, num_heads, hidden_size = 1, 4, 1, 16
+ inv_scale = (num_heads / hidden_size) ** 0.5
+
+ class Attention(nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.query = nn.Linear(hidden_size, hidden_size)
+ self.key = nn.Linear(hidden_size, hidden_size)
+ self.value = nn.Linear(hidden_size, hidden_size)
+
+ @staticmethod
+ def reshape(x):
+ return x.view(batch_size, seq_len, num_heads, -1).permute(0, 2, 1, 3)
+
+ @staticmethod
+ def cancel_reshape(x):
+ return x.permute(0, 2, 1, 3).view(batch_size, seq_len, hidden_size)
+
+ def forward(self, x):
+ query, key, value = self.query(x), self.key(x), self.value(x)
+ weights = (
+ torch.matmul(
+ self.reshape(query), self.reshape(key).permute(0, 1, 3, 2)
+ )
+ * inv_scale
+ ).softmax(dim=-1)
+ return self.cancel_reshape(torch.matmul(weights, self.reshape(value)))
+
+ attn = Attention()
+ x = torch.randn(batch_size, seq_len, hidden_size)
+
+ self.common_numeric_check(attn, x)
+
+ def test_view(self):
+ def f(x):
+ return x.view(3, 3, 3)
+
+ x = torch.randn(3, 9)
+ self.common_numeric_check(f, x)
+
+ def test_pad_strides(self):
+ """
+ Note that dim0's stride is also padded even though its previous value
+        is already a multiple of 16. The reason is that we padded dim1's stride.
+ We have to correspondingly increase the stride for dim0.
+ """
+ sizes = [2, 16, 2047]
+ in_strides = [2047 * 16, 2047, 1]
+ out_strides = list(ir.Layout._pad_strides(in_strides, sizes, torch.float32))
+ expected_strides = [2048 * 16, 2048, 1]
+ self.assertEqual(
+ expected_strides, out_strides, f"{expected_strides} v.s. {out_strides}"
+ )
+
+ def test_pad_strides_skip(self):
+ """
+ The padding is skipped to avoid too much memory overhead.
+ """
+ sizes = [2, 32, 127]
+ in_strides = [4064, 127, 1]
+ out_strides = list(ir.Layout._pad_strides(in_strides, sizes, torch.float32))
+ expected_strides = [4064, 127, 1]
+ self.assertEqual(
+ expected_strides, out_strides, f"{expected_strides} v.s. {out_strides}"
+ )
+
+ def test_pad_3d_tensor(self):
+ """
+ Constructing this test case guided by the fact that we don't pad
+ placeholder or user visible output's strides.
+
+ Add a matmul in the beginning and end so we can pad strides for
+ intermediate tensors.
+ """
+
+ def f(x, y):
+ x = torch.matmul(x, y)
+ x = x + 1
+ return torch.matmul(x, y)
+
+ x = torch.randn(2, 16, 2047)
+ y = torch.randn(2047, 2047)
+ self.common_numeric_check(f, x, y, tol=1e-2)
+ self.assertTrue(metrics.num_comprehensive_padding > 0)
+
+ def test_conv(self):
+ """
+ Padding the input for convolution may cause extra copy kernel being called.
+ Check this example trace: https://gist.github.com/shunting314/ce45398f7d51a63ce05fc8d411faddb3
+ """
+ x_shape = (1, 128, 640, 959)
+ x1 = torch.randn(*x_shape)
+
+ padded_stride = ir.Layout._pad_strides(x1.stride(), x1.shape, torch.float32)
+ x2 = rand_strided(x_shape, padded_stride, device="cuda")
+ x2.copy_(x1)
+
+ weight = torch.randn(64, 128, 3, 3)
+
+ def fun(x, weight):
+ return torch.convolution(
+ x,
+ weight,
+ stride=(1, 1),
+ padding=(1, 1),
+ dilation=(1, 1),
+ transposed=False,
+ output_padding=(0, 0),
+ groups=1,
+ bias=None,
+ )
+
+ ref = fun(x1, weight)
+ act = fun(x2, weight)
+ self.check_close(ref, act)
+ if DO_PERF_TEST:
+ latency_with_padding = do_bench(lambda: fun(x2, weight))
+ latency_without_padding = do_bench(lambda: fun(x1, weight))
+ print(
+ f"Latency with and without padding: {latency_with_padding:.3f} v.s. {latency_without_padding:.3f}"
+ )
+
+ self.do_profiling(lambda: fun(x2, weight), lambda: fun(x1, weight))
+
+ @unittest.skipIf(not DO_PERF_TEST, "Perf test not enabled")
+ def test_cat(self):
+ """
+ Compare the perf between aten cat and compiled cat.
+
+ Latency between eager and compiled: 1.596 v.s. 0.601
+
+ Eager cat can be 2.66x slower than inductor kernel.
+ """
+ x = torch.randn(8192, 30522, dtype=torch.float16)
+
+ def f(x):
+ pad = x.new_zeros(x.size(0), 6)
+ return torch.cat([x, pad], dim=1)
+
+ # disable cudagraphs since cudagraphs need copy the input which
+ # distort the latency a lot! (double the latency here for compiled
+ # version)
+ with config.patch("triton.cudagraphs", False):
+ opt_f = torch.compile(f)
+ opt_f(x)
+ eager_time = do_bench(lambda: f(x))
+ opt_time = do_bench(lambda: opt_f(x))
+ print(
+ f"Latency between eager and compiled: {eager_time:.3f} v.s. {opt_time:.3f}"
+ )
+ self.do_profiling(lambda: f(x), lambda: opt_f(x), "Eager Cat", "Compiled Cat")
+
+ def test_pad_channels_last(self):
+ t = torch.randn(2, 3, 5, 1025)
+ in_strides = t.stride()
+ out_strides = ir.Layout._pad_strides(in_strides, t.shape, torch.float32)
+ self.assertTrue(in_strides != out_strides)
+
+ t = t.to(memory_format=torch.channels_last)
+ in_strides = t.stride()
+ out_strides = ir.Layout._pad_strides(in_strides, t.shape, torch.float32)
+ self.assertTrue(in_strides == out_strides)
+
+
+if __name__ == "__main__":
+ if HAS_CUDA:
+ torch.set_float32_matmul_precision("high")
+ torch.set_default_device("cuda")
+ run_tests()
diff --git a/torch/_inductor/compile_fx.py b/torch/_inductor/compile_fx.py
index 010850df9e..f5e0878d39 100644
--- a/torch/_inductor/compile_fx.py
+++ b/torch/_inductor/compile_fx.py
@@ -8,17 +8,7 @@ import time
import warnings
from itertools import count
-from typing import (
- Any,
- Callable,
- Dict,
- FrozenSet,
- List,
- Optional,
- Sequence,
- Tuple,
- Union,
-)
+from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
from unittest import mock
from functorch.compile import min_cut_rematerialization_partition
@@ -322,6 +312,22 @@ def is_tf32_warning_applicable(gm: torch.fx.GraphModule):
return False
+def maybe_disable_comprehensive_padding(example_inputs: List[torch.Tensor]):
+ """
+    For the CPU backend, enabling comprehensive padding causes some unit tests
+    to fail due to a changing number of generated kernels. Skip for now.
+ """
+ has_cuda = any(
+ t.device.type == "cuda" for t in example_inputs if isinstance(t, torch.Tensor)
+ )
+
+ if config.comprehensive_padding and not has_cuda:
+ perf_hint_log.info("Skip comprehensive padding on CPU")
+ return config.patch(comprehensive_padding=False)
+ else:
+ return contextlib.nullcontext()
+
+
@DebugContext.wrap
def count_bytes_inner(
gm: torch.fx.GraphModule,
@@ -336,7 +342,9 @@ def count_bytes_inner(
_recursive_post_grad_passes(gm, False)
graph = GraphLowering(gm, shape_env=shape_env, num_static_inputs=num_fixed)
- with V.set_graph_handler(graph), V.set_real_inputs(example_inputs):
+ with V.set_graph_handler(graph), V.set_real_inputs(
+ example_inputs
+ ), maybe_disable_comprehensive_padding(example_inputs):
graph.run(*example_inputs)
num_bytes, nodes_num_elem, node_runtimes = graph.count_bytes()
metrics.num_bytes_accessed += num_bytes
@@ -402,7 +410,7 @@ def compile_fx_inner(
aot_mode: bool = False,
is_inference: bool = False,
boxed_forward_device_index: Optional[BoxedDeviceIndex] = None,
- user_visible_outputs: FrozenSet[str] = frozenset(),
+ user_visible_outputs: Optional[Dict[str, None]] = None,
layout_opt: Optional[bool] = None,
extern_node_serializer: Optional[Callable[[List[ExternKernelNode]], Any]] = None,
) -> Union[CompiledFxGraph, str]:
@@ -613,7 +621,9 @@ def fx_codegen_and_compile(
cpp_wrapper: bool = False,
aot_mode: bool = False,
is_inference: bool = False,
- user_visible_outputs: FrozenSet[str] = frozenset(),
+ # Use a dict with None value rather than a set for deterministic
+ # iteration order just in case.
+ user_visible_outputs: Optional[Dict[str, None]] = None,
layout_opt: Optional[bool] = None,
extern_node_serializer: Optional[Callable[[List[ExternKernelNode]], Any]] = None,
) -> Union[CompiledFxGraph, str]:
@@ -678,7 +688,9 @@ def fx_codegen_and_compile(
if config.is_fbcode():
log_optimus_to_scuba()
- with V.set_fake_mode(fake_mode):
+ with V.set_fake_mode(fake_mode), maybe_disable_comprehensive_padding(
+ example_inputs
+ ):
const_output_index = None
const_graph = None
const_code = None
@@ -1107,9 +1119,9 @@ def fw_compiler_freezing(
# for freezing, all graph outputs should be user visible
*_, model_outputs_node = opt_model.graph.nodes
model_outputs = model_outputs_node.args[0]
- user_visible_outputs = [
+ user_visible_outputs = dict.fromkeys(
n.name for n in model_outputs if isinstance(n, torch.fx.Node)
- ]
+ )
# constant params will be real tensors, not fake
tracing_context = torch._guards.TracingContext.try_get()
@@ -1255,11 +1267,10 @@ def compile_fx(
fixed = torch._inductor.utils.num_fw_fixed_arguments(
num_example_inputs, len(example_inputs)
)
- user_visible_outputs = set()
+ user_visible_outputs = {}
if config.keep_output_stride:
- *_, model_outputs_node = model.graph.nodes
- assert model_outputs_node.op == "output"
+ model_outputs_node = output_node(model)
model_outputs = pytree.arg_tree_leaves(*model_outputs_node.args)
num_model_outputs = len(model_outputs)
@@ -1302,11 +1313,11 @@ def compile_fx(
# of "graph" outputs. Make sure we're within bounds.
assert orig_output_end_idx <= num_model_outputs
- user_visible_outputs = {
+ user_visible_outputs = dict.fromkeys(
n.name
for n in model_outputs[original_output_start_index:orig_output_end_idx]
if isinstance(n, torch.fx.Node)
- }
+ )
return inner_compile(
model,
@@ -1344,6 +1355,14 @@ def compile_fx(
@dynamo_utils.dynamo_timed
@dynamo_utils.maybe_cprofile
def bw_compiler(model: torch.fx.GraphModule, example_inputs: List[torch.Tensor]):
+ user_visible_outputs = {}
+
+ if config.bw_outputs_user_visible:
+ model_outputs_node = output_node(model)
+ model_outputs = pytree.arg_tree_leaves(*model_outputs_node.args)
+ user_visible_outputs = dict.fromkeys(
+ n.name for n in model_outputs if isinstance(n, torch.fx.Node)
+ )
fixed = count_tangents(model)
return inner_compile(
model,
@@ -1353,6 +1372,7 @@ def compile_fx(
is_backward=True,
graph_id=graph_id,
boxed_forward_device_index=forward_device,
+ user_visible_outputs=user_visible_outputs,
)
# TODO: can add logging before/after the call to create_aot_dispatcher_function
diff --git a/torch/_inductor/config.py b/torch/_inductor/config.py
index 79d0a33ce2..61af8b070c 100644
--- a/torch/_inductor/config.py
+++ b/torch/_inductor/config.py
@@ -419,6 +419,16 @@ kernel_name_max_ops = 10
# Pad input tensors of matmul/bmm/addmm to leverage Tensor Cores in NVIDIA GPUs
shape_padding = os.environ.get("TORCHINDUCTOR_SHAPE_PADDING", "1") == "1"
+# Control if we will do padding for pointwise/reductions
+comprehensive_padding = (
+ os.environ.get("TORCHINDUCTOR_COMPREHENSIVE_PADDING", "1") == "1"
+)
+pad_channels_last = False
+
+# Whether to treat output of the backward graph as user visible.
+# For user visible outputs, inductor will make sure the stride matches with eager.
+bw_outputs_user_visible = True
+
# Fx-based linear/matmul/bmm + permute/transpose vertical fusion
permute_fusion = os.environ.get("TORCHINDUCTOR_PERMUTE_FUSION", "0") == "1"
@@ -558,7 +568,7 @@ class cpp:
# config specific to codegen/triton.py
class triton:
# Use cudagraphs on output code
- cudagraphs = False
+ cudagraphs = os.environ.get("TORCHINDUCTOR_CUDAGRAPHS") == "1"
# Use cudagraph trees for memory pooling if `cudagraphs` is True
cudagraph_trees = True
diff --git a/torch/_inductor/graph.py b/torch/_inductor/graph.py
index ce2b6d44f0..48fed87257 100644
--- a/torch/_inductor/graph.py
+++ b/torch/_inductor/graph.py
@@ -78,6 +78,7 @@ from .virtualized import V
log = logging.getLogger(__name__)
perf_hint_log = torch._logging.getArtifactLogger(__name__, "perf_hints")
output_code_log = torch._logging.getArtifactLogger(__name__, "output_code")
+aten = torch.ops.aten
if config.is_fbcode():
@@ -148,6 +149,61 @@ def getattr_recursive(obj, target):
return attr_itr
+def mark_nodes_dislike_padding(g):
+ """
+    Nodes like convolution/convolution_backward want their inputs to be dense.
+    If we pad their inputs, we end up with extra calls to copy kernels! On the other hand, padding usually helps reduction.
+
+ The pass finds nodes that dislike padding. These are nodes that can be reached
+ from a convolution/convolution_backward in the backward direction without
+    going through a reduction.
+ """
+ if not config.comprehensive_padding:
+ return
+ ops_dislike_padding = {
+ aten.convolution,
+ aten.convolution_backward,
+ }
+ # what's a better way to collect the reduction ops?
+ ops_like_padding = {
+ aten.var_mean,
+ aten.sum,
+ aten.mean,
+ aten.prod,
+ aten.any,
+ aten.amin,
+ aten.amax,
+ aten.min,
+ aten.max,
+ aten.argmin,
+ aten.argmax,
+ aten.scatter_reduce,
+ }
+
+ def _get_overload_packet(node):
+ return (
+ node.target._overloadpacket
+ if node.op == "call_function" and hasattr(node.target, "_overloadpacket")
+ else None
+ )
+
+ for cur in reversed(g.nodes):
+ op = _get_overload_packet(cur)
+ if not op:
+ continue
+ if op in ops_dislike_padding:
+ cur.meta["dislike_padding"] = True
+
+ if cur.meta.get("dislike_padding", False):
+ # propagate
+ for prior in cur.all_input_nodes:
+ prior_op = _get_overload_packet(prior)
+ if not prior_op:
+ continue
+ if prior_op not in ops_like_padding:
+ prior.meta["dislike_padding"] = True
+
+
class GraphLowering(torch.fx.Interpreter):
graph_outputs: List[ir.IRNode]
@@ -224,7 +280,7 @@ class GraphLowering(torch.fx.Interpreter):
graph_id=None,
cpp_wrapper=False,
aot_mode=False,
- user_visible_outputs=frozenset(),
+ user_visible_outputs=None,
layout_opt=None,
extern_node_serializer=None,
is_inference=False,
@@ -311,8 +367,11 @@ class GraphLowering(torch.fx.Interpreter):
self.nodes_prefer_channels_last = (
self.find_nodes_prefer_channels_last() if self.layout_opt else set()
)
+ mark_nodes_dislike_padding(gm.graph)
self._warned_fallback = {"aten.convolution_backward"}
- self.user_visible_outputs = user_visible_outputs
+ self.user_visible_outputs = (
+ user_visible_outputs if user_visible_outputs is not None else {}
+ )
self.cache_key: str = "" # This is the cache key for the compiled artifact
self.cache_path: str = "" # This is the path in the filesystem where the compiled artifact is stored
self.cache_linemap: List[
@@ -1114,7 +1173,14 @@ class GraphLowering(torch.fx.Interpreter):
and not is_input_for_as_strided
):
stride_order = ir.NHWC_STRIDE_ORDER
- result = ir.ExternKernel.require_stride_order(result, stride_order)
+
+ allow_padding = (
+ n.name not in self.user_visible_outputs
+ and not is_input_for_as_strided
+ )
+ result = ir.ExternKernel.require_stride_order(
+ result, stride_order, allow_padding=allow_padding
+ )
# Realize if (1) any user need inputs realized, or (2) there is
# already too many reads and rematerializing can be bad.
@@ -1158,7 +1224,9 @@ class GraphLowering(torch.fx.Interpreter):
need_fixed_layout += [torch.ops.mkl._mkl_linear.default]
if user.target in need_fixed_layout:
result = ir.ExternKernel.require_stride_order(
- result, ir.get_stride_order(n.meta["val"].stride())
+ result,
+ ir.get_stride_order(n.meta["val"].stride()),
+ allow_padding=True,
)
if user.op == "output":
if isinstance(result.data.data, (Pointwise, Reduction)):
diff --git a/torch/_inductor/ir.py b/torch/_inductor/ir.py
index ba0a726a26..9baba3ae38 100644
--- a/torch/_inductor/ir.py
+++ b/torch/_inductor/ir.py
@@ -39,6 +39,7 @@ from torch._dynamo.device_interface import get_interface_for_device
from torch._dynamo.utils import identity
from torch._export.serde.serialize import GraphModuleSerializer
from torch._higher_order_ops.auto_functionalize import can_auto_functionalize
+from torch._inductor import metrics
from torch._prims_common import (
compute_required_storage_length,
is_boolean_dtype,
@@ -63,6 +64,7 @@ from .ops_handler import OpCounterCSE
from .utils import (
argsort,
cache_on_self,
+ ceildiv,
convert_shape_to_inductor,
convert_shape_to_symint,
developer_warning,
@@ -191,6 +193,7 @@ def stride_order2fill_order(order):
"""
Convert stride order to fill order
For channel last format,
+
stride order = [3, 0, 2, 1] and fill order = [1, 3, 2, 0]
"""
lookup = {pos: idx for idx, pos in enumerate(order)}
@@ -1784,19 +1787,31 @@ def is_storage_and_layout(x):
def is_contiguous_storage_and_layout(x):
try:
buffer, layout = as_storage_and_layout(x, freeze=False)
+        # pad the stride here so we will NOT claim a tensor as contiguous
+        # if a padding is going to happen.
+ if layout.should_pad_strides():
+ layout.pad_strides()
return layout.is_contiguous()
except NotImplementedError:
return False
-def as_storage_and_layout(x, freeze=True, want_contiguous=False, stride_order=None):
- """Try to simplify x into a StorageBox and a Layout"""
+def as_storage_and_layout(
+ x, freeze=True, want_contiguous=False, stride_order=None, allow_padding=False
+):
+ """
+ Try to simplify x into a StorageBox and a Layout.
+
+    allow_padding only affects how we apply stride_order. When allow_padding
+ is True, we have the freedom to add padding when applying the stride_order.
+ """
if isinstance(x, TensorBox):
return as_storage_and_layout(
x.data,
freeze=freeze,
want_contiguous=want_contiguous,
stride_order=stride_order,
+ allow_padding=allow_padding,
)
if isinstance(x, StorageBox) and isinstance(x.data, Buffer):
if freeze:
@@ -1804,7 +1819,9 @@ def as_storage_and_layout(x, freeze=True, want_contiguous=False, stride_order=No
x.data.freeze_layout()
assert x.data.layout.is_contiguous()
elif stride_order is not None:
- x.data.freeze_layout_with_stride_order(stride_order)
+ x.data.freeze_layout_with_stride_order(
+ stride_order, allow_padding=allow_padding
+ )
else:
x.data.decide_layout()
return x, x.data.layout
@@ -2478,6 +2495,15 @@ def is_contiguous_strides_for_shape(stride, shape):
)
+def get_align_for_dtype(dtype):
+ """
+ CUDA max memory transaction size is 128 bytes for a warp.
+    We pick `128 // dtype.itemsize` as the alignment so the GPU can do coalesced
+ memory access.
+ """
+ return 128 // dtype.itemsize
+
+
@dataclasses.dataclass
class Layout(IRNode):
def __init__(
@@ -2516,12 +2542,13 @@ class Layout(IRNode):
def is_contiguous(self):
return is_contiguous_strides_for_shape(self.stride, self.size)
- def is_channels_last_contiguous(self):
- ndim = len(self.size)
- if ndim not in [4, 5] or self.size[1] == 1:
+ @staticmethod
+ def is_channels_last_contiguous(shape, strides):
+ ndim = len(shape)
+ if ndim not in [4, 5] or shape[1] == 1:
return False
for left, right, size in zip(
- self.stride, make_channels_last_strides_for(self.size), self.size # type: ignore[arg-type]
+ strides, make_channels_last_strides_for(shape), shape # type: ignore[arg-type]
):
if size != 1 and left != right:
return False
@@ -2573,7 +2600,96 @@ class Layout(IRNode):
order = [len(order)] + order
return self.is_stride_ordered(order)
+ @staticmethod
+ def _pad_strides(in_strides, size, dtype):
+ """
+ The padding does not change stride order but makes sure all strides larger
+ than the threshold are multiple of align.
+ """
+ align = get_align_for_dtype(dtype)
+ if len(in_strides) == 0:
+ return in_strides
+
+ if not config.pad_channels_last and Layout.is_channels_last_contiguous(
+ size, in_strides
+ ):
+ return in_strides
+
+ current_fx_node = V.get_current_node()
+ if hasattr(current_fx_node, "meta") and current_fx_node.meta.get(
+ "dislike_padding", False
+ ):
+ return in_strides
+
+ # get_stride_order does not work with dynamic shape. Also we can not
+ # statically decide if a padding is needed or how much padding we should
+ # do for dynamic shape.
+ #
+ # Skip padding the strides for dynamic shape for now.
+ if not all(
+ isinstance(s, (int, sympy.Integer))
+ for s in itertools.chain(in_strides, size)
+ ):
+ return in_strides
+
+ stride_order = get_stride_order(in_strides)
+ fill_order = stride_order2fill_order(stride_order)
+
+ new_strides = [0 for _ in range(len(in_strides))]
+ # since we pad when the layout is flexible, we can decide the
+ # smallest stride to be 1.
+ new_strides[fill_order[0]] = 1
+
+ # Don't align a too small stride since that causes too much memory increase.
+        # Padding a too small stride may also cause perf loss. We may end up with many tiny data blocks
+ # with gaps in between. That causes less coalesced GPU memory access!
+ #
+        # Initially we pick 320 as the threshold since for alignment=16,
+ # that results in at most 5% memory cost.
+ #
+        # But later on we raise the threshold to 1024 to avoid interfering with persistent reduction.
+ # Let's say an inner reduction has a row size 513. Inductor will generate
+ # persistent reduction code.
+ # If we do padding, the strides are not contiguous any more. Inductor
+ # uses a much smaller threshold for persistent reduction in this case and
+ # generates potentially worse non-persistent reduction code.
+ #
+ # This change turns HF AllenaiLongformerBase amp training from a loss of 1.09x to a win of 1.05x.
+ # (baseline: 71.09ms, padding w/o this change: 77.38ms, padding with this change: 67.77ms)
+ align_stride_threshold = 1024
+ padded = False
+ for rank, idx in enumerate(fill_order[1:], start=1):
+ prev_idx = fill_order[rank - 1]
+ stride = new_strides[prev_idx] * size[prev_idx]
+
+ if stride > align_stride_threshold and stride % align != 0:
+ stride = ceildiv(stride, align) * align
+ padded = True
+ new_strides[idx] = stride
+
+ if not padded:
+ # Consider a tensor with shape [256, 1, 5, 5]
+ # Avoid strides like [25, 5, 5, 1] being padded to equivalent strides
+ # [25, 25, 5, 1].
+ return in_strides
+
+ metrics.num_comprehensive_padding += 1
+ return new_strides
+
+ def pad_strides(self):
+ assert isinstance(self, FlexibleLayout)
+ assert self._stride is not None
+ self._stride = self._pad_strides(self._stride, self.size, self.dtype)
+
+ def should_pad_strides(self):
+ return config.comprehensive_padding and isinstance(self, FlexibleLayout)
+
def as_fixed(self):
+ if isinstance(self, FixedLayout):
+ return self
+
+ if self.should_pad_strides():
+ self.pad_strides()
return FixedLayout(
self.device,
self.dtype,
@@ -2717,30 +2833,40 @@ class FlexibleLayout(Layout):
fill_order = sorted(range(len(stride)), key=stride.__getitem__)
return FlexibleLayout.fill_ordered(sizes, fill_order)
- def as_stride_order(self, order):
+ def as_stride_order(self, order, allow_padding=False):
+ new_stride = self.stride_ordered(self.size, order)
+ if self.should_pad_strides() and allow_padding:
+ new_stride = self._pad_strides(new_stride, self.size, self.dtype)
+
return FixedLayout(
self.device,
self.dtype,
self.size,
- self.stride_ordered(self.size, order),
+ new_stride,
self.offset,
)
def as_fill_order(self, order):
+ new_stride = self.fill_ordered(self.size, order)
+ if self.should_pad_strides():
+ new_stride = self._pad_strides(new_stride, self.size, self.dtype)
return FixedLayout(
self.device,
self.dtype,
self.size,
- self.fill_ordered(self.size, order),
+ new_stride,
self.offset,
)
def as_same_order(self, stride):
+ new_stride = self.same_ordered(self.size, stride)
+ if self.should_pad_strides():
+ new_stride = self._pad_strides(new_stride, self.size, self.dtype)
return FixedLayout(
self.device,
self.dtype,
self.size,
- self.same_ordered(self.size, stride),
+ new_stride,
self.offset,
)
@@ -2931,9 +3057,9 @@ class Buffer(IRNode):
if not isinstance(self.layout, (MultiOutputLayout, NonOwningLayout)):
self.layout = self.layout.as_fixed()
- def freeze_layout_with_stride_order(self, order):
+ def freeze_layout_with_stride_order(self, order, allow_padding=False):
assert isinstance(self.layout, FlexibleLayout)
- self.layout = self.layout.as_stride_order(order)
+ self.layout = self.layout.as_stride_order(order, allow_padding=allow_padding)
def freeze_layout_with_fill_order(self, order):
assert isinstance(self.layout, FlexibleLayout)
@@ -3724,10 +3850,9 @@ class ConcatKernel(NopKernel):
x = inputs[i]
if is_storage_and_layout(x):
layout = x.get_layout()
- if (
- isinstance(layout, FixedLayout)
- and layout.is_channels_last_contiguous()
- ):
+ if isinstance(
+ layout, FixedLayout
+ ) and Layout.is_channels_last_contiguous(layout.size, layout.stride):
# use CL stride for the output
output_stride = make_channels_last_strides_for(new_size)
break
@@ -4083,7 +4208,7 @@ class ExternKernel(InputsKernel):
return cls.copy_input(x)
@classmethod
- def require_stride_order(cls, x, order):
+ def require_stride_order(cls, x, order, allow_padding=False):
if x.get_numel() == 0: # Layout doesn't matter
return x
@@ -4111,6 +4236,7 @@ class ExternKernel(InputsKernel):
)
if is_stride_order_storage_and_layout(x, order)
else order,
+ allow_padding=allow_padding,
)
return x
elif isinstance(
@@ -4139,11 +4265,17 @@ class ExternKernel(InputsKernel):
):
try:
x.data = cls.convert_to_reinterpret_view(x.data)
- return cls.require_stride_order(x, order)
+ return cls.require_stride_order(x, order, allow_padding=allow_padding)
except NotImplementedError:
pass
x = cls.copy_input(x)
- as_storage_and_layout(x, freeze=True, want_contiguous=False, stride_order=order)
+ as_storage_and_layout(
+ x,
+ freeze=True,
+ want_contiguous=False,
+ stride_order=order,
+ allow_padding=allow_padding,
+ )
assert is_stride_order_storage_and_layout(x, order)
return x
diff --git a/torch/_inductor/kernel/mm.py b/torch/_inductor/kernel/mm.py
index 2cb78e0c45..f325a5a0b0 100644
--- a/torch/_inductor/kernel/mm.py
+++ b/torch/_inductor/kernel/mm.py
@@ -193,6 +193,14 @@ def tuned_int_mm(mat1, mat2, *, layout=None):
def tuned_addmm(inp, mat1, mat2, *, alpha=1, beta=1, layout=None):
m, n, k, layout, mat1, mat2, inp_expanded = mm_args(mat1, mat2, inp, layout=layout)
if m * n == 0 or not use_max_autotune():
+ # Use a FlexibleLayout if we are not autotuning.
+ # This allows padding strides for the output.
+ from torch._inductor.ir import FixedLayout, FlexibleLayout
+
+ if isinstance(layout, FixedLayout):
+ layout = FlexibleLayout(
+ device=layout.device, dtype=layout.dtype, size=layout.size
+ )
choices = (
[
aten_addmm.bind(
diff --git a/torch/_inductor/metrics.py b/torch/_inductor/metrics.py
index 84ac39e7b0..e5dede0fc9 100644
--- a/torch/_inductor/metrics.py
+++ b/torch/_inductor/metrics.py
@@ -43,6 +43,8 @@ cpp_to_dtype_count = 0
# Each element counts the number of inner kernels in each outer loop fusion.
cpp_outer_loop_fused_inner_counts: List[int] = []
+num_comprehensive_padding = 0
+
# reset all counters
def reset():
@@ -52,6 +54,7 @@ def reset():
global ir_nodes_pre_fusion
global cpp_to_dtype_count
global cpp_outer_loop_fused_inner_counts
+ global num_comprehensive_padding
generated_kernel_count = 0
generated_cpp_vec_kernel_count = 0
@@ -61,6 +64,7 @@ def reset():
ir_nodes_pre_fusion = 0
cpp_to_dtype_count = 0
cpp_outer_loop_fused_inner_counts.clear()
+ num_comprehensive_padding = 0
@dataclass
|
2.41.0
|
0d1720861833e7e2d74ca69c4b056c01b0ee1d0
|
Mon, 15 Apr 2024 19:09:41 +0000
|
[PATCH 0158/1000] [export] Restore original placeholder names (part 3: constant input de/serialization) (#123590)
|
Summary: Note: breaking the original diff D55225818 into 3 parts (top-level renaming, higher-order-op subgraphs, constant input de/serialization) because of its size.

Stacked PR to restore original names to placeholder nodes, replacing the default names arg0_1, arg1_1, ...

This PR supports constant-argument placeholder names (e.g. forward(self, x, y=1)) and their de/serialization, by adding a name field for ConstantArguments in the graph signature, and a ConstantInputSpec in the input specs for serialization.

Test Plan: verification checks on placeholder names for all export() calls, plus a unit test in test/export/test_export.py

Differential Revision: D55506949

Pull Request resolved: https://github.com/pytorch/pytorch/pull/123590
Approved by: https://github.com/angelayi, https://github.com/zhxchen17
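A minimal sketch of the behavior this enables, mirroring the new unit test in the diff below (it assumes a build that includes this change):
```python
import torch
from torch.export import export

class Foo(torch.nn.Module):
    def forward(self, x, y, div="floor"):
        return torch.div(x, y, rounding_mode=div)

ep = export(Foo(), (torch.randn(4), torch.randn(4), "floor"))
# The constant input keeps its original name in the graph signature instead of
# a generated placeholder name.
div_spec = ep.graph_signature.input_specs[2]
print(div_spec.arg.name, div_spec.arg.value)  # expected: div floor
```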
|
diff --git a/test/export/test_export.py b/test/export/test_export.py
index 4bc660b916..73795fe0eb 100644
--- a/test/export/test_export.py
+++ b/test/export/test_export.py
@@ -1262,10 +1262,6 @@ class TestExport(TestCase):
}
self._test_export_same_as_eager(kw_func, args, kwargs)
- # TODO(pianpwk): resolve in immediate follow-up PR
- # add name to ConstantArgument schema for SerDer
- @testing.expectedFailureSerDer
- @testing.expectedFailureSerDerPreDispatch
def test_export_func_with_default_kwargs(self):
class Module(torch.nn.Module):
def forward(self, arg1, arg2, a, b=1):
@@ -4386,6 +4382,21 @@ def forward(self, x):
]
self.assertEqual(expected_getattr_names, real_getattr_names)
+ # original input names aren't retraceable:
+ # compilation will succeed, but names won't match forward() signature.
+ @testing.expectedFailureRetraceability
+ def test_constant_input_naming(self):
+ class Foo(torch.nn.Module):
+ def forward(self, x, y, div="floor"):
+ return torch.div(x, y, rounding_mode=div)
+
+ f = Foo()
+ inputs = (torch.randn(4), torch.randn(4), "floor")
+ ep = export(f, inputs)
+ div_spec = ep.graph_signature.input_specs[2]
+ self.assertEqual(div_spec.arg.name, "div")
+ self.assertEqual(div_spec.arg.value, "floor")
+
@unittest.skipIf(not torchdynamo.is_dynamo_supported(), "dynamo isn't support")
class TestOneOffModelExportResult(TestCase):
diff --git a/test/export/test_torchbind.py b/test/export/test_torchbind.py
index 0f4a8fd659..7523af9f13 100644
--- a/test/export/test_torchbind.py
+++ b/test/export/test_torchbind.py
@@ -152,7 +152,7 @@ class TestExportTorchbind(TestCase):
ep.module().code.strip(),
"""\
def forward(self, arg_0, arg_1):
- x, arg1_1, = fx_pytree.tree_flatten_spec(([arg_0, arg_1], {}), self._in_spec)
+ x, n, = fx_pytree.tree_flatten_spec(([arg_0, arg_1], {}), self._in_spec)
attr = self.attr
call_torchbind = torch.ops.higher_order.call_torchbind(attr, 'add_tensor', x); attr = None
add = torch.ops.aten.add.Tensor(x, call_torchbind); x = call_torchbind = None
@@ -161,7 +161,7 @@ def forward(self, arg_0, arg_1):
self.assertExpectedInline(
ep.graph_module.code.strip(),
"""\
-def forward(self, obj_attr, x, arg1_1):
+def forward(self, obj_attr, x, n):
call_torchbind = torch.ops.higher_order.call_torchbind(obj_attr, 'add_tensor', x); obj_attr = None
add = torch.ops.aten.add.Tensor(x, call_torchbind); x = call_torchbind = None
return (add,)""",
diff --git a/torch/_export/passes/collect_tracepoints_pass.py b/torch/_export/passes/collect_tracepoints_pass.py
index 6a2b9c6748..72ccaa0d22 100644
--- a/torch/_export/passes/collect_tracepoints_pass.py
+++ b/torch/_export/passes/collect_tracepoints_pass.py
@@ -28,7 +28,7 @@ class CollectTracepointsPass(PassBase):
"Symint input is not implemented yet for submodule call signature."
)
else:
- return ConstantArgument(value=arg)
+ return ConstantArgument(name="", value=arg)
for module in gm.modules():
if not isinstance(module, torch.fx.GraphModule):
diff --git a/torch/_export/serde/schema.py b/torch/_export/serde/schema.py
index 9be909f899..8f2a3ad60b 100644
--- a/torch/_export/serde/schema.py
+++ b/torch/_export/serde/schema.py
@@ -8,7 +8,7 @@ from typing import Dict, List, Optional, Tuple
from torch._export.serde.union import _Union
# NOTE: Please update this value if any modifications are made to the schema
-SCHEMA_VERSION = (5, 2)
+SCHEMA_VERSION = (5, 3)
TREESPEC_VERSION = 1
@@ -215,6 +215,21 @@ class UserInputSpec:
arg: Argument
+@dataclass(repr=False)
+class ConstantValue(_Union):
+ as_none: Tuple[()]
+ as_int: int
+ as_float: float
+ as_string: str
+ as_bool: bool
+
+
+@dataclass
+class ConstantInputSpec:
+ name: str
+ value: ConstantValue
+
+
@dataclass
class InputToParameterSpec:
arg: TensorArgument
@@ -254,6 +269,7 @@ class InputSpec(_Union):
tensor_constant: InputToTensorConstantSpec
custom_obj: InputToCustomObjSpec
token: InputTokenSpec
+ constant_input: ConstantInputSpec
@dataclass
diff --git a/torch/_export/serde/schema.yaml b/torch/_export/serde/schema.yaml
index 97c7454022..eecb43431e 100644
--- a/torch/_export/serde/schema.yaml
+++ b/torch/_export/serde/schema.yaml
@@ -1,5 +1,5 @@
# @generated by update_schema.py
-# checksum<<abb5ed3498ff715c731fc17d7dc44cbb97366b5fe6098a79f75819b9c3ad3f97>>
+# checksum<<f1da1027d3bccb23db1f8dde8e635e53c7ab67fde5248ede49a6b7a3402ce743>>
Argument:
kind: union
fields:
@@ -56,6 +56,26 @@ BufferMutationSpec:
type: TensorArgument
buffer_name:
type: str
+ConstantInputSpec:
+ kind: struct
+ fields:
+ name:
+ type: str
+ value:
+ type: ConstantValue
+ConstantValue:
+ kind: union
+ fields:
+ as_none:
+ type: Tuple[()]
+ as_int:
+ type: int
+ as_float:
+ type: float
+ as_string:
+ type: str
+ as_bool:
+ type: bool
CustomObjArgument:
kind: struct
fields:
@@ -157,6 +177,8 @@ InputSpec:
type: InputToCustomObjSpec
token:
type: InputTokenSpec
+ constant_input:
+ type: ConstantInputSpec
InputToBufferSpec:
kind: struct
fields:
@@ -404,5 +426,5 @@ UserOutputSpec:
type: Argument
SCHEMA_VERSION:
 - 5
-- 2
+- 3
|
|
9059affb97a4d69c2c6b82e274f2fefb7ab765b
|
Mon, 15 Apr 2024 09:42:10 -0700
|
[PATCH 0160/1000] Use packed metadata from triton to reduce launch latency (#123842)
|
https://github.com/openai/triton/pull/3633 converts some kernel launch metadata from a namedtuple to a regular tuple, which is faster to parse. Using it here shaves off a microsecond or so from the apparently extremely-sensitive launch path. Fixes #123597 Pull Request resolved: https://github.com/pytorch/pytorch/pull/123842 Approved by: https://github.com/jansel, https://github.com/shunting314 ghstack dependencies: #123841
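As a minimal illustration (a hypothetical helper, not part of the patch), the version-tolerant lookup boils down to:

```python
def launch_metadata(binary):
    """Prefer Triton's packed (plain-tuple) metadata when the installed
    Triton exposes it; otherwise fall back to the namedtuple `metadata`.
    `binary` is assumed to be a compiled Triton kernel object."""
    if hasattr(binary, "packed_metadata"):
        return binary.packed_metadata
    return binary.metadata
```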
|
diff --git a/torch/_inductor/triton_heuristics.py b/torch/_inductor/triton_heuristics.py
index 71fbdaa052..7a4ea40b5f 100644
--- a/torch/_inductor/triton_heuristics.py
+++ b/torch/_inductor/triton_heuristics.py
@@ -387,7 +387,9 @@ class CachingAutotuner(KernelInterface):
"bin": binary,
"launch_enter_hook": binary.launch_enter_hook,
"launch_exit_hook": binary.launch_exit_hook,
- "metadata": binary.metadata,
+ "metadata": binary.packed_metadata
+ if hasattr(binary, "packed_metadata")
+ else binary.metadata,
"shared": binary_shared,
}
|
2.41.0
|
5b404b809f231c766e3015e728340730204424a
|
Mon, 15 Apr 2024 09:25:11 -0700
|
[PATCH 0161/1000] [inductor] Fix fresh_inductor_cache() (#122661)
|
Summary: Modify fresh_inductor_cache() to clear cached state before mocking the toplevel cache_dir directory. Any lru_caches (or otherwise) can use the @clear_on_fresh_inductor_cache decorator to register the cache for clearing. Also change the base inductor TestCase class to use fresh_inductor_cache(). Previously that TestCase was only mocking the subdirectory within the toplevel cache dir designated for the FX graph cache artifacts. Test Plan: - New unit test - All existing inductor tests will exercise fresh_inductor_cache() Pull Request resolved: https://github.com/pytorch/pytorch/pull/122661 Approved by: https://github.com/oulgen
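For illustration, a minimal usage sketch of the names added in this patch (`clear_on_fresh_inductor_cache` and `fresh_inductor_cache` from `torch._inductor.utils`); the cached helper itself is hypothetical:

```python
import functools

from torch._inductor.utils import clear_on_fresh_inductor_cache, fresh_inductor_cache


@clear_on_fresh_inductor_cache  # registered so fresh_inductor_cache() clears it
@functools.lru_cache(None)
def expensive_lookup(key: str) -> str:  # hypothetical cached helper
    return key.upper()


with fresh_inductor_cache():
    # Inside this block TORCHINDUCTOR_CACHE_DIR points at a temporary directory
    # and all registered caches (including expensive_lookup) start out empty.
    expensive_lookup("a")
```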
|
diff --git a/test/inductor/test_codecache.py b/test/inductor/test_codecache.py
index 89380bee53..04ab69debc 100644
--- a/test/inductor/test_codecache.py
+++ b/test/inductor/test_codecache.py
@@ -14,10 +14,12 @@ from torch._inductor.codecache import (
CUDACodeCache,
FxGraphCachePickler,
FxGraphHashDetails,
+ PyCodeCache,
TensorMetadata,
TensorMetadataAndValues,
)
from torch._inductor.test_case import run_tests, TestCase
+from torch._inductor.utils import cache_dir, fresh_inductor_cache
from torch.testing._internal.common_cuda import SM80OrLater
from torch.testing._internal.common_device_type import largeTensorTest
from torch.testing._internal.common_utils import (
@@ -553,5 +555,28 @@ class TestFxGraphCacheHashing(TestCase):
assert "-DNDEBUG" in cmd_parts, cmd_parts
+class TestUtils(TestCase):
+ def test_fresh_inductor_cache(self):
+ def fn(x, y):
+ return x + y
+
+ a = torch.rand(10)
+ b = torch.rand(10)
+
+ with fresh_inductor_cache():
+ self.assertEqual(len(PyCodeCache.cache.keys()), 0)
+ res1 = torch.compile(fn)(a, b)
+ cache_dir1 = cache_dir()
+
+ torch._dynamo.reset()
+ with fresh_inductor_cache():
+ self.assertEqual(len(PyCodeCache.cache.keys()), 0)
+ res2 = torch.compile(fn)(a, b)
+ cache_dir2 = cache_dir()
+
+ self.assertEqual(res1, res2)
+ self.assertNotEqual(cache_dir1, cache_dir2)
+
+
if __name__ == "__main__":
run_tests()
diff --git a/test/inductor/test_max_autotune.py b/test/inductor/test_max_autotune.py
index 6448b94055..d1f074de51 100644
--- a/test/inductor/test_max_autotune.py
+++ b/test/inductor/test_max_autotune.py
@@ -280,17 +280,17 @@ class TestMaxAutotune(TestCase):
os.environ.pop("TRITON_CACHE_MANAGER", None)
with config.patch({"max_autotune": True}):
for _ in range(4):
- torch.compile(mm, dynamic=dynamic)(a, b)
+ with fresh_inductor_cache():
+ torch.compile(mm, dynamic=dynamic)(a, b)
reset()
- torch._inductor.codecache.PyCodeCache.clear()
self.assertEqual(num_get, 3)
self.assertEqual(num_put, 1)
num_get = 0
num_put = 0
for _ in range(4):
- torch.compile(f, dynamic=dynamic)(x, y)
+ with fresh_inductor_cache():
+ torch.compile(f, dynamic=dynamic)(x, y)
reset()
- torch._inductor.codecache.PyCodeCache.clear()
self.assertEqual(num_get, 3)
self.assertEqual(num_put, 1)
diff --git a/test/inductor/test_multi_kernel.py b/test/inductor/test_multi_kernel.py
index 316ef6bae9..808802ebfd 100644
--- a/test/inductor/test_multi_kernel.py
+++ b/test/inductor/test_multi_kernel.py
@@ -60,7 +60,7 @@ def make_cpp_wrapper_test(orig_test, **extra_args):
# the kernel with cpp_wrapper enabled.
from torch._inductor import codecache
- codecache.PyCodeCache.clear()
+ codecache.PyCodeCache.cache_clear()
return orig_test(self, **extra_args)
return fn
diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py
index 88d37823a7..4e84838504 100644
--- a/torch/_inductor/codecache.py
+++ b/torch/_inductor/codecache.py
@@ -59,7 +59,7 @@ from torch._dynamo.device_interface import (
from torch._dynamo.utils import counters, dynamo_timed
from torch._inductor import config, exc, metrics
from torch._inductor.codegen.cuda import cuda_env
-from torch._inductor.utils import cache_dir, is_linux
+from torch._inductor.utils import cache_dir, clear_on_fresh_inductor_cache, is_linux
from torch._subclasses.fake_tensor import (
extract_tensor_metadata,
FakeTensor,
@@ -183,6 +183,7 @@ class CacheBase:
return system
@staticmethod
+ @clear_on_fresh_inductor_cache
@functools.lru_cache(None)
def get_local_cache_path() -> Path:
return Path(os.path.join(cache_dir(), "cache", CacheBase.get_system()["hash"]))
@@ -202,22 +203,21 @@ class CacheBase:
self.system = CacheBase.get_system()
- self.local_cache_path = CacheBase.get_local_cache_path()
- self.global_cache_path = CacheBase.get_global_cache_path()
-
def get_local_cache(self) -> Dict[str, Any]:
- if not self.local_cache_path.is_file():
+ local_cache_path = self.get_local_cache_path()
+ if not local_cache_path.is_file():
return {}
- with open(self.local_cache_path) as local_cache_fp:
+ with open(local_cache_path) as local_cache_fp:
local_cache = json.load(local_cache_fp)
return local_cache["cache"]
def update_local_cache(self, local_cache: Dict[str, Any]) -> None:
- if not os.path.exists(self.local_cache_path.parent):
- os.makedirs(self.local_cache_path.parent, exist_ok=True)
+ local_cache_path = self.get_local_cache_path()
+ if not os.path.exists(local_cache_path.parent):
+ os.makedirs(local_cache_path.parent, exist_ok=True)
write_atomic(
- str(self.local_cache_path),
+ str(local_cache_path),
json.dumps({"system": self.system, "cache": local_cache}, indent=4),
)
@@ -250,9 +250,10 @@ class LocalCache(CacheBase):
class PersistentCache(CacheBase):
@functools.lru_cache(None)
def get_global_cache(self):
- if self.global_cache_path is None or not self.global_cache_path.is_file():
+ global_cache_path = self.get_global_cache_path()
+ if global_cache_path is None or not global_cache_path.is_file():
return {}
- with open(self.global_cache_path) as global_cache_fp:
+ with open(global_cache_path) as global_cache_fp:
global_cache = json.load(global_cache_fp)
return global_cache["cache"]
@@ -1613,9 +1614,10 @@ def split_aot_inductor_output_path(path: str) -> Tuple[str, str]:
return path, ""
+@clear_on_fresh_inductor_cache
class CudaKernelParamCache:
cache: Dict[str, Dict[str, str]] = dict()
- clear = staticmethod(cache.clear)
+ cache_clear = staticmethod(cache.clear)
@classmethod
def set(cls, key: str, params: Dict[str, str], cubin: str) -> None:
@@ -1899,6 +1901,7 @@ class AotCodeCompiler:
# - valid_vec_isa_list()
# - VecISA.__bool__() <-- takes out a lock
# - compile_file() <-- imports cpp_prefix_path from cpp, which causes us to try to take out the same lock.
+@clear_on_fresh_inductor_cache
@functools.lru_cache
def cpp_prefix_path() -> str:
path = Path(__file__).parent / "codegen/cpp_prefix.h"
@@ -2011,9 +2014,10 @@ def custom_op_wrapper(op: str, *args):
return torch._C._aoti.unsafe_alloc_void_ptr_from_tensor(result)
+@clear_on_fresh_inductor_cache
class CppCodeCache:
cache: Dict[str, Callable[[], Union[CDLL, ModuleType]]] = {}
- clear = staticmethod(cache.clear)
+ cache_clear = staticmethod(cache.clear)
cpp_compile_command_flags: Dict[str, Any] = {}
@staticmethod
@@ -2103,9 +2107,10 @@ def _worker_compile_cpp(lock_path, input_path, output_path, cmd):
# Customized Python binding for cpp kernels
+@clear_on_fresh_inductor_cache
class CppPythonBindingsCodeCache(CppCodeCache):
cache: Dict[str, Callable[[], Union[CDLL, ModuleType]]] = {}
- clear = staticmethod(cache.clear)
+ cache_clear = staticmethod(cache.clear)
cpp_compile_command_flags = {
# kernels have no dependency on libtorch
"include_pytorch": False,
@@ -2247,9 +2252,10 @@ class CppPythonBindingsCodeCache(CppCodeCache):
return cls.load_pybinding_async(*args, **kwargs)()
+@clear_on_fresh_inductor_cache
class CppWrapperCodeCache(CppPythonBindingsCodeCache):
cache: Dict[str, Callable[[], Union[CDLL, ModuleType]]] = {}
- clear = staticmethod(cache.clear)
+ cache_clear = staticmethod(cache.clear)
cpp_compile_command_flags = {
"include_pytorch": not config.abi_compatible,
"shared": True,
@@ -2313,10 +2319,11 @@ def _reload_python_module_in_subproc(key, path):
return PyCodeCache.load_by_key_path(key, path)
+@clear_on_fresh_inductor_cache
class PyCodeCache:
cache: Dict[str, ModuleType] = dict()
linemaps: Dict[str, List[Tuple[Any, ...]]] = dict()
- clear = staticmethod(cache.clear)
+ cache_clear = staticmethod(cache.clear)
@classmethod
def write(cls, source_code: str, extra: str = "") -> Tuple[str, str]:
@@ -2615,6 +2622,7 @@ class DLLWrapper:
self.close()
+@clear_on_fresh_inductor_cache
class CUDACodeCache:
@dataclasses.dataclass
class CacheEntry:
@@ -2622,7 +2630,7 @@ class CUDACodeCache:
output_path: str
cache: Dict[str, CacheEntry] = dict()
- clear = staticmethod(cache.clear)
+ cache_clear = staticmethod(cache.clear)
_SOURCE_CODE_SUFFIX = "cu"
@classmethod
diff --git a/torch/_inductor/test_case.py b/torch/_inductor/test_case.py
index 546524d900..0412d4eea5 100644
--- a/torch/_inductor/test_case.py
+++ b/torch/_inductor/test_case.py
@@ -1,6 +1,5 @@
import contextlib
-import tempfile
-import unittest
+import os
from torch._dynamo.test_case import (
run_tests as dynamo_run_tests,
@@ -8,6 +7,7 @@ from torch._dynamo.test_case import (
)
from torch._inductor import config
+from torch._inductor.utils import fresh_inductor_cache
def run_tests(needs=()):
@@ -20,34 +20,13 @@ class TestCase(DynamoTestCase):
the cache directory for each test.
"""
- _stack: contextlib.ExitStack
-
- @classmethod
- def setUpClass(cls):
- super().setUpClass()
- cls._stack = contextlib.ExitStack()
- cls._stack.enter_context(config.patch({"fx_graph_cache": True}))
-
- @classmethod
- def tearDownClass(cls):
- super().tearDownClass()
- cls._stack.close()
-
def setUp(self):
super().setUp()
-
- # For all tests, mock the tmp directory populated by the inductor
- # FxGraphCache, both for test isolation and to avoid filling disk.
- self._inductor_cache_tmp_dir = tempfile.TemporaryDirectory()
- self._inductor_cache_get_tmp_dir_patch = unittest.mock.patch(
- "torch._inductor.codecache.FxGraphCache._get_tmp_dir"
- )
- mock_get_dir = self._inductor_cache_get_tmp_dir_patch.start()
- mock_get_dir.return_value = self._inductor_cache_tmp_dir.name
+ self._inductor_test_stack = contextlib.ExitStack()
+ self._inductor_test_stack.enter_context(config.patch({"fx_graph_cache": True}))
+ if os.environ.get("INDUCTOR_TEST_DISABLE_FRESH_CACHE") != "1":
+ self._inductor_test_stack.enter_context(fresh_inductor_cache())
def tearDown(self):
super().tearDown()
-
- # Clean up the FxGraphCache tmp dir.
- self._inductor_cache_get_tmp_dir_patch.stop()
- self._inductor_cache_tmp_dir.cleanup()
+ self._inductor_test_stack.close()
diff --git a/torch/_inductor/utils.py b/torch/_inductor/utils.py
index 0022d0758d..a0f847bf9a 100644
--- a/torch/_inductor/utils.py
+++ b/torch/_inductor/utils.py
@@ -734,6 +734,22 @@ else:
)
+_registered_caches: List[Any] = []
+
+
+def clear_on_fresh_inductor_cache(obj: Any):
+ """
+ Use this decorator to register any caches that should be cache_clear'd
+ with fresh_inductor_cache().
+ """
+ if not hasattr(obj, "cache_clear") or not callable(obj.cache_clear):
+ raise AttributeError(f"{obj} does not have a cache_clear method")
+
+ _registered_caches.append(obj)
+ return obj
+
+
+@clear_on_fresh_inductor_cache
@functools.lru_cache(None)
def cache_dir() -> str:
cache_dir = os.environ.get("TORCHINDUCTOR_CACHE_DIR")
@@ -755,6 +771,9 @@ def fresh_inductor_cache(cache_entries=None):
Optionally, pass a dict as 'cache_entries' to get a list of filenames and sizes
generated with this cache instance.
"""
+ for obj in _registered_caches:
+ obj.cache_clear()
+
with tempfile.TemporaryDirectory() as inductor_cache_dir:
with mock.patch.dict(
os.environ, {"TORCHINDUCTOR_CACHE_DIR": inductor_cache_dir}
|
2.41.0
|
5a090fb56b5a9cd018caf1dc5169a3a2b5d7468
|
Mon, 15 Apr 2024 20:39:50 +0000
|
[PATCH 0163/1000] [CI] Update bazel deps (#124076)
|
- Update `WORKSPACE` to actually use Python-3.10, as the job name claims it does - Get rid of the unneeded `future` and `six` dependencies (removed a long time ago) - Update `requests`, `typing-extensions` and `setuptools` to the latest releases - Mark `tools/build/bazel/requirements.txt` as a generated file This also updates idna to 3.7, which contains a fix for [CVE-2024-3651](https://github.com/advisories/GHSA-jjg7-2v4v-x38h); since we do not ship a binary with it, this does not expose the CI system to any actual risk. TODOs: - Add a periodic job that runs `pip compile` to update these to the latest versions - Unify the various requirements .txt files (i.e. the bazel requirements and requirements-ci should be one and the same) Pull Request resolved: https://github.com/pytorch/pytorch/pull/124076 Approved by: https://github.com/seemethere, https://github.com/DanilBaibak
|
diff --git a/.gitattributes b/.gitattributes
index 8bccf04bbb..e904301752 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -4,3 +4,4 @@
.github/generated-* linguist-generated=true
.github/scripts/gql_mocks.json linguist-generated=true
third_party/LICENSES_BUNDLED.txt linguist-generated=true
+tools/build/bazel/requirements.txt linguist-generated=true
diff --git a/BUILD.bazel b/BUILD.bazel
index a2902c0e5e..c5cf199b00 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -1762,12 +1762,10 @@ py_library(
visibility = ["//visibility:public"],
srcs = glob(["torch/**/*.py"], exclude = ["torch/version.py"]) + [":torch/version.py"] + glob(["functorch/**/*.py"]),
deps = [
- rules.requirement("future"),
rules.requirement("numpy"),
rules.requirement("pyyaml"),
rules.requirement("requests"),
rules.requirement("setuptools"),
- rules.requirement("six"),
rules.requirement("sympy"),
rules.requirement("typing_extensions"),
"//torchgen",
diff --git a/WORKSPACE b/WORKSPACE
index b187949d66..59bc0998dd 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -206,11 +206,11 @@ py_repositories()
load("@rules_python//python:repositories.bzl", "python_register_toolchains")
python_register_toolchains(
- name = "python3_8",
- python_version = "3.8",
+ name = "python3_10",
+ python_version = "3.10",
)
-load("@python3_8//:defs.bzl", "interpreter")
+load("@python3_10//:defs.bzl", "interpreter")
load("@rules_python//python:pip.bzl", "pip_parse")
pip_parse(
diff --git a/tools/build/bazel/requirements.in b/tools/build/bazel/requirements.in
index eaec6c67ee..20b28921ff 100644
--- a/tools/build/bazel/requirements.in
+++ b/tools/build/bazel/requirements.in
@@ -1,8 +1,6 @@
-PyYAML==6.0
-future==0.18.3
-numpy==1.24.3
+PyYAML==6.0.1
+numpy==1.26.4
requests==2.31.0
-setuptools==67.8.0
-six==1.16.0
+setuptools==69.5.1
sympy==1.12
-typing_extensions==4.5.0
+typing_extensions==4.11.0
diff --git a/tools/build/bazel/requirements.txt b/tools/build/bazel/requirements.txt
index 2d9812b0bf..a241602167 100644
--- a/tools/build/bazel/requirements.txt
+++ b/tools/build/bazel/requirements.txt
@@ -4,193 +4,220 @@
#
# pip-compile --allow-unsafe --generate-hashes tools/build/bazel/requirements.in
#
-certifi==2023.7.22 \
- --hash=sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082 \
- --hash=sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9
+certifi==2024.2.2 \
+ --hash=sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f \
+ --hash=sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1
# via requests
-charset-normalizer==3.1.0 \
- --hash=sha256:04afa6387e2b282cf78ff3dbce20f0cc071c12dc8f685bd40960cc68644cfea6 \
- --hash=sha256:04eefcee095f58eaabe6dc3cc2262f3bcd776d2c67005880894f447b3f2cb9c1 \
- --hash=sha256:0be65ccf618c1e7ac9b849c315cc2e8a8751d9cfdaa43027d4f6624bd587ab7e \
- --hash=sha256:0c95f12b74681e9ae127728f7e5409cbbef9cd914d5896ef238cc779b8152373 \
- --hash=sha256:0ca564606d2caafb0abe6d1b5311c2649e8071eb241b2d64e75a0d0065107e62 \
- --hash=sha256:10c93628d7497c81686e8e5e557aafa78f230cd9e77dd0c40032ef90c18f2230 \
- --hash=sha256:11d117e6c63e8f495412d37e7dc2e2fff09c34b2d09dbe2bee3c6229577818be \
- --hash=sha256:11d3bcb7be35e7b1bba2c23beedac81ee893ac9871d0ba79effc7fc01167db6c \
- --hash=sha256:12a2b561af122e3d94cdb97fe6fb2bb2b82cef0cdca131646fdb940a1eda04f0 \
- --hash=sha256:12d1a39aa6b8c6f6248bb54550efcc1c38ce0d8096a146638fd4738e42284448 \
- --hash=sha256:1435ae15108b1cb6fffbcea2af3d468683b7afed0169ad718451f8db5d1aff6f \
- --hash=sha256:1c60b9c202d00052183c9be85e5eaf18a4ada0a47d188a83c8f5c5b23252f649 \
- --hash=sha256:1e8fcdd8f672a1c4fc8d0bd3a2b576b152d2a349782d1eb0f6b8e52e9954731d \
- --hash=sha256:20064ead0717cf9a73a6d1e779b23d149b53daf971169289ed2ed43a71e8d3b0 \
- --hash=sha256:21fa558996782fc226b529fdd2ed7866c2c6ec91cee82735c98a197fae39f706 \
- --hash=sha256:22908891a380d50738e1f978667536f6c6b526a2064156203d418f4856d6e86a \
- --hash=sha256:3160a0fd9754aab7d47f95a6b63ab355388d890163eb03b2d2b87ab0a30cfa59 \
- --hash=sha256:322102cdf1ab682ecc7d9b1c5eed4ec59657a65e1c146a0da342b78f4112db23 \
- --hash=sha256:34e0a2f9c370eb95597aae63bf85eb5e96826d81e3dcf88b8886012906f509b5 \
- --hash=sha256:3573d376454d956553c356df45bb824262c397c6e26ce43e8203c4c540ee0acb \
- --hash=sha256:3747443b6a904001473370d7810aa19c3a180ccd52a7157aacc264a5ac79265e \
- --hash=sha256:38e812a197bf8e71a59fe55b757a84c1f946d0ac114acafaafaf21667a7e169e \
- --hash=sha256:3a06f32c9634a8705f4ca9946d667609f52cf130d5548881401f1eb2c39b1e2c \
- --hash=sha256:3a5fc78f9e3f501a1614a98f7c54d3969f3ad9bba8ba3d9b438c3bc5d047dd28 \
- --hash=sha256:3d9098b479e78c85080c98e1e35ff40b4a31d8953102bb0fd7d1b6f8a2111a3d \
- --hash=sha256:3dc5b6a8ecfdc5748a7e429782598e4f17ef378e3e272eeb1340ea57c9109f41 \
- --hash=sha256:4155b51ae05ed47199dc5b2a4e62abccb274cee6b01da5b895099b61b1982974 \
- --hash=sha256:49919f8400b5e49e961f320c735388ee686a62327e773fa5b3ce6721f7e785ce \
- --hash=sha256:53d0a3fa5f8af98a1e261de6a3943ca631c526635eb5817a87a59d9a57ebf48f \
- --hash=sha256:5f008525e02908b20e04707a4f704cd286d94718f48bb33edddc7d7b584dddc1 \
- --hash=sha256:628c985afb2c7d27a4800bfb609e03985aaecb42f955049957814e0491d4006d \
- --hash=sha256:65ed923f84a6844de5fd29726b888e58c62820e0769b76565480e1fdc3d062f8 \
- --hash=sha256:6734e606355834f13445b6adc38b53c0fd45f1a56a9ba06c2058f86893ae8017 \
- --hash=sha256:6baf0baf0d5d265fa7944feb9f7451cc316bfe30e8df1a61b1bb08577c554f31 \
- --hash=sha256:6f4f4668e1831850ebcc2fd0b1cd11721947b6dc7c00bf1c6bd3c929ae14f2c7 \
- --hash=sha256:6f5c2e7bc8a4bf7c426599765b1bd33217ec84023033672c1e9a8b35eaeaaaf8 \
- --hash=sha256:6f6c7a8a57e9405cad7485f4c9d3172ae486cfef1344b5ddd8e5239582d7355e \
- --hash=sha256:7381c66e0561c5757ffe616af869b916c8b4e42b367ab29fedc98481d1e74e14 \
- --hash=sha256:73dc03a6a7e30b7edc5b01b601e53e7fc924b04e1835e8e407c12c037e81adbd \
- --hash=sha256:74db0052d985cf37fa111828d0dd230776ac99c740e1a758ad99094be4f1803d \
- --hash=sha256:75f2568b4189dda1c567339b48cba4ac7384accb9c2a7ed655cd86b04055c795 \
- --hash=sha256:78cacd03e79d009d95635e7d6ff12c21eb89b894c354bd2b2ed0b4763373693b \
- --hash=sha256:80d1543d58bd3d6c271b66abf454d437a438dff01c3e62fdbcd68f2a11310d4b \
- --hash=sha256:830d2948a5ec37c386d3170c483063798d7879037492540f10a475e3fd6f244b \
- --hash=sha256:891cf9b48776b5c61c700b55a598621fdb7b1e301a550365571e9624f270c203 \
- --hash=sha256:8f25e17ab3039b05f762b0a55ae0b3632b2e073d9c8fc88e89aca31a6198e88f \
- --hash=sha256:9a3267620866c9d17b959a84dd0bd2d45719b817245e49371ead79ed4f710d19 \
- --hash=sha256:a04f86f41a8916fe45ac5024ec477f41f886b3c435da2d4e3d2709b22ab02af1 \
- --hash=sha256:aaf53a6cebad0eae578f062c7d462155eada9c172bd8c4d250b8c1d8eb7f916a \
- --hash=sha256:abc1185d79f47c0a7aaf7e2412a0eb2c03b724581139193d2d82b3ad8cbb00ac \
- --hash=sha256:ac0aa6cd53ab9a31d397f8303f92c42f534693528fafbdb997c82bae6e477ad9 \
- --hash=sha256:ac3775e3311661d4adace3697a52ac0bab17edd166087d493b52d4f4f553f9f0 \
- --hash=sha256:b06f0d3bf045158d2fb8837c5785fe9ff9b8c93358be64461a1089f5da983137 \
- --hash=sha256:b116502087ce8a6b7a5f1814568ccbd0e9f6cfd99948aa59b0e241dc57cf739f \
- --hash=sha256:b82fab78e0b1329e183a65260581de4375f619167478dddab510c6c6fb04d9b6 \
- --hash=sha256:bd7163182133c0c7701b25e604cf1611c0d87712e56e88e7ee5d72deab3e76b5 \
- --hash=sha256:c36bcbc0d5174a80d6cccf43a0ecaca44e81d25be4b7f90f0ed7bcfbb5a00909 \
- --hash=sha256:c3af8e0f07399d3176b179f2e2634c3ce9c1301379a6b8c9c9aeecd481da494f \
- --hash=sha256:c84132a54c750fda57729d1e2599bb598f5fa0344085dbde5003ba429a4798c0 \
- --hash=sha256:cb7b2ab0188829593b9de646545175547a70d9a6e2b63bf2cd87a0a391599324 \
- --hash=sha256:cca4def576f47a09a943666b8f829606bcb17e2bc2d5911a46c8f8da45f56755 \
- --hash=sha256:cf6511efa4801b9b38dc5546d7547d5b5c6ef4b081c60b23e4d941d0eba9cbeb \
- --hash=sha256:d16fd5252f883eb074ca55cb622bc0bee49b979ae4e8639fff6ca3ff44f9f854 \
- --hash=sha256:d2686f91611f9e17f4548dbf050e75b079bbc2a82be565832bc8ea9047b61c8c \
- --hash=sha256:d7fc3fca01da18fbabe4625d64bb612b533533ed10045a2ac3dd194bfa656b60 \
- --hash=sha256:dd5653e67b149503c68c4018bf07e42eeed6b4e956b24c00ccdf93ac79cdff84 \
- --hash=sha256:de5695a6f1d8340b12a5d6d4484290ee74d61e467c39ff03b39e30df62cf83a0 \
- --hash=sha256:e0ac8959c929593fee38da1c2b64ee9778733cdf03c482c9ff1d508b6b593b2b \
- --hash=sha256:e1b25e3ad6c909f398df8921780d6a3d120d8c09466720226fc621605b6f92b1 \
- --hash=sha256:e633940f28c1e913615fd624fcdd72fdba807bf53ea6925d6a588e84e1151531 \
- --hash=sha256:e89df2958e5159b811af9ff0f92614dabf4ff617c03a4c1c6ff53bf1c399e0e1 \
- --hash=sha256:ea9f9c6034ea2d93d9147818f17c2a0860d41b71c38b9ce4d55f21b6f9165a11 \
- --hash=sha256:f645caaf0008bacf349875a974220f1f1da349c5dbe7c4ec93048cdc785a3326 \
- --hash=sha256:f8303414c7b03f794347ad062c0516cee0e15f7a612abd0ce1e25caf6ceb47df \
- --hash=sha256:fca62a8301b605b954ad2e9c3666f9d97f63872aa4efcae5492baca2056b74ab
+charset-normalizer==3.3.2 \
+ --hash=sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027 \
+ --hash=sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087 \
+ --hash=sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786 \
+ --hash=sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8 \
+ --hash=sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09 \
+ --hash=sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185 \
+ --hash=sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574 \
+ --hash=sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e \
+ --hash=sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519 \
+ --hash=sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898 \
+ --hash=sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269 \
+ --hash=sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3 \
+ --hash=sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f \
+ --hash=sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6 \
+ --hash=sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8 \
+ --hash=sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a \
+ --hash=sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73 \
+ --hash=sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc \
+ --hash=sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714 \
+ --hash=sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2 \
+ --hash=sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc \
+ --hash=sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce \
+ --hash=sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d \
+ --hash=sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e \
+ --hash=sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6 \
+ --hash=sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269 \
+ --hash=sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96 \
+ --hash=sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d \
+ --hash=sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a \
+ --hash=sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4 \
+ --hash=sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77 \
+ --hash=sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d \
+ --hash=sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0 \
+ --hash=sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed \
+ --hash=sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068 \
+ --hash=sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac \
+ --hash=sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25 \
+ --hash=sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8 \
+ --hash=sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab \
+ --hash=sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26 \
+ --hash=sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2 \
+ --hash=sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db \
+ --hash=sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f \
+ --hash=sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5 \
+ --hash=sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99 \
+ --hash=sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c \
+ --hash=sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d \
+ --hash=sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811 \
+ --hash=sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa \
+ --hash=sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a \
+ --hash=sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03 \
+ --hash=sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b \
+ --hash=sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04 \
+ --hash=sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c \
+ --hash=sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001 \
+ --hash=sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458 \
+ --hash=sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389 \
+ --hash=sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99 \
+ --hash=sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985 \
+ --hash=sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537 \
+ --hash=sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238 \
+ --hash=sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f \
+ --hash=sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d \
+ --hash=sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796 \
+ --hash=sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a \
+ --hash=sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143 \
+ --hash=sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8 \
+ --hash=sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c \
+ --hash=sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5 \
+ --hash=sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5 \
+ --hash=sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711 \
+ --hash=sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4 \
+ --hash=sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6 \
+ --hash=sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c \
+ --hash=sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7 \
+ --hash=sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4 \
+ --hash=sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b \
+ --hash=sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae \
+ --hash=sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12 \
+ --hash=sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c \
+ --hash=sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae \
+ --hash=sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8 \
+ --hash=sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887 \
+ --hash=sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b \
+ --hash=sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4 \
+ --hash=sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f \
+ --hash=sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5 \
+ --hash=sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33 \
+ --hash=sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519 \
+ --hash=sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561
# via requests
-future==0.18.3 \
- --hash=sha256:34a17436ed1e96697a86f9de3d15a3b0be01d8bc8de9c1dffd59fb8234ed5307
- # via -r requirements.in
-idna==3.4 \
- --hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \
- --hash=sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2
+idna==3.7 \
+ --hash=sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc \
+ --hash=sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0
# via requests
mpmath==1.3.0 \
--hash=sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f \
--hash=sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c
# via sympy
-numpy==1.24.3 \
- --hash=sha256:0ec87a7084caa559c36e0a2309e4ecb1baa03b687201d0a847c8b0ed476a7187 \
- --hash=sha256:1a7d6acc2e7524c9955e5c903160aa4ea083736fde7e91276b0e5d98e6332812 \
- --hash=sha256:202de8f38fc4a45a3eea4b63e2f376e5f2dc64ef0fa692838e31a808520efaf7 \
- --hash=sha256:210461d87fb02a84ef243cac5e814aad2b7f4be953b32cb53327bb49fd77fbb4 \
- --hash=sha256:2d926b52ba1367f9acb76b0df6ed21f0b16a1ad87c6720a1121674e5cf63e2b6 \
- --hash=sha256:352ee00c7f8387b44d19f4cada524586f07379c0d49270f87233983bc5087ca0 \
- --hash=sha256:35400e6a8d102fd07c71ed7dcadd9eb62ee9a6e84ec159bd48c28235bbb0f8e4 \
- --hash=sha256:3c1104d3c036fb81ab923f507536daedc718d0ad5a8707c6061cdfd6d184e570 \
- --hash=sha256:4719d5aefb5189f50887773699eaf94e7d1e02bf36c1a9d353d9f46703758ca4 \
- --hash=sha256:4749e053a29364d3452c034827102ee100986903263e89884922ef01a0a6fd2f \
- --hash=sha256:5342cf6aad47943286afa6f1609cad9b4266a05e7f2ec408e2cf7aea7ff69d80 \
- --hash=sha256:56e48aec79ae238f6e4395886b5eaed058abb7231fb3361ddd7bfdf4eed54289 \
- --hash=sha256:76e3f4e85fc5d4fd311f6e9b794d0c00e7002ec122be271f2019d63376f1d385 \
- --hash=sha256:7776ea65423ca6a15255ba1872d82d207bd1e09f6d0894ee4a64678dd2204078 \
- --hash=sha256:784c6da1a07818491b0ffd63c6bbe5a33deaa0e25a20e1b3ea20cf0e43f8046c \
- --hash=sha256:8535303847b89aa6b0f00aa1dc62867b5a32923e4d1681a35b5eef2d9591a463 \
- --hash=sha256:9a7721ec204d3a237225db3e194c25268faf92e19338a35f3a224469cb6039a3 \
- --hash=sha256:a1d3c026f57ceaad42f8231305d4653d5f05dc6332a730ae5c0bea3513de0950 \
- --hash=sha256:ab344f1bf21f140adab8e47fdbc7c35a477dc01408791f8ba00d018dd0bc5155 \
- --hash=sha256:ab5f23af8c16022663a652d3b25dcdc272ac3f83c3af4c02eb8b824e6b3ab9d7 \
- --hash=sha256:ae8d0be48d1b6ed82588934aaaa179875e7dc4f3d84da18d7eae6eb3f06c242c \
- --hash=sha256:c91c4afd8abc3908e00a44b2672718905b8611503f7ff87390cc0ac3423fb096 \
- --hash=sha256:d5036197ecae68d7f491fcdb4df90082b0d4960ca6599ba2659957aafced7c17 \
- --hash=sha256:d6cc757de514c00b24ae8cf5c876af2a7c3df189028d68c0cb4eaa9cd5afc2bf \
- --hash=sha256:d933fabd8f6a319e8530d0de4fcc2e6a61917e0b0c271fded460032db42a0fe4 \
- --hash=sha256:ea8282b9bcfe2b5e7d491d0bf7f3e2da29700cec05b49e64d6246923329f2b02 \
- --hash=sha256:ecde0f8adef7dfdec993fd54b0f78183051b6580f606111a6d789cd14c61ea0c \
- --hash=sha256:f21c442fdd2805e91799fbe044a7b999b8571bb0ab0f7850d0cb9641a687092b
- # via -r requirements.in
-pyyaml==6.0 \
- --hash=sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf \
- --hash=sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293 \
- --hash=sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b \
- --hash=sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57 \
- --hash=sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b \
- --hash=sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4 \
- --hash=sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07 \
- --hash=sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba \
- --hash=sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9 \
- --hash=sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287 \
- --hash=sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513 \
- --hash=sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0 \
- --hash=sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782 \
- --hash=sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0 \
- --hash=sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92 \
- --hash=sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f \
- --hash=sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2 \
- --hash=sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc \
- --hash=sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1 \
- --hash=sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c \
- --hash=sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86 \
- --hash=sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4 \
- --hash=sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c \
- --hash=sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34 \
- --hash=sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b \
- --hash=sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d \
- --hash=sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c \
- --hash=sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb \
- --hash=sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7 \
- --hash=sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737 \
- --hash=sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3 \
- --hash=sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d \
- --hash=sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358 \
- --hash=sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53 \
- --hash=sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78 \
- --hash=sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803 \
- --hash=sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a \
- --hash=sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f \
- --hash=sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174 \
- --hash=sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5
- # via -r requirements.in
+numpy==1.26.4 \
+ --hash=sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b \
+ --hash=sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818 \
+ --hash=sha256:1af303d6b2210eb850fcf03064d364652b7120803a0b872f5211f5234b399f20 \
+ --hash=sha256:1dda2e7b4ec9dd512f84935c5f126c8bd8b9f2fc001e9f54af255e8c5f16b0e0 \
+ --hash=sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010 \
+ --hash=sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a \
+ --hash=sha256:3373d5d70a5fe74a2c1bb6d2cfd9609ecf686d47a2d7b1d37a8f3b6bf6003aea \
+ --hash=sha256:47711010ad8555514b434df65f7d7b076bb8261df1ca9bb78f53d3b2db02e95c \
+ --hash=sha256:4c66707fabe114439db9068ee468c26bbdf909cac0fb58686a42a24de1760c71 \
+ --hash=sha256:50193e430acfc1346175fcbdaa28ffec49947a06918b7b92130744e81e640110 \
+ --hash=sha256:52b8b60467cd7dd1e9ed082188b4e6bb35aa5cdd01777621a1658910745b90be \
+ --hash=sha256:60dedbb91afcbfdc9bc0b1f3f402804070deed7392c23eb7a7f07fa857868e8a \
+ --hash=sha256:62b8e4b1e28009ef2846b4c7852046736bab361f7aeadeb6a5b89ebec3c7055a \
+ --hash=sha256:666dbfb6ec68962c033a450943ded891bed2d54e6755e35e5835d63f4f6931d5 \
+ --hash=sha256:675d61ffbfa78604709862923189bad94014bef562cc35cf61d3a07bba02a7ed \
+ --hash=sha256:679b0076f67ecc0138fd2ede3a8fd196dddc2ad3254069bcb9faf9a79b1cebcd \
+ --hash=sha256:7349ab0fa0c429c82442a27a9673fc802ffdb7c7775fad780226cb234965e53c \
+ --hash=sha256:7ab55401287bfec946ced39700c053796e7cc0e3acbef09993a9ad2adba6ca6e \
+ --hash=sha256:7e50d0a0cc3189f9cb0aeb3a6a6af18c16f59f004b866cd2be1c14b36134a4a0 \
+ --hash=sha256:95a7476c59002f2f6c590b9b7b998306fba6a5aa646b1e22ddfeaf8f78c3a29c \
+ --hash=sha256:96ff0b2ad353d8f990b63294c8986f1ec3cb19d749234014f4e7eb0112ceba5a \
+ --hash=sha256:9fad7dcb1aac3c7f0584a5a8133e3a43eeb2fe127f47e3632d43d677c66c102b \
+ --hash=sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0 \
+ --hash=sha256:a354325ee03388678242a4d7ebcd08b5c727033fcff3b2f536aea978e15ee9e6 \
+ --hash=sha256:a4abb4f9001ad2858e7ac189089c42178fcce737e4169dc61321660f1a96c7d2 \
+ --hash=sha256:ab47dbe5cc8210f55aa58e4805fe224dac469cde56b9f731a4c098b91917159a \
+ --hash=sha256:afedb719a9dcfc7eaf2287b839d8198e06dcd4cb5d276a3df279231138e83d30 \
+ --hash=sha256:b3ce300f3644fb06443ee2222c2201dd3a89ea6040541412b8fa189341847218 \
+ --hash=sha256:b97fe8060236edf3662adfc2c633f56a08ae30560c56310562cb4f95500022d5 \
+ --hash=sha256:bfe25acf8b437eb2a8b2d49d443800a5f18508cd811fea3181723922a8a82b07 \
+ --hash=sha256:cd25bcecc4974d09257ffcd1f098ee778f7834c3ad767fe5db785be9a4aa9cb2 \
+ --hash=sha256:d209d8969599b27ad20994c8e41936ee0964e6da07478d6c35016bc386b66ad4 \
+ --hash=sha256:d5241e0a80d808d70546c697135da2c613f30e28251ff8307eb72ba696945764 \
+ --hash=sha256:edd8b5fe47dab091176d21bb6de568acdd906d1887a4584a15a9a96a1dca06ef \
+ --hash=sha256:f870204a840a60da0b12273ef34f7051e98c3b5961b61b0c2c1be6dfd64fbcd3 \
+ --hash=sha256:ffa75af20b44f8dba823498024771d5ac50620e6915abac414251bd971b4529f
+ # via -r tools/build/bazel/requirements.in
+pyyaml==6.0.1 \
+ --hash=sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5 \
+ --hash=sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc \
+ --hash=sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df \
+ --hash=sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741 \
+ --hash=sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206 \
+ --hash=sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27 \
+ --hash=sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595 \
+ --hash=sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62 \
+ --hash=sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98 \
+ --hash=sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696 \
+ --hash=sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290 \
+ --hash=sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9 \
+ --hash=sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d \
+ --hash=sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6 \
+ --hash=sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867 \
+ --hash=sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47 \
+ --hash=sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486 \
+ --hash=sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6 \
+ --hash=sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3 \
+ --hash=sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007 \
+ --hash=sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938 \
+ --hash=sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0 \
+ --hash=sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c \
+ --hash=sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735 \
+ --hash=sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d \
+ --hash=sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28 \
+ --hash=sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4 \
+ --hash=sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba \
+ --hash=sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8 \
+ --hash=sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef \
+ --hash=sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5 \
+ --hash=sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd \
+ --hash=sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3 \
+ --hash=sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0 \
+ --hash=sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515 \
+ --hash=sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c \
+ --hash=sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c \
+ --hash=sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924 \
+ --hash=sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34 \
+ --hash=sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43 \
+ --hash=sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859 \
+ --hash=sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673 \
+ --hash=sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54 \
+ --hash=sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a \
+ --hash=sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b \
+ --hash=sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab \
+ --hash=sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa \
+ --hash=sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c \
+ --hash=sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585 \
+ --hash=sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d \
+ --hash=sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f
+ # via -r tools/build/bazel/requirements.in
requests==2.31.0 \
--hash=sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f \
--hash=sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1
- # via -r requirements.in
-six==1.16.0 \
- --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \
- --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254
- # via -r requirements.in
+ # via -r tools/build/bazel/requirements.in
sympy==1.12 \
--hash=sha256:c3588cd4295d0c0f603d0f2ae780587e64e2efeedb3521e46b9bb1d08d184fa5 \
--hash=sha256:ebf595c8dac3e0fdc4152c51878b498396ec7f30e7a914d6071e674d49420fb8
- # via -r requirements.in
-typing-extensions==4.5.0 \
- --hash=sha256:5cb5f4a79139d699607b3ef622a1dedafa84e115ab0024e0d9c044a9479ca7cb \
- --hash=sha256:fb33085c39dd998ac16d1431ebc293a8b3eedd00fd4a32de0ff79002c19511b4
- # via -r requirements.in
-urllib3==2.0.7 \
- --hash=sha256:c97dfde1f7bd43a71c8d2a58e369e9b2bf692d1334ea9f9cae55add7d0dd0f84 \
- --hash=sha256:fdb6d215c776278489906c2f8916e6e7d4f5a9b602ccbcfdf7f016fc8da0596e
+ # via -r tools/build/bazel/requirements.in
+typing-extensions==4.11.0 \
+ --hash=sha256:83f085bd5ca59c80295fc2a82ab5dac679cbe02b9f33f7d83af68e241bea51b0 \
+ --hash=sha256:c1f94d72897edaf4ce775bb7558d5b79d8126906a14ea5ed1635921406c0387a
+ # via -r tools/build/bazel/requirements.in
+urllib3==2.2.1 \
+ --hash=sha256:450b20ec296a467077128bff42b73080516e71b56ff59a60a02bef2232c4fa9d \
+ --hash=sha256:d0570876c61ab9e520d776c38acbbb5b05a776d3f9ff98a5c8fd5162a444cf19
# via requests
# The following packages are considered to be unsafe in a requirements file:
-setuptools==67.8.0 \
- --hash=sha256:5df61bf30bb10c6f756eb19e7c9f3b473051f48db77fddbe06ff2ca307df9a6f \
- --hash=sha256:62642358adc77ffa87233bc4d2354c4b2682d214048f500964dbe760ccedf102
- # via -r requirements.in
+setuptools==69.5.1 \
+ --hash=sha256:6c1fccdac05a97e598fb0ae3bbed5904ccb317337a51139dcd51453611bbb987 \
+ --hash=sha256:c636ac361bc47580504644275c9ad802c50415c7522212252c033bd15f301f32
+ # via -r tools/build/bazel/requirements.in
|
2.41.0
|
75f77784f01d7dfa6dc74a704f9d390bbde339e
|
Mon, 15 Apr 2024 18:05:22 +0000
|
[PATCH 0164/1000] Fix CUDA out of memory error message formatting (#123984)
|
We need a string instead of an integer here. With device 0, the string was getting NULL-terminated, leading to a truncated error message. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123984 Approved by: https://github.com/eqy, https://github.com/peterbell10
|
diff --git a/c10/cuda/CUDACachingAllocator.cpp b/c10/cuda/CUDACachingAllocator.cpp
index 704a011c3e..c472e82ce2 100644
--- a/c10/cuda/CUDACachingAllocator.cpp
+++ b/c10/cuda/CUDACachingAllocator.cpp
@@ -1149,7 +1149,7 @@ class DeviceCachingAllocator {
"CUDA out of memory. Tried to allocate ",
format_size(alloc_size),
". GPU ",
- device,
+ static_cast<int>(device),
" has a total capacity of ",
format_size(device_total),
" of which ",
|
2.41.0
|
a52918e816879fe6cafbb1cc7c737119cfe6fad
|
Mon, 15 Apr 2024 10:31:01 -0700
|
[PATCH 0165/1000] [FSDP2] Generalized all-gather outputs to >1 per parameter (#119302)
|
This PR is part of the FSDP extensions work. For subclasses such as QLoRA's `NF4Tensor` (which uses block-wise quantization) that have multiple inner tensors per parameter, we must generalize to allow each parameter to contribute >1 all-gather input and hence have >1 all-gather output. This PR does this generalization by converting `FSDPParam.all_gather_input: torch.Tensor` to `FSDPParam.all_gather_inputs: List[torch.Tensor]`. Unfortunately, since we need to preserve the mapping from all-gather inputs/outputs to their source parameter, we have to introduce `List[List]` instead of simply `List` in several places. Furthermore, we still require the flattened 1D `List` for `torch.split` calls, introducing some redundancy between data structures. Nonetheless, I do not see a way to avoid this if we want the generalization. Pull Request resolved: https://github.com/pytorch/pytorch/pull/119302 Approved by: https://github.com/weifengpy, https://github.com/wanchaol
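For illustration, a toy sketch (tensor shapes and names are made up) of why both the nested per-parameter numels and the flattened split sizes are kept around:

```python
import torch

# Two "parameters": the first contributes one all-gather input, the second two.
param_all_gather_inputs = [[torch.randn(4)], [torch.randn(2), torch.randn(3)]]

# The nested numels preserve the input -> parameter mapping for copy-out...
param_numels = [[t.numel() for t in ts] for ts in param_all_gather_inputs]  # [[4], [2, 3]]
# ...while torch.split needs the flat 1D list of split sizes.
flat_split_sizes = [n for ns in param_numels for n in ns]  # [4, 2, 3]
flat_inputs = [t for ts in param_all_gather_inputs for t in ts]

all_gather_input = torch.empty(sum(flat_split_sizes))
torch._foreach_copy_(list(torch.split(all_gather_input, flat_split_sizes)), flat_inputs)
```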
|
diff --git a/torch/distributed/_composable/fsdp/_fsdp_collectives.py b/torch/distributed/_composable/fsdp/_fsdp_collectives.py
index feba4a606b..7a72ce0a13 100644
--- a/torch/distributed/_composable/fsdp/_fsdp_collectives.py
+++ b/torch/distributed/_composable/fsdp/_fsdp_collectives.py
@@ -15,7 +15,13 @@ class AllGatherResult(NamedTuple):
all_gather_output: torch.Tensor
all_gather_event: Optional[torch.cuda.Event]
all_gather_work: Optional[dist.distributed_c10d.Work]
- all_gather_input_numels: List[int]
+ # For each parameter, the all-gather input dtype for each input
+ param_all_gather_input_dtypes: List[List[torch.dtype]]
+ # For each parameter, the all-gather input numel for each input
+ param_all_gather_input_numels: List[List[int]]
+ # 1D flattened version of `param_all_gather_input_numels` saved to avoid
+ # CPU overhead from recomputing
+ all_gather_input_split_sizes: List[int]
@torch.no_grad()
@@ -30,15 +36,20 @@ def foreach_all_gather(
world_size, rank = group.size(), group.rank()
# - Copy in
with torch.cuda.stream(all_gather_copy_in_stream):
- param_all_gather_inputs = [
- fsdp_param.all_gather_input for fsdp_param in fsdp_params
+ param_all_gather_inputs: List[List[torch.Tensor]] = [
+ fsdp_param.all_gather_inputs for fsdp_param in fsdp_params
]
- dtype = param_all_gather_inputs[0].dtype
- if not all(t.dtype == dtype for t in param_all_gather_inputs):
+ (
+ param_all_gather_input_dtypes,
+ param_all_gather_input_numels,
+ ) = _get_all_gather_input_metadatas(param_all_gather_inputs)
+ dtype = param_all_gather_inputs[0][0].dtype
+ if not all(t.dtype == dtype for ts in param_all_gather_inputs for t in ts):
raise NotImplementedError(
- f"Mixed dtype not supported yet: {[t.dtype for t in param_all_gather_inputs]}"
+ f"Mixed dtype not supported yet: {param_all_gather_input_dtypes}"
)
- inp_split_sizes = [inp.numel() for inp in param_all_gather_inputs]
+ all_gather_inputs = [t for ts in param_all_gather_inputs for t in ts]
+ inp_split_sizes = [t.numel() for t in all_gather_inputs]
all_gather_input_numel = sum(inp_split_sizes)
all_gather_output = torch.empty(
(all_gather_input_numel * world_size,), dtype=dtype, device=device
@@ -47,7 +58,7 @@ def foreach_all_gather(
0, all_gather_input_numel * rank, all_gather_input_numel
)
foreach_copy_dsts = torch.split(all_gather_input, inp_split_sizes)
- torch._foreach_copy_(foreach_copy_dsts, param_all_gather_inputs)
+ torch._foreach_copy_(foreach_copy_dsts, all_gather_inputs)
del param_all_gather_inputs
all_gather_stream.wait_stream(all_gather_copy_in_stream)
with torch.cuda.stream(all_gather_stream):
@@ -60,7 +71,12 @@ def foreach_all_gather(
)
all_gather_event = all_gather_stream.record_event()
return AllGatherResult(
- all_gather_output, all_gather_event, all_gather_work, inp_split_sizes
+ all_gather_output,
+ all_gather_event,
+ all_gather_work,
+ param_all_gather_input_dtypes,
+ param_all_gather_input_numels,
+ inp_split_sizes,
)
@@ -74,25 +90,30 @@ def foreach_all_gather_copy_out(
all_gather_output,
all_gather_event,
all_gather_work,
- all_gather_input_numels,
+ param_all_gather_input_dtypes,
+ param_all_gather_input_numels,
+ all_gather_input_split_sizes,
) = all_gather_result
if all_gather_event is not None: # sync op
torch.cuda.current_stream().wait_event(all_gather_event)
if isinstance(all_gather_work, dist.distributed_c10d.Work): # async op
all_gather_work.wait()
- world_size = group.size()
- dtype, device = all_gather_output.dtype, all_gather_output.device
- for all_gather_input_numel, fsdp_param in zip(all_gather_input_numels, fsdp_params):
- fsdp_param.init_all_gather_output(
- all_gather_input_numel, world_size, dtype, device
+ world_size, device = group.size(), all_gather_output.device
+ for all_gather_input_numels, all_gather_input_dtypes, fsdp_param in zip(
+ param_all_gather_input_numels, param_all_gather_input_dtypes, fsdp_params
+ ):
+ fsdp_param.init_all_gather_outputs(
+ all_gather_input_numels, all_gather_input_dtypes, world_size, device
) # no-op after 1st call
- fsdp_param.alloc_all_gather_output()
+ fsdp_param.alloc_all_gather_outputs()
all_gather_output = all_gather_output.view(world_size, -1)
- out = [
- fsdp_param.all_gather_output.view(world_size, -1) for fsdp_param in fsdp_params
+ out: List[torch.Tensor] = [
+ t.view(world_size, -1)
+ for fsdp_param in fsdp_params
+ for t in fsdp_param.all_gather_outputs
]
torch.split_with_sizes_copy(
- all_gather_output, all_gather_input_numels, dim=1, out=out
+ all_gather_output, all_gather_input_split_sizes, dim=1, out=out
)
@@ -199,6 +220,17 @@ def foreach_reduce_scatter_copy_in(
)
+def _get_all_gather_input_metadatas(
+ param_all_gather_inputs: List[List[torch.Tensor]],
+) -> Tuple[List[List[torch.dtype]], List[List[int]]]:
+ param_all_gather_input_dtypes: List[List[torch.dtype]] = []
+ param_all_gather_input_numels: List[List[int]] = []
+ for all_gather_inputs in param_all_gather_inputs:
+ param_all_gather_input_dtypes.append([t.dtype for t in all_gather_inputs])
+ param_all_gather_input_numels.append([t.numel() for t in all_gather_inputs])
+ return param_all_gather_input_dtypes, param_all_gather_input_numels
+
+
def _reduce_scatter(
output: torch.Tensor,
input: torch.Tensor,
diff --git a/torch/distributed/_composable/fsdp/_fsdp_param.py b/torch/distributed/_composable/fsdp/_fsdp_param.py
index 90e69119b1..6541df7ed8 100644
--- a/torch/distributed/_composable/fsdp/_fsdp_param.py
+++ b/torch/distributed/_composable/fsdp/_fsdp_param.py
@@ -28,23 +28,25 @@ FSDP considers the following tensors:
on the module when applying FSDP
- Sharded parameter: sharding the original parameter on dim-0 as a DTensor
over the main mesh
|
- sharded parameter
|
1a0821e7ecc83bf06b6ad03a6038e5404ce0ec0
|
Mon, 15 Apr 2024 10:31:01 -0700
|
[PATCH 0166/1000] [FSDP2] Added pre/post-all-gather extensions (subclass) (#122908)
|
**Overview** This PR adds pre/post-all-gather extensions to FSDP2. - The pre/post-all-gather extensions are specified at the tensor-level on the `sharded_param._local_tensor` (i.e. the tensor wrapped by the sharded `DTensor`). If the user has a tensor-subclass parameter on the module passed to FSDP that preserves the subclass through the sharding ops (e.g. `new_zeros`, `chunk`, etc.), then the `sharded_param._local_tensor` will naturally be of that subclass. - The pre-all-gather function has signature: ``` def fsdp_pre_all_gather(self) -> Tuple[Tuple[torch.Tensor, ...], Any] ``` - The first return value is a `Tuple[torch.Tensor, ...]` of the all-gather inputs. It is a tuple since a subclass could contribute >1 inner tensors. - The second return value is any optional metadata needed to pass through to the post-all-gather. - The post all-gather function has signature: ``` def fsdp_post_all_gather( self, all_gather_outputs: Tuple[torch.Tensor, ...], metadata: Any, param_dtype: torch.dtype, *, out: Optional[torch.Tensor] = None, ) -> Union[Tuple[torch.Tensor, Tuple[torch.Tensor, ...]], None]: ``` - The `all_gather_outputs` are exactly the all-gathered versions of the `fsdp_pre_all_gather` 1st return value (representing the all-gather inputs). We make sure to unflatten these back to ND for the user. - The `metadata` is the `fsdp_pre_all_gather` 2nd return value, untouched. - The `param_dtype` is the parameter dtype based on the passed-in `MixedPrecisionPolicy`. Namely, if no policy is passed in, then `param_dtype` is the original dtype, and otherwise, it is the `MixedPrecisionPolicy.param_dtype`. - If `out` is not specified, then the return value has type `Tuple[torch.Tensor, Tuple[torch.Tensor, ...]]`. The first tuple item is the unsharded parameter (e.g. re-wrapping into some subclass). The second tuple item is a tuple of unsharded inner tensors that FSDP should free during reshard. These should be derived from the all-gather outputs. - The `out` argument is required due to FSDP's `resize_` usage. We require an in-place variant for the backward all-gather. Here, `out` will be exactly the object returned as the first tuple item in the out-of-place variant mentioned before. The unsharded inner tensors will be allocated before calling `fsdp_post_all_gather`. When `out` is specified, the `fsdp_post_all_gather` should return `None`. If the post-all-gather does not do any out-of-place ops, then the `out` variant can just be a no-op since the unsharded inner tensors will be the same as the all-gather outputs, which FSDP directly writes to after all-gather. (E.g., this is the case for both float8 and `NF4Tensor`.) - We check for `fsdp_pre_all_gather` and `fsdp_post_all_gather` directly via `hasattr` to accommodate monkey patching so that we do not strictly require the user to use a tensor subclass. The monkey patch must happen after the local tensors have been finalized (after applying FSDP and after any meta-device init). - For now, we require that all gradients in one FSDP parameter group share the same dtype. This is fine for float8 and `NF4Tensor` use cases. If this requirement is too strict, then in the future we can issue 1 reduce-scatter per dtype per group. **Design Notes** - We assume that the `sharded_param._local_tensor` is padded on dim-0. - This assumption should not block immediate use cases, and when we pad the `DTensor._local_tensor` by default, this assumption will always be true. - This assumption allows us to call `sharded_param._local_tensor.fsdp_pre_all_gather()`; i.e. 
it tells us from which tensor object to invoke `fsdp_pre_all_gather()`. - Suppose we want to compose with CPU offloading. Then, CPU offloading's H2D copy should run first, i.e. `sharded_param._local_tensor.to("cuda").fsdp_pre_all_gather()`, where `_local_tensor.to("cuda")` should return an instance of the subclass so that it still defines `fsdp_pre_all_gather()`. Note that in this case, the subclass instance on GPU is a temporary, which means caching values on it would not be possible. One possibility would be to have `.to("cuda")` move any cached values too. - `fsdp_post_all_gather` can either return an unsharded parameter that aliases with the all-gather output or does not alias, but there is no way to know a priori. - If the unsharded parameter aliases with the all-gather output, then we should _not_ free the all-gather output in `unshard`. - If the unsharded parameter does not alias with the all-gather output, then we prefer to free the all-gather output in `unshard` to avoid holding the unneeded temporary. - One approach is for eager-mode to check for this alias (by comparing data pointers). However, this might be adversarial to full-graph compilation. The compromise for simplicity can be to always free the all-gather output in `reshard`. Pull Request resolved: https://github.com/pytorch/pytorch/pull/122908 Approved by: https://github.com/weifengpy, https://github.com/wanchaol ghstack dependencies: #119302
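For orientation, here is a minimal sketch of the two hooks following the contract described above, for a hypothetical int8-quantized wrapper that carries a scale through the metadata slot; the attribute names `_int8_data` and `_scale` are illustrative only and are not part of this PR:

```python
from typing import Any, Optional, Tuple, Union

import torch


def fsdp_pre_all_gather(self) -> Tuple[Tuple[torch.Tensor, ...], Any]:
    # All-gather inputs (flattened and all-gathered by FSDP) plus metadata
    # that is passed through untouched to the post-all-gather.
    return (self._int8_data,), {"scale": self._scale}


def fsdp_post_all_gather(
    self,
    all_gather_outputs: Tuple[torch.Tensor, ...],
    metadata: Any,
    param_dtype: torch.dtype,
    *,
    out: Optional[torch.Tensor] = None,
) -> Union[Tuple[torch.Tensor, Tuple[torch.Tensor, ...]], None]:
    (int8_data,) = all_gather_outputs
    scale = metadata["scale"]
    if out is not None:
        # In-place variant used for the backward all-gather: dequantize into
        # the preallocated unsharded parameter and return None.
        out.copy_(int8_data.to(param_dtype) * scale)
        return None
    unsharded_param = int8_data.to(param_dtype) * scale
    # Second return value: unsharded inner tensors FSDP should free on reshard.
    return unsharded_param, (int8_data,)
```

These would typically be defined on the tensor subclass (or monkey patched onto it after meta-device init), as exercised in the test added below.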
|
diff --git a/test/distributed/_composable/fsdp/test_fully_shard_extensions.py b/test/distributed/_composable/fsdp/test_fully_shard_extensions.py
new file mode 100644
index 0000000000..655bff78f0
--- /dev/null
+++ b/test/distributed/_composable/fsdp/test_fully_shard_extensions.py
@@ -0,0 +1,235 @@
+# Owner(s): ["oncall: distributed"]
+
+import contextlib
+import copy
+import functools
+import threading
+import unittest
+from typing import Any, List, Optional, Tuple, Union
+
+import torch
+import torch.distributed as dist
+import torch.nn as nn
+
+from torch.distributed._composable.fsdp import fully_shard, MixedPrecisionPolicy
+from torch.testing._internal.common_cuda import TEST_CUDA
+from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
+from torch.testing._internal.common_fsdp import (
+ check_sharded_parity,
+ FSDPTest,
+ FSDPTestMultiThread,
+ MLP,
+)
+from torch.testing._internal.common_utils import run_tests
+from torch.testing._internal.two_tensor import TwoTensor
+
+
+def two_tensor_fsdp_pre_all_gather(self) -> Tuple[Tuple[torch.Tensor, ...], Any]:
+ all_gather_inputs = (self.a, self.b)
+ metadata = None
+ return all_gather_inputs, metadata
+
+
+def two_tensor_fsdp_post_all_gather(
+ self,
+ all_gather_outputs: Tuple[torch.Tensor, ...],
+ metadata: Any,
+ param_dtype: torch.dtype,
+ *,
+ out: Optional[torch.Tensor] = None,
+) -> Union[Tuple[torch.Tensor, Tuple[torch.Tensor, ...]], None]:
+ assert metadata is None, f"{metadata}"
+ a, b = all_gather_outputs
+ if out is not None:
+ assert isinstance(out, TwoTensor), f"{type(out)}"
+ if a.dtype == param_dtype:
+ assert a.untyped_storage().data_ptr() == out.a.untyped_storage().data_ptr()
+ assert b.untyped_storage().data_ptr() == out.b.untyped_storage().data_ptr()
+ else:
+ assert out.a.dtype == param_dtype, f"{out.a.dtype} {param_dtype}"
+ assert out.b.dtype == param_dtype, f"{out.b.dtype} {param_dtype}"
+ out.a.copy_(a)
+ out.b.copy_(b)
+ return
+ tensors_to_free = (a, b)
+ # If the cast is real, then the all-gather outputs will not alias the
+ # returned `TwoTensor`'s `a` and `b`
+ two_tensor = TwoTensor(a, b).to(param_dtype)
+ return two_tensor, tensors_to_free
+
+
+class TestFullyShardAllGatherExtensionsCommon:
+ @property
+ def world_size(self) -> int:
+ return 2
+
+ @contextlib.contextmanager
+ def _patch_two_tensor_fsdp_all_gather(self):
+ lock = threading.Lock()
+ TwoTensor.fsdp_pre_all_gather = two_tensor_fsdp_pre_all_gather
+ TwoTensor.fsdp_post_all_gather = two_tensor_fsdp_post_all_gather
+ dist.barrier()
+ try:
+ yield
+ finally:
+ dist.barrier()
+ with lock: # only one thread needs to delete
+ if hasattr(TwoTensor, "fsdp_pre_all_gather"):
+ delattr(TwoTensor, "fsdp_pre_all_gather")
+ if hasattr(TwoTensor, "fsdp_post_all_gather"):
+ delattr(TwoTensor, "fsdp_post_all_gather")
+
+ def _init_two_tensor_mlp(self) -> nn.Module:
+ # Disable bias because the reference model will end up with a bias
+ # gradient that is a `TwoTensor`, whereas the FSDP model does not
+ model = nn.Sequential(*[MLP(8, bias=False) for _ in range(3)])
+ for mlp in model:
+ mlp.in_proj.weight = nn.Parameter(
+ TwoTensor(mlp.in_proj.weight, mlp.in_proj.weight.clone())
+ )
+ mlp.out_proj.weight = nn.Parameter(
+ TwoTensor(mlp.out_proj.weight, mlp.out_proj.weight.clone())
+ )
+ return model
+
+
+class TestFullyShardAllGatherExtensionsMultiProcess(
+ TestFullyShardAllGatherExtensionsCommon, FSDPTest
+):
+ @skip_if_lt_x_gpu(2)
+ def test_all_gather_extensions_train_parity(self):
+ with self._patch_two_tensor_fsdp_all_gather():
+ self.run_subtests(
+ {"reshard_after_forward": [True, False]},
+ self._test_all_gather_extensions_train_parity,
+ )
+
+ def _test_all_gather_extensions_train_parity(self, reshard_after_forward: bool):
+ torch.manual_seed(42)
+ model = self._init_two_tensor_mlp()
+ ref_model = copy.deepcopy(model).cuda()
+ ref_optim = torch.optim.Adam(ref_model.parameters(), lr=1e-2, foreach=True)
+ fully_shard_fn = functools.partial(
+ fully_shard, reshard_after_forward=reshard_after_forward
+ )
+ for mlp in model:
+ fully_shard_fn(mlp)
+ fully_shard_fn(model)
+ optim = torch.optim.Adam(model.parameters(), lr=1e-2, foreach=True)
+ check_sharded_parity(self, ref_model, model)
+
+ torch.manual_seed(42 + self.rank + 1)
+ inp = torch.randn((2, 8), device="cuda")
+ for iter_idx in range(10):
+ losses: List[torch.Tensor] = []
+ for _model in (ref_model, model):
+ losses.append(_model(inp).sum())
+ losses[-1].backward()
+ if _model is ref_model:
+ for param_name, param in _model.named_parameters():
+ dist.all_reduce(param.grad)
+ param.grad.detach().div_(self.world_size)
+ self.assertEqual(losses[0], losses[1])
+ check_sharded_parity(self, ref_model, model)
+ for _optim in (ref_optim, optim):
+ _optim.step()
+ _optim.zero_grad(set_to_none=(iter_idx % 2 == 0))
+ check_sharded_parity(self, ref_model, model)
+
+
+class TestFullyShardAllGatherExtensionsMultiThread(
+ TestFullyShardAllGatherExtensionsCommon, FSDPTestMultiThread
+):
+ @property
+ def device(self) -> torch.device:
+ return torch.device("cuda:0")
+
+ @unittest.skipIf(not TEST_CUDA, "no cuda")
+ def test_all_gather_extensions_end_to_end(self):
+ with self._patch_two_tensor_fsdp_all_gather():
+ self.run_subtests(
+ {"reshard_after_forward": [True, False]},
+ self._test_all_gather_extensions_end_to_end,
+ )
+
+ def _test_all_gather_extensions_end_to_end(self, reshard_after_forward: bool):
+ # Check that we can run the meta-device initialization flow
+ with torch.device("meta"):
+ model = self._init_two_tensor_mlp()
+ for param in model.parameters():
+ self.assertEqual(param.device, torch.device("meta"))
+ fully_shard_fn = functools.partial(
+ fully_shard,
+ reshard_after_forward=reshard_after_forward,
+ mp_policy=MixedPrecisionPolicy(param_dtype=torch.bfloat16),
+ )
+ for mlp in model:
+ fully_shard_fn(mlp)
+ fully_shard_fn(model)
+ model.to_empty(device=self.device)
+ for param in model.parameters():
+ nn.init.trunc_normal_(param)
+ optim = torch.optim.Adam(model.parameters(), lr=1e-2, foreach=True)
+
+ # Run a few iterations to check for errors
+ torch.manual_seed(42 + self.rank + 1)
+ inp = torch.randn((2, 8), device="cuda")
+ for _ in range(3):
+ model(inp).sum().backward()
+ optim.step()
+ optim.zero_grad()
+
+ @unittest.skipIf(not TEST_CUDA, "no cuda")
+ def test_all_gather_extensions_monkey_patch(self):
+ # Define a pre/post-all-gather pair that quantizes to bf16 for the
+ # all-gather and de-quantizes back to the parameter dtype
+ def fsdp_pre_all_gather(self) -> Tuple[Tuple[torch.Tensor, ...], Any]:
+ return (self.to(torch.bfloat16),), None
+
+ def fsdp_post_all_gather(
+ self,
+ all_gather_outputs: Tuple[torch.Tensor, ...],
+ metadata: Any,
+ param_dtype: torch.dtype,
+ *,
+ out: Optional[torch.Tensor] = None,
+ ) -> Union[Tuple[torch.Tensor, Tuple[torch.Tensor, ...]], None]:
+ (tensor,) = all_gather_outputs
+ assert metadata is None, f"{metadata}"
+ assert tensor.dtype == torch.bfloat16, f"{tensor.dtype}"
+ if out is not None:
+ out.copy_(tensor)
+ return
+ return tensor.to(param_dtype), (tensor,)
+
+ with torch.device("meta"):
+ model = self._init_two_tensor_mlp()
+ for mlp in model:
+ fully_shard(mlp)
+ fully_shard(model)
+ model.to_empty(device=self.device)
+ for param in model.parameters():
+ nn.init.trunc_normal_(param)
+ # Monkey patch the pre/post-all-gather functions *after* `to_empty()`
+ # since the local tensor objects change from materialization
+ self.assertGreater(sum("weight" in n for n, _ in model.named_parameters()), 0)
+ for param_name, param in model.named_parameters():
+ if "weight" in param_name:
+ local_param = param.to_local()
+ # Monkey patch on the `torch.Tensor` to show that the extension
+ # can work even without a subclass
+ local_param.fsdp_pre_all_gather = fsdp_pre_all_gather
+ local_param.fsdp_post_all_gather = fsdp_post_all_gather
+ optim = torch.optim.Adam(model.parameters(), lr=1e-2, foreach=True)
+
+ # Run a few iterations to check for errors
+ torch.manual_seed(42 + self.rank + 1)
+ inp = torch.randn((2, 8), device="cuda")
+ for _ in range(3):
+ model(inp).sum().backward()
+ optim.step()
+ optim.zero_grad()
+
+
+if __name__ == "__main__":
+ run_tests()
diff --git a/torch/distributed/_composable/fsdp/_fsdp_collectives.py b/torch/distributed/_composable/fsdp/_fsdp_collectives.py
index 7a72ce0a13..c2bc0bd0e7 100644
--- a/torch/distributed/_composable/fsdp/_fsdp_collectives.py
+++ b/torch/distributed/_composable/fsdp/_fsdp_collectives.py
@@ -34,7 +34,6 @@ def foreach_all_gather(
device: torch.device,
) -> Optional[AllGatherResult]:
world_size, rank = group.size(), group.rank()
- # - Copy in
with torch.cuda.stream(all_gather_copy_in_stream):
param_all_gather_inputs: List[List[torch.Tensor]] = [
fsdp_param.all_gather_inputs for fsdp_param in fsdp_params
@@ -42,13 +41,14 @@ def foreach_all_gather(
(
param_all_gather_input_dtypes,
param_all_gather_input_numels,
+ dtype,
) = _get_all_gather_input_metadatas(param_all_gather_inputs)
- dtype = param_all_gather_inputs[0][0].dtype
- if not all(t.dtype == dtype for ts in param_all_gather_inputs for t in ts):
- raise NotImplementedError(
- f"Mixed dtype not supported yet: {param_all_gather_input_dtypes}"
- )
- all_gather_inputs = [t for ts in param_all_gather_inputs for t in ts]
+ if dtype == torch.uint8:
+ all_gather_inputs = [
+ t.view(torch.uint8) for ts in param_all_gather_inputs for t in ts
+ ]
+ else:
+ all_gather_inputs = [t for ts in param_all_gather_inputs for t in ts]
inp_split_sizes = [t.numel() for t in all_gather_inputs]
all_gather_input_numel = sum(inp_split_sizes)
all_gather_output = torch.empty(
@@ -62,7 +62,6 @@ def foreach_all_gather(
del param_all_gather_inputs
all_gather_stream.wait_stream(all_gather_copy_in_stream)
with torch.cuda.stream(all_gather_stream):
- # - All-gather
all_gather_work = dist.all_gather_into_tensor(
output_tensor=all_gather_output,
input_tensor=all_gather_input,
@@ -107,11 +106,11 @@ def foreach_all_gather_copy_out(
) # no-op after 1st call
fsdp_param.alloc_all_gather_outputs()
all_gather_output = all_gather_output.view(world_size, -1)
- out: List[torch.Tensor] = [
- t.view(world_size, -1)
- for fsdp_param in fsdp_params
- for t in fsdp_param.all_gather_outputs
- ]
+ gen = (t for fsdp_param in fsdp_params for t in fsdp_param.all_gather_outputs)
+ if all_gather_output.dtype == torch.uint8:
+ out = [t.view(world_size, -1).view(torch.uint8) for t in gen]
+ else:
+ out = [t.view(world_size, -1) for t in gen]
torch.split_with_sizes_copy(
all_gather_output, all_gather_input_split_sizes, dim=1, out=out
)
@@ -222,13 +221,25 @@ def foreach_reduce_scatter_copy_in(
def _get_all_gather_input_metadatas(
param_all_gather_inputs: List[List[torch.Tensor]],
-) -> Tuple[List[List[torch.dtype]], List[List[int]]]:
+) -> Tuple[List[List[torch.dtype]], List[List[int]], torch.dtype]:
param_all_gather_input_dtypes: List[List[torch.dtype]] = []
param_all_gather_input_numels: List[List[int]] = []
+ all_gather_dtype = param_all_gather_inputs[0][0].dtype
for all_gather_inputs in param_all_gather_inputs:
- param_all_gather_input_dtypes.append([t.dtype for t in all_gather_inputs])
- param_all_gather_input_numels.append([t.numel() for t in all_gather_inputs])
- return param_all_gather_input_dtypes, param_all_gather_input_numels
+ input_dtypes: List[torch.dtype] = []
+ input_numels: List[int] = []
+ for all_gather_input in all_gather_inputs:
+ if all_gather_input.dtype != all_gather_dtype:
+ all_gather_dtype = torch.uint8
+ input_dtypes.append(all_gather_input.dtype)
+ input_numels.append(all_gather_input.numel())
+ param_all_gather_input_dtypes.append(input_dtypes)
+ param_all_gather_input_numels.append(input_numels)
+ return (
+ param_all_gather_input_dtypes,
+ param_all_gather_input_numels,
+ all_gather_dtype,
+ )
def _reduce_scatter(
diff --git a/torch/distributed/_composable/fsdp/_fsdp_init.py b/torch/distributed/_composable/fsdp/_fsdp_init.py
index 39b3d8d09d..07fd45e9e3 100644
--- a/torch/distributed/_composable/fsdp/_fsdp_init.py
+++ b/torch/distributed/_composable/fsdp/_fsdp_init.py
@@ -7,6 +7,7 @@ import torch.nn as nn
from torch.distributed._tensor import DeviceMesh, DTensor, init_device_mesh
from torch.distributed.device_mesh import _get_device_handle
+from torch.utils._python_dispatch import is_traceable_wrapper_subclass
from ._fsdp_common import _is_composable_with_fsdp, FSDPMeshInfo, HSDPMeshInfo
from ._fsdp_state import _get_module_fsdp_state
@@ -125,8 +126,7 @@ def _move_states_to_device(
rather than modules since modules to support ignoring parameters/buffers in
the future.
"""
- # TODO: De-duplicate with `_apply` after `swap_tensors` path lands:
- # https://github.com/pytorch/pytorch/issues/115792
+ # Follow the logic in `nn.Module._apply`
for tensor in itertools.chain(params, buffers):
if tensor.device == device or tensor.device.type == "meta":
# Keep meta-device tensors on meta device for deferred init
@@ -140,4 +140,9 @@ def _move_states_to_device(
raise AssertionError(
f"Expects DTensor to be moved to {dtensor_mesh_type} but got {tensor.device}"
)
- tensor.data = tensor.to(device)
+ if is_traceable_wrapper_subclass(tensor):
+ with torch.no_grad(): # avoid autograd increasing C++ refcount by 1
+ tensor_on_device = nn.Parameter(tensor.to(device))
+ torch.utils.swap_tensors(tensor, tensor_on_device)
+ else:
+ tensor.data = tensor.to(device)
diff --git a/torch/distributed/_composable/fsdp/_fsdp_param.py b/torch/distributed/_composable/fsdp/_fsdp_param.py
index 6541df7ed8..70d0a28ffc 100644
--- a/torch/distributed/_composable/fsdp/_fsdp_param.py
+++ b/torch/distributed/_composable/fsdp/_fsdp_param.py
@@ -1,6 +1,7 @@
+import itertools
from dataclasses import dataclass, field
from enum import auto, Enum
-from typing import cast, List, Optional, Tuple
+from typing import Any, cast, List, Optional, Sequence, Tuple
import torch
import torch.nn as nn
@@ -93,6 +94,18 @@ class ParamModuleInfo:
shared_param_names: List[str] = field(default_factory=list)
+@dataclass
+class ExtensionsData:
+ # User-defined metadata passed from pre to post-all-gather
+ all_gather_metadata: Optional[Any] = None
+ # Save the all-gather input sizes to unflatten the all-gather outputs to ND
+ all_gather_input_sizes: Sequence[torch.Size] = () # ND
+
+ def clear(self):
+ self.all_gather_metadata = None
+ self.all_gather_input_sizes = ()
+
+
class FSDPParam:
"""
This class manages a parameter with FSDP or FSDP variants applied,
@@ -116,8 +129,12 @@ class FSDPParam:
_global_placements: Tuple[Placement, ...]
_global_size: torch.Size
_global_stride: Tuple[int, ...]
+ all_gather_outputs: List[torch.Tensor] # 1D
# DTensor attributes (only defined for DTensor `param`):
_tp_spec: DTensorSpec
+ # All-gather extension attributes
+ _extensions_data: ExtensionsData
+ _unsharded_inner_tensors: List[torch.Tensor]
def __init__(
self,
@@ -135,6 +152,7 @@ class FSDPParam:
self._init_sharded_param(param, device)
if self.post_forward_mesh_info:
self._init_sharded_post_forward_param_metadata(param)
+ self._init_extensions()
self.all_gather_outputs: List[torch.Tensor] = []
self._param_fqn: Optional[str] = None # prefixed from root module
@@ -236,6 +254,24 @@ class FSDPParam:
self.reduce_dtype = reduce_dtype
# None indicates that the mixed precision is not enabled
+ def _init_extensions(self) -> None:
+ inner_tensor = self._sharded_local_tensor
+ has_fsdp_pre_all_gather = hasattr(inner_tensor, "fsdp_pre_all_gather")
+ has_fsdp_post_all_gather = hasattr(inner_tensor, "fsdp_post_all_gather")
+ if has_fsdp_pre_all_gather != has_fsdp_post_all_gather:
+ raise AssertionError(
+ "Both fsdp_pre_all_gather and fsdp_post_all_gather should be defined "
+ f"if using all-gather extensions: {inner_tensor}"
+ )
+ if has_fsdp_pre_all_gather:
+ if self.padded_sharded_param_size != self._sharded_local_tensor.size():
+ raise NotImplementedError(
+ "FSDP all-gather extensions require even sharding on dim-0.\n"
+ f"{self._orig_size} is not divisible by FSDP world size {self.mesh_info.mesh.size()}."
+ )
+ self._extensions_data = ExtensionsData()
+ self._unsharded_inner_tensors: List[torch.Tensor] = []
+
def init_all_gather_outputs(
self,
all_gather_input_numels: List[int],
@@ -251,13 +287,40 @@ class FSDPParam:
]
def init_unsharded_param(self):
- if hasattr(self, "_unsharded_param"):
- return # already initialized
- # For the default path (no post-all-gather), the all-gather output
- # gives the unsharded parameter data directly
- assert len(self.all_gather_outputs) == 1
+ if hasattr(self, "_unsharded_param"): # after the 1st all-gather
+ inner_tensor = self._sharded_local_tensor
+ if not hasattr(inner_tensor, "fsdp_post_all_gather"):
+ return # already initialized
+ for tensor in self._unsharded_inner_tensors:
+ alloc_storage(tensor)
+ all_gather_outputs = self._unflatten_all_gather_outputs()
+ inner_tensor.fsdp_post_all_gather(
+ all_gather_outputs,
+ self._extensions_data.all_gather_metadata,
+ self.param_dtype or self.orig_dtype,
+ out=self._unsharded_param,
+ )
+ self._extensions_data.clear()
+ return
+ inner_tensor = self._sharded_local_tensor
+ if hasattr(inner_tensor, "fsdp_post_all_gather"):
+ all_gather_outputs = self._unflatten_all_gather_outputs()
+ (
+ unsharded_tensor,
+ self._unsharded_inner_tensors,
+ ) = inner_tensor.fsdp_post_all_gather(
+ all_gather_outputs,
+ self._extensions_data.all_gather_metadata,
+ self.param_dtype or self.orig_dtype,
+ )
+ self._extensions_data.clear()
+ else:
+ # For the default path (no post-all-gather), the all-gather output
+ # gives the unsharded parameter data directly
+ assert len(self.all_gather_outputs) == 1, f"{len(self.all_gather_outputs)}"
+ unsharded_tensor = self.all_gather_outputs[0]
unsharded_param = torch.as_strided(
- self.all_gather_outputs[0],
+ unsharded_tensor,
self._orig_size,
make_contiguous_strides_for(self._orig_size),
storage_offset=0,
@@ -273,9 +336,17 @@ class FSDPParam:
self._unsharded_param = nn.Parameter(unsharded_param)
self._unsharded_param.requires_grad_(self.sharded_param.requires_grad)
+ def _unflatten_all_gather_outputs(self) -> Tuple[torch.Tensor, ...]:
+ return tuple(
+ t.view(-1, *s[1:])
+ for t, s in zip(
+ self.all_gather_outputs, self._extensions_data.all_gather_input_sizes
+ )
+ )
+
def to_sharded(self) -> None:
self._setattr_on_modules(self.sharded_param)
- self.free_all_gather_outputs()
+ self.free_unsharded_param()
self.sharded_state = ShardedState.SHARDED
def to_sharded_post_forward(self) -> None:
@@ -309,7 +380,7 @@ class FSDPParam:
self.to_sharded_post_forward_dtensor(sharded_post_forward_tensor)
)
self._setattr_on_modules(self._sharded_post_forward_param)
- self.free_all_gather_outputs()
+ self.free_unsharded_param()
self.sharded_state = ShardedState.SHARDED_POST_FORWARD
def to_unsharded(self) -> None:
@@ -368,18 +439,33 @@ class FSDPParam:
)
def alloc_all_gather_outputs(self) -> None:
- unsafe_alloc_storage(self.all_gather_outputs[0])
+ for tensor in self.all_gather_outputs:
+ alloc_storage(tensor)
- def free_all_gather_outputs(self) -> None:
- unsafe_free_storage(self.all_gather_outputs[0])
+ def free_unsharded_param(self) -> None:
+ for tensor in itertools.chain(
+ self.all_gather_outputs, self._unsharded_inner_tensors
+ ):
+ free_storage(tensor)
@property
def all_gather_inputs(self) -> List[torch.Tensor]: # 1D
self._assert_in_states(ShardedState.SHARDED, ShardedState.SHARDED_POST_FORWARD)
if self.sharded_state == ShardedState.SHARDED:
+ if hasattr(self._sharded_local_tensor, "fsdp_pre_all_gather"):
+ (
+ all_gather_inputs,
+ self._extensions_data.all_gather_metadata,
+ ) = self._sharded_local_tensor.fsdp_pre_all_gather()
+ self._extensions_data.all_gather_input_sizes = [
+ t.size() for t in all_gather_inputs
+ ]
+ return [t.view(-1) for t in all_gather_inputs]
sharded_param_data = self._sharded_param_data
return [_to_dtype_if_needed(sharded_param_data, self.param_dtype)]
elif self.sharded_state == ShardedState.SHARDED_POST_FORWARD:
+ if hasattr(self._sharded_local_tensor, "fsdp_pre_all_gather"):
+ raise NotImplementedError()
all_gather_input = _to_dtype_if_needed(
cast(torch.Tensor, self._sharded_post_forward_param_data),
self.param_dtype,
@@ -411,6 +497,10 @@ class FSDPParam:
grad = grad._local_tensor
return grad
+ @property
+ def _sharded_local_tensor(self) -> torch.Tensor:
+ return cast(DTensor, self.sharded_param)._local_tensor
+
def _assert_in_states(self, *states: ShardedState) -> None:
if self.sharded_state not in states:
_raise_assert_with_print(
@@ -418,18 +508,15 @@ class FSDPParam:
)
-# NOTE: Unsafe here refers to not checking whether the storage is already
-# allocated or freed, respectively. We should be safe to use them since we
-# explicitly manage the state transition.
-def unsafe_alloc_storage(tensor: torch.Tensor) -> None:
- # Skip the already-allocated check and assume that `tensor` is the base
- # tensor to save CPU overhead
- tensor.untyped_storage().resize_(tensor.numel() * tensor.itemsize)
+def alloc_storage(tensor: torch.Tensor) -> None:
+ size = tensor.numel() * tensor.itemsize
+ if (storage := tensor.untyped_storage()).size() != size:
+ storage.resize_(size)
-def unsafe_free_storage(tensor: torch.Tensor) -> None:
- # Skip the already-freed check to save CPU overhead
- tensor.untyped_storage().resize_(0)
+def free_storage(tensor: torch.Tensor) -> None:
+ if (storage := tensor.untyped_storage()).size() != 0:
+ storage.resize_(0)
# NOTE: These bypass `nn.Module.__setattr__` checks, which incur non-trivial
diff --git a/torch/distributed/_composable/fsdp/_fsdp_param_group.py b/torch/distributed/_composable/fsdp/_fsdp_param_group.py
index 8b6a844f47..a6eb977e02 100644
--- a/torch/distributed/_composable/fsdp/_fsdp_param_group.py
+++ b/torch/distributed/_composable/fsdp/_fsdp_param_group.py
@@ -244,7 +244,7 @@ class FSDPParamGroup:
self._all_gather_result, self.fsdp_params, self._all_gather_process_group
)
for fsdp_param in self.fsdp_params:
- fsdp_param.init_unsharded_param() # no-op after 1st call
+ fsdp_param.init_unsharded_param()
self._to_unsharded()
all_gather_copy_out_event = torch.cuda.Event()
all_gather_copy_out_event.record()
diff --git a/torch/testing/_internal/common_fsdp.py b/torch/testing/_internal/common_fsdp.py
index 6f0ed8bd78..1f8487290e 100644
--- a/torch/testing/_internal/common_fsdp.py
+++ b/torch/testing/_internal/common_fsdp.py
@@ -829,12 +829,14 @@ class MLP(nn.Module):
self,
dim: int,
device: Optional[torch.device] = None,
+ *,
+ bias: bool = True,
with_buffer: bool = False,
dim_multiplier: int = 4,
):
super().__init__()
- self.in_proj = nn.Linear(dim, dim_multiplier * dim, device=device)
- self.out_proj = nn.Linear(dim_multiplier * dim, dim, device=device)
+ self.in_proj = nn.Linear(dim, dim_multiplier * dim, device=device, bias=bias)
+ self.out_proj = nn.Linear(dim_multiplier * dim, dim, device=device, bias=bias)
if with_buffer:
self.register_buffer("buffer", torch.randn((dim,), device=device))
else:
|
2.41.0
|
95a4d4a4227b9f17b71707945a97451f7fb62a5
|
Mon, 15 Apr 2024 10:31:02 -0700
|
[PATCH 0167/1000] [FSDP2] Added `mesh` arg to `fsdp_pre_all_gather` (#123953)
|
This PR adds a `mesh: DeviceMesh` argument to `fsdp_pre_all_gather()` so that the extension knows over which mesh the all-gather is happening. This can be useful for recovering the post-all-gather tensor size in `fsdp_post_all_gather()` (e.g. for `NF4Tensor`). Pull Request resolved: https://github.com/pytorch/pytorch/pull/123953 Approved by: https://github.com/Skylion007, https://github.com/wanchaol ghstack dependencies: #119302, #122908
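As a rough sketch (not code from this PR), an extension could use the new argument to remember the shard world size for the post-all-gather:

```python
from typing import Any, Tuple

import torch
from torch.distributed.device_mesh import DeviceMesh


def fsdp_pre_all_gather(
    self, mesh: DeviceMesh
) -> Tuple[Tuple[torch.Tensor, ...], Any]:
    # Hypothetical use of the new argument: stash the mesh size so that
    # fsdp_post_all_gather can recover the unsharded size as
    # shard_world_size * sharded dim-0 size.
    metadata = {"shard_world_size": mesh.size(), "sharded_size": tuple(self.size())}
    return (self,), metadata
```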
|
diff --git a/test/distributed/_composable/fsdp/test_fully_shard_extensions.py b/test/distributed/_composable/fsdp/test_fully_shard_extensions.py
index 655bff78f0..5ce0f272c3 100644
--- a/test/distributed/_composable/fsdp/test_fully_shard_extensions.py
+++ b/test/distributed/_composable/fsdp/test_fully_shard_extensions.py
@@ -10,8 +10,9 @@ from typing import Any, List, Optional, Tuple, Union
import torch
import torch.distributed as dist
import torch.nn as nn
-
from torch.distributed._composable.fsdp import fully_shard, MixedPrecisionPolicy
+
+from torch.distributed.device_mesh import DeviceMesh
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import (
@@ -24,7 +25,9 @@ from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.two_tensor import TwoTensor
-def two_tensor_fsdp_pre_all_gather(self) -> Tuple[Tuple[torch.Tensor, ...], Any]:
+def two_tensor_fsdp_pre_all_gather(
+ self, mesh: DeviceMesh
+) -> Tuple[Tuple[torch.Tensor, ...], Any]:
all_gather_inputs = (self.a, self.b)
metadata = None
return all_gather_inputs, metadata
diff --git a/torch/distributed/_composable/fsdp/_fsdp_param.py b/torch/distributed/_composable/fsdp/_fsdp_param.py
index 70d0a28ffc..15046a79c6 100644
--- a/torch/distributed/_composable/fsdp/_fsdp_param.py
+++ b/torch/distributed/_composable/fsdp/_fsdp_param.py
@@ -456,7 +456,7 @@ class FSDPParam:
(
all_gather_inputs,
self._extensions_data.all_gather_metadata,
- ) = self._sharded_local_tensor.fsdp_pre_all_gather()
+ ) = self._sharded_local_tensor.fsdp_pre_all_gather(self.mesh_info.mesh)
self._extensions_data.all_gather_input_sizes = [
t.size() for t in all_gather_inputs
]
|
2.41.0
|
aba918bd83c5524cf19672a7aeec96faa99997a
|
Mon, 15 Apr 2024 21:41:42 +0000
|
[PATCH 0168/1000] Support Accelerator OOM Error (#121200) (#121702)
|
Fixes #121200. This PR introduces AcceleratorOutOfMemoryError for all privateuse1 backends. On the Python side, there is a PyError object that is set only when privateuse1 is registered; all privateuse1 backends can then use this error for memory errors. More error types may be added in the future. Pull Request resolved: https://github.com/pytorch/pytorch/pull/121702 Approved by: https://github.com/guangyey, https://github.com/albanD
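As a minimal sketch of the user-facing effect (assuming a CUDA device for illustration; `torch.cuda.OutOfMemoryError` stays an alias of the underlying `torch._C.OutOfMemoryError` per the diff below):

```python
import torch

try:
    # Deliberately oversized allocation to trigger an out-of-memory error.
    x = torch.empty((1 << 40,), device="cuda")
except torch.cuda.OutOfMemoryError as e:
    # After this PR the exception class is the device-agnostic
    # "torch.OutOfMemoryError", so other accelerator backends can reuse it.
    print(f"caught OOM: {type(e).__name__}")
```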
|
diff --git a/test/test_public_bindings.py b/test/test_public_bindings.py
index 2f9aad141e..65aa339aff 100644
--- a/test/test_public_bindings.py
+++ b/test/test_public_bindings.py
@@ -188,6 +188,7 @@ class TestPublicBindings(TestCase):
"NumberType",
"OperatorInfo",
"OptionalType",
+ "OutOfMemoryError",
"ParameterDict",
"parse_ir",
"parse_schema",
diff --git a/torch/_C/__init__.pyi.in b/torch/_C/__init__.pyi.in
index b39f0f472e..f017c16597 100644
--- a/torch/_C/__init__.pyi.in
+++ b/torch/_C/__init__.pyi.in
@@ -2192,7 +2192,7 @@ def _current_graph_task_id() -> _int: ...
def _current_autograd_node() -> _Node: ...
# Defined in torch/csrc/Exceptions.cpp
-class _OutOfMemoryError(RuntimeError): ...
+class OutOfMemoryError(RuntimeError): ...
class _DistError(RuntimeError): ...
class _DistBackendError(RuntimeError): ...
class _DistStoreError(RuntimeError): ...
diff --git a/torch/csrc/Exceptions.cpp b/torch/csrc/Exceptions.cpp
index 975b2e71e2..a58c62df17 100644
--- a/torch/csrc/Exceptions.cpp
+++ b/torch/csrc/Exceptions.cpp
@@ -60,13 +60,15 @@ could not be completed because the input matrix is singular.",
// NOLINTNEXTLINE(bugprone-assignment-in-if-condition)
ASSERT_TRUE(
THPException_OutOfMemoryError = PyErr_NewExceptionWithDoc(
- "torch.cuda.OutOfMemoryError",
- "Exception raised when CUDA is out of memory",
+ "torch.OutOfMemoryError",
+ "Exception raised when device is out of memory",
PyExc_RuntimeError,
nullptr));
+ PyTypeObject* type = (PyTypeObject*)THPException_OutOfMemoryError;
+ type->tp_name = "torch.OutOfMemoryError";
ASSERT_TRUE(
PyModule_AddObject(
- module, "_OutOfMemoryError", THPException_OutOfMemoryError) == 0);
+ module, "OutOfMemoryError", THPException_OutOfMemoryError) == 0);
// NOLINTNEXTLINE(bugprone-assignment-in-if-condition)
ASSERT_TRUE(
diff --git a/torch/cuda/__init__.py b/torch/cuda/__init__.py
index eb78243748..1344de8b9f 100644
--- a/torch/cuda/__init__.py
+++ b/torch/cuda/__init__.py
@@ -243,7 +243,7 @@ class DeferredCudaCallError(Exception):
pass
-OutOfMemoryError = torch._C._OutOfMemoryError
+OutOfMemoryError = torch._C.OutOfMemoryError
def init():
|
2.41.0
|
d222473fc69db8a1258973f94534a181c8d8681
|
Mon, 15 Apr 2024 21:44:26 +0000
|
[PATCH 0169/1000] [EZ][BE] Fix unknown pragma warning (#124086)
|
Use the `C10_DIAGNOSTIC_` macros instead of raw `#pragma clang diagnostic`, so that the appropriate compiler-supported pragmas are emitted. Fixes the following warning during the bazel build ``` INFO: From Compiling aten/src/ATen/native/TensorFactories.cpp: aten/src/ATen/native/TensorFactories.cpp:372: warning: ignoring #pragma clang diagnostic [-Wunknown-pragmas] 372 | #pragma clang diagnostic push | aten/src/ATen/native/TensorFactories.cpp:373: warning: ignoring #pragma clang diagnostic [-Wunknown-pragmas] 373 | #pragma clang diagnostic ignored "-Wmissing-prototypes" | aten/src/ATen/native/TensorFactories.cpp:375: warning: ignoring #pragma clang diagnostic [-Wunknown-pragmas] 375 | #pragma clang diagnostic pop | ``` Pull Request resolved: https://github.com/pytorch/pytorch/pull/124086 Approved by: https://github.com/kit1980, https://github.com/seemethere, https://github.com/Skylion007
|
diff --git a/aten/src/ATen/native/TensorFactories.cpp b/aten/src/ATen/native/TensorFactories.cpp
index 2416d015ee..c8fddc3756 100644
--- a/aten/src/ATen/native/TensorFactories.cpp
+++ b/aten/src/ATen/native/TensorFactories.cpp
@@ -369,10 +369,9 @@ Tensor& empty_out(IntArrayRef size,
// Some scalar types in CAST_OP have no declarations, they may be unused in Pytorch.
// But we keep them and ignore the warning here until verified in the future.
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wmissing-prototypes"
+C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wmissing-prototypes")
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, DEFINE_CAST_OP)
-#pragma clang diagnostic pop
+C10_DIAGNOSTIC_POP()
#undef DEFINE_CAST_OP
|
2.41.0
|
0ad64e8a644d478228aa740e03f65d6153c4074
|
Mon, 15 Apr 2024 22:31:12 +0000
|
[PATCH 0171/1000] update docs for separate context and forward functions (#121955)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/121955 Approved by: https://github.com/soulitzer
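For context, a minimal sketch of the separate-`setup_context` style these docs now describe (not code from this PR):

```python
import torch


class Square(torch.autograd.Function):
    @staticmethod
    def forward(x):
        # With a separate setup_context, forward no longer receives ctx.
        return x * x

    @staticmethod
    def setup_context(ctx, inputs, output):
        (x,) = inputs
        # Per the updated docs, save_for_backward may be called here
        # instead of inside forward.
        ctx.save_for_backward(x)

    @staticmethod
    def backward(ctx, grad_output):
        (x,) = ctx.saved_tensors
        return 2 * x * grad_output


x = torch.randn(3, requires_grad=True)
Square.apply(x).sum().backward()
print(torch.allclose(x.grad, 2 * x))  # True
```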
|
diff --git a/torch/autograd/function.py b/torch/autograd/function.py
index a15ef9e88d..3ff96953b2 100644
--- a/torch/autograd/function.py
+++ b/torch/autograd/function.py
@@ -32,8 +32,8 @@ class FunctionCtx:
def save_for_backward(self, *tensors: torch.Tensor):
r"""Save given tensors for a future call to :func:`~Function.backward`.
- ``save_for_backward`` should be called at most once, only from inside the
- :func:`forward` method, and only with tensors.
+ ``save_for_backward`` should be called at most once, in either the
+ :func:`setup_context` or :func:`forward` methods, and only with tensors.
All tensors intended to be used in the backward pass should be saved
with ``save_for_backward`` (as opposed to directly on ``ctx``) to prevent
@@ -91,8 +91,9 @@ class FunctionCtx:
def save_for_forward(self, *tensors: torch.Tensor):
r"""Save given tensors for a future call to :func:`~Function.jvp`.
- ``save_for_forward`` should be only called once, from inside the :func:`forward`
- method, and only be called with tensors.
+ ``save_for_forward`` should be called at most once, in either the
+ :func:`setup_context` or :func:`forward` methods, and all arguments
+ should be tensors.
In :func:`jvp`, saved objects can be accessed through the :attr:`saved_tensors`
attribute.
@@ -144,8 +145,8 @@ class FunctionCtx:
def mark_dirty(self, *args: torch.Tensor):
r"""Mark given tensors as modified in an in-place operation.
- **This should be called at most once, only from inside the**
- :func:`forward` **method, and all arguments should be inputs.**
+ This should be called at most once, in either the :func:`setup_context`
+ or :func:`forward` methods, and all arguments should be inputs.
Every tensor that's been modified in-place in a call to :func:`forward`
should be given to this function, to ensure correctness of our checks.
@@ -188,8 +189,8 @@ class FunctionCtx:
def mark_non_differentiable(self, *args: torch.Tensor):
r"""Mark outputs as non-differentiable.
- **This should be called at most once, only from inside the**
- :func:`forward` **method, and all arguments should be tensor outputs.**
+ This should be called at most once, in either the :func:`setup_context`
+ or :func:`forward` methods, and all arguments should be tensor outputs.
This will mark outputs as not requiring gradients, increasing the
efficiency of backward computation. You still need to accept a gradient
@@ -220,7 +221,8 @@ class FunctionCtx:
def set_materialize_grads(self, value: bool):
r"""Set whether to materialize grad tensors. Default is ``True``.
- **This should be called only from inside the** :func:`forward` **method**
+ This should be called only from either the :func:`setup_context` or
+ :func:`forward` methods.
If ``True``, undefined grad tensors will be expanded to tensors full of zeros
prior to calling the :func:`backward` and :func:`jvp` methods.
|
2.41.0
|
c25b18d76feacc913851e513ce75956350bd5e8
|
Mon, 15 Apr 2024 09:08:11 -0700
|
[PATCH 0174/1000] Excise old custom ops prototype from custom_op_db (#124062)
|
Test Plan: - tests Pull Request resolved: https://github.com/pytorch/pytorch/pull/124062 Approved by: https://github.com/albanD ghstack dependencies: #123615
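For reference, a minimal sketch of the new-style API the file is migrated to (hypothetical op name; the `register_autograd(setup_context, backward)` calling convention follows the diff below):

```python
import numpy as np
import torch
from torch import Tensor


@torch.library.custom_op("_my_testing::numpy_sin", mutates_args=())
def numpy_sin(x: Tensor) -> Tensor:
    # Eager implementation backed by NumPy.
    return torch.from_numpy(np.sin(x.detach().cpu().numpy())).to(x.device)


@numpy_sin.register_fake
def _(x: Tensor) -> Tensor:
    # Shape/dtype propagation only; no real data is touched.
    return torch.empty_like(x)


def numpy_sin_setup_context(ctx, inputs, output) -> None:
    (x,) = inputs
    ctx.save_for_backward(x)


def numpy_sin_backward(ctx, grad_out):
    (x,) = ctx.saved_tensors
    return grad_out * x.cos()


numpy_sin.register_autograd(numpy_sin_setup_context, numpy_sin_backward)
```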
|
diff --git a/torch/testing/_internal/custom_op_db.py b/torch/testing/_internal/custom_op_db.py
index f78777e5c5..7567d59269 100644
--- a/torch/testing/_internal/custom_op_db.py
+++ b/torch/testing/_internal/custom_op_db.py
@@ -16,7 +16,6 @@ from torch.testing._internal.autograd_function_db import (
from torch import Tensor
from torch.types import Number
from typing import * # noqa: F403
-import torch._custom_ops as custom_ops
# Note: [custom op db]
#
@@ -152,25 +151,21 @@ def sample_inputs_numpy_nonzero(opinfo, device, dtype, requires_grad, **kwargs):
yield SampleInput(result, args=())
-@custom_ops.custom_op('_torch_testing::numpy_view_copy')
+@torch.library.custom_op("_torch_testing::numpy_view_copy", mutates_args=())
def numpy_view_copy(x: Tensor, shape: Sequence[int]) -> Tensor:
- raise NotImplementedError()
-
-@custom_ops.impl('_torch_testing::numpy_view_copy')
-def numpy_view_copy_impl(x, shape) -> Tensor:
return torch.tensor(np.copy(to_numpy(x).reshape(shape)), device=x.device)
-@custom_ops.impl_abstract('_torch_testing::numpy_view_copy')
-def numpy_view_copy_abstract(x, shape) -> Tensor:
+@numpy_view_copy.register_fake
+def _(x, shape) -> Tensor:
return x.clone().view(shape).clone()
-@custom_ops.impl_save_for_backward('_torch_testing::numpy_view_copy')
-def numpy_view_copy_save_for_backward(inputs, output) -> Tensor:
- return inputs.x.shape
+def numpy_view_copy_setup_context(ctx, inputs, output) -> None:
+ ctx.x_shape = inputs[0].shape
+
+def numpy_view_copy_backward(ctx, grad_out):
+ return torch.ops._torch_testing.numpy_view_copy(grad_out, ctx.x_shape), None
-@custom_ops.impl_backward('_torch_testing::numpy_view_copy')
-def numpy_view_copy_backward(ctx, x_shape, grad_out) -> Dict[str, Tensor]:
- return {'x': torch.ops._torch_testing.numpy_view_copy(grad_out, x_shape)}
+numpy_view_copy.register_autograd(numpy_view_copy_setup_context, numpy_view_copy_backward)
def sample_inputs_numpy_view_copy(opinfo, device, dtype, requires_grad, **kwargs):
make_arg = functools.partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
@@ -259,12 +254,8 @@ def numpy_split_copy_with_int_backward(ctx, grad_out, _):
numpy_split_copy_with_int.register_autograd(numpy_split_copy_with_int_setup_context, numpy_split_copy_with_int_backward)
-@custom_ops.custom_op('_torch_testing::numpy_nms')
+@torch.library.custom_op("_torch_testing::numpy_nms", mutates_args=())
def numpy_nms(boxes: Tensor, scores: Tensor, iou_threshold: Number) -> Tensor:
- raise NotImplementedError()
-
-@custom_ops.impl('_torch_testing::numpy_nms')
-def numpy_nms_impl(boxes, scores, iou_threshold):
# Adapted from Ross Girshick's fast-rcnn implementation at
# https://github.com/rbgirshick/fast-rcnn/blob/master/lib/utils/nms.py
assert boxes.device == scores.device
@@ -307,8 +298,8 @@ def numpy_nms_impl(boxes, scores, iou_threshold):
assert result.size(0) >= 2
return result
-@custom_ops.impl_abstract('_torch_testing::numpy_nms')
-def numpy_nms_abstract(boxes, scores, iou_threshold):
+@numpy_nms.register_fake
+def _(boxes, scores, iou_threshold):
assert boxes.device == scores.device
N = boxes.shape[0]
assert boxes.shape == (N, 4)
|
2.41.0
|
079c766892e873b5691185b3b76569bad030090
|
Tue, 16 Apr 2024 00:24:32 +0000
|
[PATCH 0177/1000] Fix Asynchronous PyTorch Profiler Trace (#124080)
|
Summary: The merge of D55925068 introduced an overflow issue when recording a trace using dyno gputrace. Torch ops can be enumerated without an end time because they were still running when the recording ended; by default such events have their end time set to INT_MIN. Computing duration() for these events as end - start then overflows, yielding an absurdly long duration. This was previously masked because INT_MIN was divided by 1000 as part of the microsecond/nanosecond conversion. This change patches Torch ops; a future PR will add a more universal guard in Kineto. Test Plan: Trace recorded using resnet test. Trace: https://www.internalfb.com/intern/perfdoctor/trace_view?filepath=tree/traces/dynocli/0/1713199267/localhost/libkineto_activities_2247224.json.gz&bucket=gpu_traces Differential Revision: D56144914 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124080 Approved by: https://github.com/aaronenyeshi
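To see why the clamp in the diff below fixes things, here is a small illustration in pure Python simulating C++ `int64_t` wraparound (the timestamp value is made up):

```python
INT64_MIN = -(2**63)


def wrap_int64(x: int) -> int:
    # Simulate two's-complement wraparound of a C++ int64_t.
    return (x + 2**63) % 2**64 - 2**63


start_ns = 1_713_199_267_000_000_000  # made-up start timestamp in ns
end_ns = INT64_MIN                    # "still running" sentinel end time

overflowed_duration = wrap_int64(end_ns - start_ns)
print(overflowed_duration)            # huge positive number -> absurdly long event

clamped_end = max(end_ns, start_ns)   # the fix: never end before the start
print(clamped_end - start_ns)         # 0, a sane duration for an open event
```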
|
diff --git a/torch/csrc/profiler/collection.cpp b/torch/csrc/profiler/collection.cpp
index 104657ec19..d64df0bd04 100644
--- a/torch/csrc/profiler/collection.cpp
+++ b/torch/csrc/profiler/collection.cpp
@@ -813,13 +813,18 @@ void passEventsToKineto(
// Generate Kineto events for each event recorded by the PyTorch profiler.
for (const auto i : c10::irange(results.size())) {
const auto& e = results[i];
+ // (TODO): This is a temporary fix for async traces to make sure that we do
+ // not use int64 MIN as end time in Kineto. If we use that value, the
+ // duration will overflow and become a very large positive number. For a
+ // long term solution, add guards in kineto for each activity type
+ int64_t act_end_time = std::max(e->endTimeNS(), e->start_time_ns_);
auto* activity = cpu_trace.addCPUActivity(
e->name(),
e->kinetoType(),
e->kineto_info_,
e->correlationID(),
e->start_time_ns_,
- e->endTimeNS());
+ act_end_time);
TORCH_INTERNAL_ASSERT(activity || !kKinetoAvailable);
if (activity) {
|
2.41.0
|
2596fd3e0371c3568315bcabf9664157e06c457
|
Tue, 16 Apr 2024 00:42:18 +0000
|
[PATCH 0178/1000] [Distributed] [4/N] Fix clang-tidy warnings in torch/csrc/distributed/c10d (#124032)
|
This PR continues to fix some clang-tidy warnings in distributed/c10d code, following https://github.com/pytorch/pytorch/pull/123312. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124032 Approved by: https://github.com/Skylion007
|
diff --git a/torch/csrc/distributed/c10d/FileStore.cpp b/torch/csrc/distributed/c10d/FileStore.cpp
index 98b90297a6..df74adeb6d 100644
--- a/torch/csrc/distributed/c10d/FileStore.cpp
+++ b/torch/csrc/distributed/c10d/FileStore.cpp
@@ -90,6 +90,7 @@ class Lock {
flock(operation);
}
+ // NOLINTNEXTLINE(bugprone-exception-escape)
~Lock() {
unlock();
}
@@ -290,6 +291,7 @@ FileStore::FileStore(std::string path, int numWorkers)
addHelper(refCountKey_, 1);
}
+// NOLINTNEXTLINE(bugprone-exception-escape)
FileStore::~FileStore() {
// If the file does not exist - exit.
// This can happen when FileStore is invoked from python language which has
diff --git a/torch/csrc/distributed/c10d/Functional.cpp b/torch/csrc/distributed/c10d/Functional.cpp
index d3c4a9fe1d..63d30d51d6 100644
--- a/torch/csrc/distributed/c10d/Functional.cpp
+++ b/torch/csrc/distributed/c10d/Functional.cpp
@@ -105,7 +105,9 @@ c10d::ReduceOp to_reduce_op(const std::string& reduce_op) {
at::Tensor& all_reduce_(
at::Tensor& input,
+ // NOLINTNEXTLINE(performance-unnecessary-value-param)
std::string reduce_op,
+ // NOLINTNEXTLINE(performance-unnecessary-value-param)
std::string group_name) {
c10d::AllreduceOptions opts;
opts.reduceOp = to_reduce_op(reduce_op);
@@ -122,12 +124,14 @@ at::Tensor all_reduce(
std::string reduce_op,
std::string group_name) {
auto output = input.clone(at::MemoryFormat::Contiguous);
- return all_reduce_(output, reduce_op, group_name);
+ return all_reduce_(output, std::move(reduce_op), std::move(group_name));
}
std::vector<at::Tensor> all_reduce_coalesced_(
std::vector<at::Tensor> inputs,
+ // NOLINTNEXTLINE(performance-unnecessary-value-param)
std::string reduce_op,
+ // NOLINTNEXTLINE(performance-unnecessary-value-param)
std::string group_name) {
c10d::AllreduceCoalescedOptions opts;
opts.reduceOp = to_reduce_op(reduce_op);
@@ -141,6 +145,7 @@ std::vector<at::Tensor> all_reduce_coalesced_(
}
std::vector<at::Tensor> all_reduce_coalesced(
+ // NOLINTNEXTLINE(performance-unnecessary-value-param)
std::vector<at::Tensor> inputs,
std::string reduce_op,
std::string group_name) {
@@ -149,7 +154,8 @@ std::vector<at::Tensor> all_reduce_coalesced(
for (const auto& tensor : inputs) {
outputs.push_back(tensor.clone(at::MemoryFormat::Contiguous));
}
- return all_reduce_coalesced_(outputs, reduce_op, group_name);
+ return all_reduce_coalesced_(
+ outputs, std::move(reduce_op), std::move(group_name));
}
at::Tensor allocate_all_gather_output(
@@ -165,6 +171,7 @@ at::Tensor allocate_all_gather_output(
std::vector<at::Tensor> all_gather_into_tensor_coalesced(
std::vector<at::Tensor> inputs,
int64_t group_size,
+ // NOLINTNEXTLINE(performance-unnecessary-value-param)
std::string group_name) {
std::vector<at::Tensor> outputs;
outputs.reserve(inputs.size());
@@ -173,8 +180,7 @@ std::vector<at::Tensor> all_gather_into_tensor_coalesced(
}
auto group = c10d::resolve_process_group(group_name);
- auto work = group->allgather_into_tensor_coalesced(
- outputs, const_cast<std::vector<at::Tensor>&>(inputs));
+ auto work = group->allgather_into_tensor_coalesced(outputs, inputs);
for (const auto& tensor : outputs) {
c10d::RankLocal<WorkRegistry>::get().register_work(tensor, work);
}
@@ -186,7 +192,8 @@ at::Tensor all_gather_into_tensor(
int64_t group_size,
std::string group_name) {
std::vector<at::Tensor> inputs{input};
- return all_gather_into_tensor_coalesced(inputs, group_size, group_name)[0];
+ return all_gather_into_tensor_coalesced(
+ inputs, group_size, std::move(group_name))[0];
}
at::Tensor allocate_reduce_scatter_output(
@@ -206,8 +213,10 @@ at::Tensor allocate_reduce_scatter_output(
std::vector<at::Tensor> reduce_scatter_tensor_coalesced(
std::vector<at::Tensor> inputs,
+ // NOLINTNEXTLINE(performance-unnecessary-value-param)
std::string reduce_op,
int64_t group_size,
+ // NOLINTNEXTLINE(performance-unnecessary-value-param)
std::string group_name) {
c10d::ReduceScatterOptions opts;
opts.reduceOp = to_reduce_op(reduce_op);
@@ -218,8 +227,7 @@ std::vector<at::Tensor> reduce_scatter_tensor_coalesced(
}
auto group = c10d::resolve_process_group(group_name);
- auto work = group->reduce_scatter_tensor_coalesced(
- outputs, const_cast<std::vector<at::Tensor>&>(inputs), opts);
+ auto work = group->reduce_scatter_tensor_coalesced(outputs, inputs, opts);
for (const auto& tensor : outputs) {
c10d::RankLocal<WorkRegistry>::get().register_work(tensor, work);
}
@@ -233,13 +241,14 @@ at::Tensor reduce_scatter_tensor(
std::string group_name) {
std::vector<at::Tensor> inputs{input};
return reduce_scatter_tensor_coalesced(
- inputs, reduce_op, group_size, group_name)[0];
+ inputs, std::move(reduce_op), group_size, std::move(group_name))[0];
}
at::Tensor all_to_all_single(
const at::Tensor& input,
std::vector<int64_t> output_split_sizes,
std::vector<int64_t> input_split_sizes,
+ // NOLINTNEXTLINE(performance-unnecessary-value-param)
std::string group_name) {
std::vector<int64_t> output_sizes = input.sizes().vec();
output_sizes[0] = std::accumulate(
@@ -249,6 +258,7 @@ at::Tensor all_to_all_single(
auto group = c10d::resolve_process_group(group_name);
auto work = group->alltoall_base(
output,
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
const_cast<at::Tensor&>(input),
output_split_sizes,
input_split_sizes);
@@ -256,6 +266,7 @@ at::Tensor all_to_all_single(
return output;
}
+// NOLINTNEXTLINE(performance-unnecessary-value-param)
at::Tensor& broadcast_(at::Tensor& input, int64_t src, std::string group_name) {
c10d::BroadcastOptions opts;
opts.rootRank = src;
@@ -272,7 +283,7 @@ at::Tensor broadcast(
int64_t src,
std::string group_name) {
auto output = input.clone(at::MemoryFormat::Contiguous);
- return broadcast_(output, src, group_name);
+ return broadcast_(output, src, std::move(group_name));
}
at::Tensor wait_tensor(const at::Tensor& tensor) {
@@ -371,8 +382,11 @@ class AllToAllSingle : public torch::autograd::Function<AllToAllSingle> {
static torch::autograd::Variable forward(
torch::autograd::AutogradContext* ctx,
const at::Tensor& input,
+ // NOLINTNEXTLINE(performance-unnecessary-value-param)
std::vector<int64_t> output_split_sizes,
+ // NOLINTNEXTLINE(performance-unnecessary-value-param)
std::vector<int64_t> input_split_sizes,
+ // NOLINTNEXTLINE(performance-unnecessary-value-param)
std::string group_name) {
// swap sizes for backwards pass
ctx->saved_data["output_split_sizes"] = input_split_sizes;
@@ -416,9 +430,9 @@ class AllToAllSingle : public torch::autograd::Function<AllToAllSingle> {
at::Tensor all_to_all_single_autograd(
const at::Tensor& input,
- std::vector<int64_t> output_split_sizes,
- std::vector<int64_t> input_split_sizes,
- std::string group_name) {
+ const std::vector<int64_t>& output_split_sizes,
+ const std::vector<int64_t>& input_split_sizes,
+ const std::string& group_name) {
return AllToAllSingle::apply(
input, output_split_sizes, input_split_sizes, group_name)[0];
}
diff --git a/torch/csrc/distributed/c10d/GroupRegistry.cpp b/torch/csrc/distributed/c10d/GroupRegistry.cpp
index 3b8f004ad8..b13b4fa07c 100644
--- a/torch/csrc/distributed/c10d/GroupRegistry.cpp
+++ b/torch/csrc/distributed/c10d/GroupRegistry.cpp
@@ -13,7 +13,7 @@ class GroupRegistry {
const std::string& group_name,
c10::intrusive_ptr<c10d::ProcessGroup> group) {
std::unique_lock write_lock(lock_);
- auto [_, inserted] = registry_.emplace(group_name, group);
+ auto [_, inserted] = registry_.try_emplace(group_name, std::move(group));
TORCH_CHECK(
inserted,
"A process group is already registered under the name",
@@ -72,9 +72,10 @@ void register_process_group(
const std::string& group_name,
c10::intrusive_ptr<c10d::ProcessGroup> group) {
if (thread_isolation_mode) {
- RankLocal<::GroupRegistry>::get().register_group(group_name, group);
+ RankLocal<::GroupRegistry>::get().register_group(
+ group_name, std::move(group));
} else {
- process_registry.register_group(group_name, group);
+ process_registry.register_group(group_name, std::move(group));
}
}
diff --git a/torch/csrc/distributed/c10d/ParamCommsUtils.hpp b/torch/csrc/distributed/c10d/ParamCommsUtils.hpp
index c40aa1898f..61e3405acd 100644
--- a/torch/csrc/distributed/c10d/ParamCommsUtils.hpp
+++ b/torch/csrc/distributed/c10d/ParamCommsUtils.hpp
@@ -89,8 +89,8 @@ class TORCH_API ParamCommsDebugInfo : public c10::DebugInfoBase {
at::ScalarType dType_ = at::kByte;
std::vector<int64_t> inputSplitSizes_;
std::vector<int64_t> outputSplitSizes_;
- int globalRankStart_;
- int globalRankStride_;
+ int globalRankStart_{};
+ int globalRankStride_{};
std::vector<int64_t> groupRanks_{};
};
diff --git a/torch/csrc/distributed/c10d/ProcessGroup.hpp b/torch/csrc/distributed/c10d/ProcessGroup.hpp
index b968219e5a..f8dff7ec12 100644
--- a/torch/csrc/distributed/c10d/ProcessGroup.hpp
+++ b/torch/csrc/distributed/c10d/ProcessGroup.hpp
@@ -674,7 +674,7 @@ class TORCH_API ProcessGroup : public torch::CustomClassHolder {
std::vector<c10::Device> devices;
devices.reserve(deviceTypes_.size());
for (auto& dt : deviceTypes_) {
- devices.push_back(c10::Device(dt));
+ devices.emplace_back(dt);
}
return devices;
}
diff --git a/torch/csrc/distributed/c10d/ProcessGroupGloo.hpp b/torch/csrc/distributed/c10d/ProcessGroupGloo.hpp
index 7dfa6d6121..b0620f9667 100644
--- a/torch/csrc/distributed/c10d/ProcessGroupGloo.hpp
+++ b/torch/csrc/distributed/c10d/ProcessGroupGloo.hpp
@@ -147,7 +147,7 @@ class TORCH_API ProcessGroupGloo : public Backend {
const std::vector<std::string>& keys) override {
std::vector<std::vector<char>> res;
for (auto& value : store_->multiGet(keys)) {
- res.emplace_back(std::vector<char>(value.begin(), value.end()));
+ res.emplace_back(value.begin(), value.end());
}
return res;
}
@@ -156,8 +156,9 @@ class TORCH_API ProcessGroupGloo : public Backend {
const std::vector<std::string>& keys,
const std::vector<std::vector<char>>& values) override {
std::vector<std::vector<uint8_t>> u_values;
+ u_values.reserve(values.size());
for (auto& value : values) {
- u_values.emplace_back(std::vector<uint8_t>(value.begin(), value.end()));
+ u_values.emplace_back(value.begin(), value.end());
}
store_->multiSet(keys, u_values);
}
diff --git a/torch/csrc/distributed/c10d/UnixSockUtils.hpp b/torch/csrc/distributed/c10d/UnixSockUtils.hpp
index ffce091b6c..531f8459aa 100644
--- a/torch/csrc/distributed/c10d/UnixSockUtils.hpp
+++ b/torch/csrc/distributed/c10d/UnixSockUtils.hpp
@@ -2,8 +2,7 @@
#include <torch/csrc/distributed/c10d/Utils.hpp>
-namespace c10d {
-namespace tcputil {
+namespace c10d::tcputil {
#define CONNECT_SOCKET_OFFSET 2
@@ -23,5 +22,4 @@ inline struct ::pollfd getPollfd(int socket, short events) {
return res;
}
-} // namespace tcputil
-} // namespace c10d
+} // namespace c10d::tcputil
diff --git a/torch/csrc/distributed/c10d/WinSockUtils.hpp b/torch/csrc/distributed/c10d/WinSockUtils.hpp
index 9b2b1aa245..1a2c749129 100644
--- a/torch/csrc/distributed/c10d/WinSockUtils.hpp
+++ b/torch/csrc/distributed/c10d/WinSockUtils.hpp
@@ -2,8 +2,7 @@
#include <torch/csrc/distributed/c10d/Utils.hpp>
-namespace c10d {
-namespace tcputil {
+namespace c10d::tcputil {
#define CONNECT_SOCKET_OFFSET 1
@@ -23,5 +22,4 @@ inline struct ::pollfd getPollfd(int socket, short events) {
return res;
}
-} // namespace tcputil
-} // namespace c10d
+} // namespace c10d::tcputil
diff --git a/torch/csrc/distributed/c10d/init.cpp b/torch/csrc/distributed/c10d/init.cpp
index e0d8b96b5d..7cbd898499 100644
--- a/torch/csrc/distributed/c10d/init.cpp
+++ b/torch/csrc/distributed/c10d/init.cpp
@@ -85,7 +85,7 @@ static bool registered = registerGilChecker();
// TODO: move this somewhere more generally useful
template <typename T>
class IntrusivePtrNoGilDestructor {
- c10::intrusive_ptr<T> impl_;
+ c10::intrusive_ptr<T> impl_{};
public:
IntrusivePtrNoGilDestructor() = default;
@@ -132,9 +132,7 @@ class IntrusivePtrNoGilDestructor {
PYBIND11_DECLARE_HOLDER_TYPE(T, IntrusivePtrNoGilDestructor<T>, true);
-namespace torch {
-namespace distributed {
-namespace c10d {
+namespace torch::distributed::c10d {
namespace {
@@ -259,7 +257,7 @@ class PythonStore : public ::c10d::Store {
py::bytes(reinterpret_cast<const char*>(value.data()), value.size()));
}
- virtual std::vector<std::vector<uint8_t>> multiGet(
+ std::vector<std::vector<uint8_t>> multiGet(
const std::vector<std::string>& keys) override {
pybind11::gil_scoped_acquire gil;
pybind11::function fn = pybind11::get_overload(
@@ -270,15 +268,16 @@ class PythonStore : public ::c10d::Store {
std::vector<std::string> py_list =
pybind11::cast<std::vector<std::string>>(fn(keys));
std::vector<std::vector<uint8_t>> res;
+ res.reserve(py_list.size());
for (auto& str : py_list) {
- res.emplace_back(std::vector<uint8_t>(str.begin(), str.end()));
+ res.emplace_back(str.begin(), str.end());
}
return res;
}
- virtual void multiSet(
+ void multiSet(
const std::vector<std::string>& keys,
const std::vector<std::vector<uint8_t>>& values) override {
pybind11::gil_scoped_acquire gil;
@@ -289,9 +288,10 @@ class PythonStore : public ::c10d::Store {
}
std::vector<py::bytes> bytes;
+ bytes.reserve(values.size());
for (auto& value : values) {
bytes.emplace_back(
- py::bytes(reinterpret_cast<const char*>(value.data()), value.size()));
+ reinterpret_cast<const char*>(value.data()), value.size());
}
fn(keys, bytes);
@@ -350,6 +350,7 @@ static PyObject* reduceopmeta___instancecheck__(
}
Py_RETURN_FALSE;
}
+// NOLINTNEXTLINE(*c-arrays)
static PyMethodDef reduceopmeta_methods[] = {
{"__instancecheck__",
(PyCFunction)reduceopmeta___instancecheck__,
@@ -360,6 +361,7 @@ PyTypeObject* GetReduceOpMetaclass() {
static auto* metaclass = [] {
PyTypeObject* base_metaclass =
pybind11::detail::get_internals().default_metaclass;
+ // NOLINTNEXTLINE(*c-arrays)
PyType_Slot slots[] = {
{Py_tp_base, base_metaclass},
{Py_tp_methods, reduceopmeta_methods},
@@ -367,6 +369,7 @@ PyTypeObject* GetReduceOpMetaclass() {
};
PyType_Spec spec = {};
spec.name = "torch._C._distributed_c10d._ReduceOpMeta";
+ // NOLINTNEXTLINE(*-narrowing-conversions)
spec.basicsize = base_metaclass->tp_basicsize;
spec.flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE;
spec.slots = slots;
@@ -533,7 +536,7 @@ An enum-like class for built-in communication hooks: ``ALLREDUCE`` and ``FP16_CO
for (const auto& fut : futs) {
futures.push_back(fut->fut);
}
- reducer.install_futures(std::move(futures));
+ reducer.install_futures(futures);
},
py::call_guard<py::gil_scoped_release>())
.def(
@@ -616,7 +619,7 @@ An enum-like class for built-in communication hooks: ``ALLREDUCE`` and ``FP16_CO
.def(
"set_logger",
[](::c10d::Reducer& reducer,
- const std::shared_ptr<::c10d::Logger> logger) {
+ const std::shared_ptr<::c10d::Logger>& logger) {
std::weak_ptr<::c10d::Logger> logger_weakref = logger;
reducer.set_logger(logger_weakref);
})
@@ -636,7 +639,7 @@ An enum-like class for built-in communication hooks: ``ALLREDUCE`` and ``FP16_CO
"_update_process_group",
[](::c10d::Reducer& reducer,
c10::intrusive_ptr<::c10d::ProcessGroup> new_process_group) {
- return reducer.update_process_group(new_process_group);
+ return reducer.update_process_group(std::move(new_process_group));
},
py::call_guard<py::gil_scoped_release>());
@@ -764,6 +767,7 @@ This class does not support ``__members__`` property.)");
// With the above custom `__eq__`'s, I have to manually support the
// other types.
"__eq__",
+ // NOLINTNEXTLINE(performance-unnecessary-value-param)
[](const ::c10d::ReduceOp& self, py::object) { return false; })
.def(
"__hash__",
@@ -794,7 +798,7 @@ This class does not support ``__members__`` property.)");
return py::make_tuple(r.op_, preMulSupplement->tensor_factor);
}
},
- [](const py::tuple t) {
+ [](const py::tuple& t) {
// __setstate__
TORCH_CHECK(t.size() == 2, "Invalid state");
const auto op =
@@ -856,7 +860,7 @@ This class does not support ``__members__`` property.)");
"_register_process_group",
[](const std::string& group_name,
c10::intrusive_ptr<::c10d::ProcessGroup> group) {
- ::c10d::register_process_group(group_name, group);
+ ::c10d::register_process_group(group_name, std::move(group));
},
py::arg("group_name"),
py::arg("group"));
@@ -1295,9 +1299,9 @@ Example::
const std::vector<std::string>& keys,
const std::vector<std::string>& values) {
std::vector<std::vector<uint8_t>> vals;
+ vals.reserve(values.size());
for (auto& value : values) {
- vals.push_back(
- std::vector<uint8_t>(value.begin(), value.end()));
+ vals.emplace_back(value.begin(), value.end());
}
store.multiSet(keys, vals);
},
@@ -1534,7 +1538,7 @@ Arguments:
"allreduce",
[](const c10::intrusive_ptr<::c10d::ProcessGroup>& self,
std::vector<at::Tensor>& xs,
- ::c10d::ReduceOp op) {
+ const ::c10d::ReduceOp& op) {
::c10d::AllreduceOptions opts;
opts.reduceOp = op;
return self->allreduce(xs, opts);
@@ -1547,7 +1551,7 @@ Arguments:
"allreduce",
[](const c10::intrusive_ptr<::c10d::ProcessGroup>& self,
at::Tensor& x,
- ::c10d::ReduceOp op) {
+ const ::c10d::ReduceOp& op) {
::c10d::AllreduceOptions opts;
opts.reduceOp = op;
std::vector<at::Tensor> xs = {x};
@@ -1575,7 +1579,7 @@ Arguments:
[](const c10::intrusive_ptr<::c10d::ProcessGroup>& self,
at::Tensor& x,
int rootRank,
- ::c10d::ReduceOp op) {
+ const ::c10d::ReduceOp& op) {
::c10d::ReduceOptions opts;
opts.reduceOp = op;
opts.rootRank = rootRank;
@@ -1686,7 +1690,7 @@ Arguments:
[](const c10::intrusive_ptr<::c10d::ProcessGroup>& self,
at::Tensor& output,
std::vector<at::Tensor>& input,
- ::c10d::ReduceOp op) {
+ const ::c10d::ReduceOp& op) {
std::vector<at::Tensor> outputs = {output};
std::vector<std::vector<at::Tensor>> inputs = {input};
::c10d::ReduceScatterOptions opts;
@@ -1826,7 +1830,7 @@ Arguments:
self->registerOnCompletionHook(
[hookWrapper = ::c10d::PythonOnCompletionHook(std::move(
hook))](std::shared_ptr<::c10d::WorkInfo> workInfo) {
- hookWrapper(workInfo);
+ hookWrapper(std::move(workInfo));
});
},
py::arg("hook"),
@@ -1903,7 +1907,7 @@ Arguments:
})
.def_static("unbox", [](py::object obj) {
auto typePtr = torch::getCustomClass("__torch__.torch.classes.c10d.ProcessGroup");
- auto ivalue = torch::jit::toIValue(obj, typePtr);
+ auto ivalue = torch::jit::toIValue(std::move(obj), typePtr);
return ivalue.toCustomClass<::c10d::ProcessGroup>();
});
@@ -1995,7 +1999,7 @@ options :class:`~torch.distributed.ProcessGroupNCCL.Options`).
"allreduce",
[](const c10::intrusive_ptr<::c10d::Backend>& self,
std::vector<at::Tensor>& xs,
- ::c10d::ReduceOp op) {
+ const ::c10d::ReduceOp& op) {
::c10d::AllreduceOptions opts;
opts.reduceOp = op;
return self->allreduce(xs, opts);
@@ -2007,7 +2011,7 @@ options :class:`~torch.distributed.ProcessGroupNCCL.Options`).
"allreduce",
[](const c10::intrusive_ptr<::c10d::Backend>& self,
at::Tensor& x,
- ::c10d::ReduceOp op) {
+ const ::c10d::ReduceOp& op) {
::c10d::AllreduceOptions opts;
opts.reduceOp = op;
std::vector<at::Tensor> xs = {x};
@@ -2033,7 +2037,7 @@ options :class:`~torch.distributed.ProcessGroupNCCL.Options`).
[](const c10::intrusive_ptr<::c10d::Backend>& self,
at::Tensor& x,
int rootRank,
- ::c10d::ReduceOp op) {
+ const ::c10d::ReduceOp& op) {
::c10d::ReduceOptions opts;
opts.reduceOp = op;
opts.rootRank = rootRank;
@@ -2136,7 +2140,7 @@ options :class:`~torch.distributed.ProcessGroupNCCL.Options`).
[](const c10::intrusive_ptr<::c10d::Backend>& self,
at::Tensor& output,
std::vector<at::Tensor>& input,
- ::c10d::ReduceOp op) {
+ const ::c10d::ReduceOp& op) {
std::vector<at::Tensor> outputs = {output};
std::vector<std::vector<at::Tensor>> inputs = {input};
::c10d::ReduceScatterOptions opts;
@@ -2258,6 +2262,7 @@ options :class:`~torch.distributed.ProcessGroupNCCL.Options`).
intrusive_ptr_no_gil_destructor_class_<::c10d::ProcessGroupGloo>(
module, "ProcessGroupGloo", backend);
+ // NOLINTNEXTLINE(bugprone-unused-raii)
shared_ptr_class_<::gloo::transport::Device>(processGroupGloo, "Device");
intrusive_ptr_class_<::c10d::ProcessGroupGloo::Options>(
@@ -2535,7 +2540,7 @@ Example::
processGroupMPI.def_static(
"create",
[](std::vector<int> ranks) {
- return ::c10d::ProcessGroupMPI::createProcessGroupMPI(ranks);
+ return ::c10d::ProcessGroupMPI::createProcessGroupMPI(std::move(ranks));
},
py::call_guard<py::gil_scoped_release>());
#endif
@@ -2712,12 +2717,12 @@ such as `dist.all_reduce(tensor, async_op=True)`.
.def(
"boxed",
[](c10::intrusive_ptr<::c10d::Work> self) {
- return torch::jit::toPyObject(c10::IValue(self));
+ return torch::jit::toPyObject(c10::IValue(std::move(self)));
})
.def_static("unbox", [](py::object obj) {
auto typePtr =
torch::getCustomClass("__torch__.torch.classes.c10d.Work");
- auto ivalue = torch::jit::toIValue(obj, typePtr);
+ auto ivalue = torch::jit::toIValue(std::move(obj), typePtr);
return ivalue.toCustomClass<::c10d::Work>();
});
@@ -2787,12 +2792,11 @@ such as `dist.all_reduce(tensor, async_op=True)`.
// Define a lambda such that the pybind11 prototype can take a std::vector
// for the tensor list argument, but still pass it to the underlying
// function as a c10::ArrayRef.
- [](c10::intrusive_ptr<::c10d::ProcessGroup> process_group,
- std::vector<at::Tensor> tensors, // NOLINT
+ [](const c10::intrusive_ptr<::c10d::ProcessGroup>& process_group,
+ const std::vector<at::Tensor>& tensors,
size_t buffer_size,
int rank) {
- broadcast_coalesced(
- std::move(process_group), tensors, buffer_size, rank);
+ broadcast_coalesced(process_group, tensors, buffer_size, rank);
},
py::arg("process_group"),
py::arg("tensors"),
@@ -2875,7 +2879,7 @@ such as `dist.all_reduce(tensor, async_op=True)`.
module.def(
"_create_work_from_future",
- [](std::shared_ptr<jit::PythonFutureWrapper> future) {
+ [](const std::shared_ptr<jit::PythonFutureWrapper>& future) {
return ::c10d::Work::create_from_future(future->fut);
},
py::arg("future"),
@@ -2934,6 +2938,4 @@ PyMethodDef* python_functions() {
return methods;
}
-} // namespace c10d
-} // namespace distributed
-} // namespace torch
+} // namespace torch::distributed::c10d
diff --git a/torch/csrc/distributed/c10d/intra_node_comm.cpp b/torch/csrc/distributed/c10d/intra_node_comm.cpp
index 2579b38209..d18262ecfa 100644
--- a/torch/csrc/distributed/c10d/intra_node_comm.cpp
+++ b/torch/csrc/distributed/c10d/intra_node_comm.cpp
@@ -47,9 +47,6 @@ void* initTopoInfo(Topology topology, NvlMesh nvlMesh, size_t rank);
// Topology Detection
////////////////////////////////////////////////////////////////////////////////
-// TODO: find a better way to determine this
-static constexpr size_t kMaxNvLinks = 20;
-
static std::ostream& operator<<(std::ostream& os, const NvlMesh& nvlMesh) {
std::ostringstream oss;
for (size_t i = 0; i < kMaxDevices; ++i) {
@@ -98,6 +95,9 @@ static NvlMesh getNvlMesh(const std::vector<std::string>& rankToBusId) {
rankToBusId[r].c_str(), &devices[r]) == NVML_SUCCESS);
}
+ // TODO: find a better way to determine this
+ constexpr size_t kMaxNvLinks = 20;
+
// For each device, loop over devices connected to it via NVLink
for (size_t idx = 0; idx < worldSize; ++idx) {
for (size_t link = 0; link < kMaxNvLinks; ++link) {
diff --git a/torch/csrc/distributed/c10d/intra_node_comm.hpp b/torch/csrc/distributed/c10d/intra_node_comm.hpp
index 7d763c02c2..0e65ebf8d6 100644
--- a/torch/csrc/distributed/c10d/intra_node_comm.hpp
+++ b/torch/csrc/distributed/c10d/intra_node_comm.hpp
@@ -9,7 +9,7 @@
namespace c10d::intra_node_comm {
constexpr size_t kMaxDevices = 8;
-constexpr size_t kDefaultBufferSize = 10 * 1024 * 1024;
+constexpr size_t kDefaultBufferSize = 10ull * 1024 * 1024;
using NvlMesh = std::array<std::array<size_t, kMaxDevices>, kMaxDevices>;
using HybridCubeMesh = std::array<std::array<int, 4>, kMaxDevices>;
diff --git a/torch/csrc/distributed/c10d/logger.hpp b/torch/csrc/distributed/c10d/logger.hpp
index d4ac30cb5f..2ab7be9d03 100644
--- a/torch/csrc/distributed/c10d/logger.hpp
+++ b/torch/csrc/distributed/c10d/logger.hpp
@@ -2,6 +2,7 @@
#include <torch/csrc/distributed/c10d/reducer.hpp>
#include <mutex>
+#include <utility>
namespace c10d {
@@ -124,9 +125,8 @@ class TORCH_API C10dLogger {
protected:
// singletion, hide constructor from the public
- C10dLogger(const std::string& logDestination) {
- logDestination_ = logDestination;
- }
+ C10dLogger(std::string logDestination)
+ : logDestination_(std::move(logDestination)) {}
// the name of the destination this logger should log to
std::string logDestination_;
|
2.41.0
|
aad0554b4d5e64f828eaceeaf5d74d8348caed4
|
Mon, 15 Apr 2024 23:17:18 +0200
|
[PATCH 0179/1000] [Inductor] Fix endless recursion in codecache.DLLWrapper.__getattr__ (#123931)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/123931 Approved by: https://github.com/peterbell10
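A minimal sketch of the failure mode, assuming `__getattr__` consults `self.is_open` before delegating to the loaded library (only the `__init__` ordering is taken from the actual diff; the rest is illustrative):

```python
from ctypes import cdll

class DLLWrapperSketch:
    def __init__(self, lib_path: str):
        self.lib_path = lib_path
        # Set before LoadLibrary: if loading raises, `is_open` already exists in
        # __dict__, so __getattr__ fails cleanly instead of recursing.
        self.is_open = False
        self.DLL = cdll.LoadLibrary(lib_path)
        self.is_open = True

    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails. Before this fix, a
        # failed LoadLibrary left `is_open` unset, so reading self.is_open here
        # re-entered __getattr__ and recursed until RecursionError.
        if not self.is_open:
            raise RuntimeError(f"cannot use {self.lib_path}: library not open")
        return getattr(self.DLL, name)
```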
|
diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py
index 4e84838504..ede59ab828 100644
--- a/torch/_inductor/codecache.py
+++ b/torch/_inductor/codecache.py
@@ -2569,6 +2569,7 @@ class DLLWrapper:
lib_path: str,
):
self.lib_path = lib_path
+ self.is_open = False
self.DLL = cdll.LoadLibrary(lib_path)
self.is_open = True
|
2.41.0
|
cf62e86a47ee575b5fbb997fd00f60ef0163130
|
Tue, 16 Apr 2024 01:26:22 +0000
|
[PATCH 0180/1000] skip various unit tests for Jetson (#122531)
|
Skip multiprocessing, CUDA expandable segments, mem-efficient attention, and flash attention tests on Jetson due to hanging / SIGKILL issues found in NVIDIA internal testing. Pull Request resolved: https://github.com/pytorch/pytorch/pull/122531 Approved by: https://github.com/eqy, https://github.com/malfet
|
diff --git a/test/test_cuda_expandable_segments.py b/test/test_cuda_expandable_segments.py
index 123d2d2fe8..8b634b774e 100644
--- a/test/test_cuda_expandable_segments.py
+++ b/test/test_cuda_expandable_segments.py
@@ -4,9 +4,11 @@
import os
import torch
-if torch.cuda.is_available():
+from torch.testing._internal.common_cuda import IS_JETSON
+
+if torch.cuda.is_available() and not IS_JETSON:
torch.cuda.memory._set_allocator_settings('expandable_segments:True')
-current_dir = os.path.dirname(os.path.abspath(__file__))
-filepath = os.path.join(current_dir, 'test_cuda.py')
-exec(compile(open(filepath).read(), filepath, mode='exec'))
+ current_dir = os.path.dirname(os.path.abspath(__file__))
+ filepath = os.path.join(current_dir, 'test_cuda.py')
+ exec(compile(open(filepath).read(), filepath, mode='exec'))
diff --git a/test/test_multiprocessing.py b/test/test_multiprocessing.py
index b45c4cded0..d529494075 100644
--- a/test/test_multiprocessing.py
+++ b/test/test_multiprocessing.py
@@ -14,6 +14,7 @@ import torch.cuda
import torch.multiprocessing as mp
import torch.utils.hooks
from torch.nn import Parameter
+from torch.testing._internal.common_cuda import IS_JETSON
from torch.testing._internal.common_utils import (
IS_MACOS,
IS_WINDOWS,
@@ -36,12 +37,15 @@ load_tests = load_tests
TEST_REPEATS = 30
HAS_SHM_FILES = os.path.isdir("/dev/shm")
MAX_WAITING_TIME_IN_SECONDS = 30
+
TEST_CUDA_IPC = (
torch.cuda.is_available()
and sys.platform != "darwin"
and sys.platform != "win32"
+ and not IS_JETSON
and not TEST_WITH_ROCM
) # https://github.com/pytorch/pytorch/issues/90940
+
TEST_MULTIGPU = TEST_CUDA_IPC and torch.cuda.device_count() > 1
if TEST_CUDA_IPC:
diff --git a/test/test_transformers.py b/test/test_transformers.py
index e71c120876..73f838143d 100644
--- a/test/test_transformers.py
+++ b/test/test_transformers.py
@@ -40,7 +40,7 @@ from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
- SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
+ IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION
@@ -2570,6 +2570,7 @@ class TestSDPACudaOnly(NNTestCase):
# verified passing successfully on H100
@unittest.skipIf(not PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, "Does not support SDPA")
+ @unittest.skipIf(IS_JETSON, "causing sigkill on Jetson")
@parametrize("batch_size", [1, 8])
@parametrize("seq_len_q", [4, 8, 64, 128, 256, 512, 1024, 2048] if SM80OrLater else [4, 8, 64, 128, 256, 512])
@parametrize("seq_len_k", [4, 8, 64, 128, 256, 512, 1024, 2048] if SM80OrLater else [4, 8, 64, 128, 256, 512])
@@ -2671,6 +2672,7 @@ class TestSDPACudaOnly(NNTestCase):
@unittest.skipIf(not PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, "Does not support SDPA")
+ @unittest.skipIf(IS_JETSON, "causing sigkill on Jetson")
@parametrize("batch_size", [1, 8])
@parametrize("seq_len_q", [4, 8, 64, 128, 256, 312, 512, 1024, 2048] if SM80OrLater else [4, 8, 64, 128, 152, 256, 512])
@parametrize("seq_len_k", [4, 8, 64, 65, 128, 256, 408, 512, 1024, 2048] if SM80OrLater else [4, 8, 37, 64, 128, 256, 512])
@@ -2788,6 +2790,7 @@ class TestSDPACudaOnly(NNTestCase):
atol=grad_attn_mask_atol, rtol=grad_attn_mask_rtol)
@unittest.skipIf(not PLATFORM_SUPPORTS_FLASH_ATTENTION, "Does not support SDPA or pre-SM80 hardware")
+ @unittest.skipIf(IS_JETSON, "causing sigkill on Jetson")
@parametrize("batch_size", [1, 8])
@parametrize("seq_len_q", [4, 8, 64, 143, 256, 512, 1024, 2048])
@parametrize("seq_len_k", [4, 8, 64, 128, 256, 587, 1024, 2048])
diff --git a/torch/testing/_internal/common_cuda.py b/torch/testing/_internal/common_cuda.py
index ef8468c199..054f1a1357 100644
--- a/torch/testing/_internal/common_cuda.py
+++ b/torch/testing/_internal/common_cuda.py
@@ -31,6 +31,8 @@ SM75OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_devic
SM80OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (8, 0))
SM90OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (9, 0))
+IS_JETSON = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() in [(7, 2), (8, 7)])
+
def evaluate_gfx_arch_exact(matching_arch):
if not torch.cuda.is_available():
return False
|
2.41.0
|
eb626b46a4d606d6900aec8c690feb8a9d6e88b
|
Tue, 16 Apr 2024 01:39:42 +0000
|
[PATCH 0181/1000] [BE] Do not use `using namespace` in mps headers (#124117)
|
- Remove `using namespace std` from `MPSDevice.h`
- Add `std::` prefix to 1st argument of `MPSProfiler::StartTrace`
- Do the same in front of `numeric_limits` template instantiation in `ReduceOps.mm`

Co-authored-by: Nikita Shulga <2453524+malfet@users.noreply.github.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/124117 Approved by: https://github.com/malfet
|
diff --git a/aten/src/ATen/mps/MPSDevice.h b/aten/src/ATen/mps/MPSDevice.h
index 40ab070772..084820ab42 100644
--- a/aten/src/ATen/mps/MPSDevice.h
+++ b/aten/src/ATen/mps/MPSDevice.h
@@ -22,8 +22,6 @@ typedef void* MTLComputePipelineState_t;
typedef void* MTLLibrary_t;
#endif
-using namespace std;
-
namespace at::mps {
// Helper enum to check if a MPSGraph op is supported in a given macOS version
diff --git a/aten/src/ATen/mps/MPSProfiler.h b/aten/src/ATen/mps/MPSProfiler.h
index 994c50ad9e..59010d4861 100644
--- a/aten/src/ATen/mps/MPSProfiler.h
+++ b/aten/src/ATen/mps/MPSProfiler.h
@@ -9,12 +9,12 @@
#include <os/signpost.h>
#include <os/log.h>
+#include <atomic>
+#include <ctime>
#include <sstream>
#include <string>
-#include <atomic>
#include <unordered_map>
#include <utility>
-#include <ctime>
namespace at::mps {
@@ -296,7 +296,7 @@ public:
// during runtime (instead of environment variables).
// The "mode" could be either "interval", "event", or both "interval,event"
// for interval-based and/or event-based signpost tracing.
- void StartTrace(const string& mode, bool waitUntilCompleted);
+ void StartTrace(const std::string& mode, bool waitUntilCompleted);
void StopTrace();
// convenience functions to indicate whether signpost tracing or
diff --git a/aten/src/ATen/mps/MPSProfiler.mm b/aten/src/ATen/mps/MPSProfiler.mm
index e6e1a72579..c01ea57aa7 100644
--- a/aten/src/ATen/mps/MPSProfiler.mm
+++ b/aten/src/ATen/mps/MPSProfiler.mm
@@ -195,7 +195,7 @@ void MPSProfiler::initialize() {
}
}
-void MPSProfiler::StartTrace(const string& mode, bool waitUntilCompleted) {
+void MPSProfiler::StartTrace(const std::string& mode, bool waitUntilCompleted) {
TORCH_CHECK(m_profile_options == ProfileOptions::OPTIONS_NONE, "Tracing Signposts is already enabled ");
std::stringstream ss(mode);
diff --git a/aten/src/ATen/native/mps/operations/ReduceOps.mm b/aten/src/ATen/native/mps/operations/ReduceOps.mm
index 95a0b3b850..416c83f0d3 100644
--- a/aten/src/ATen/native/mps/operations/ReduceOps.mm
+++ b/aten/src/ATen/native/mps/operations/ReduceOps.mm
@@ -318,8 +318,8 @@ static void impl_func_norm_mps(const Tensor& input_tensor,
auto reciprocal_p = 1 / p;
bool pIsZero = (p == 0.0);
- bool pIsPosInf = (p == numeric_limits<double>::infinity());
- bool pIsNegInf = (p == -numeric_limits<double>::infinity());
+ bool pIsPosInf = (p == std::numeric_limits<double>::infinity());
+ bool pIsNegInf = (p == -std::numeric_limits<double>::infinity());
int64_t num_input_dims = input_shape.size();
int64_t num_reduce_dims = dim.size();
|
2.41.0
|
4c8002ee0714db7d19d98dc0c84108521c5ce6f
|
Tue, 16 Apr 2024 02:02:37 +0000
|
[PATCH 0182/1000] MPS FFT implementation bug (#123274)
|
The current implementation drops the negative-frequency components even when the user does not ask for the one-sided transform. The tests for the negative-frequency components seem to have worked by accident due to internal implementation details, but the issue becomes evident on macOS 14.4. Co-authored-by: Nikita Shulga <2453524+malfet@users.noreply.github.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/123274 Approved by: https://github.com/malfet
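For context, a small illustration (not part of the patch) of the one-sided vs. full-spectrum distinction the fix restores; for a real input the full spectrum carries the negative frequencies as conjugates of the positive ones:

```python
import torch

x = torch.randn(8)           # real input signal

full = torch.fft.fft(x)      # full spectrum: 8 bins, including negative frequencies
half = torch.fft.rfft(x)     # one-sided: only the 8 // 2 + 1 = 5 non-negative bins

# Hermitian symmetry of a real signal's spectrum: the negative-frequency half
# is the flipped conjugate of the positive-frequency half.
torch.testing.assert_close(full[:5], half)
torch.testing.assert_close(full[5:], half[1:4].flip(dims=[0]).conj())
```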
|
diff --git a/aten/src/ATen/native/mps/operations/FastFourierTransform.mm b/aten/src/ATen/native/mps/operations/FastFourierTransform.mm
index 697ab12d42..21fb75bb21 100644
--- a/aten/src/ATen/native/mps/operations/FastFourierTransform.mm
+++ b/aten/src/ATen/native/mps/operations/FastFourierTransform.mm
@@ -95,10 +95,23 @@ Tensor& _fft_r2c_mps_out(const Tensor& self, IntArrayRef dim, int64_t normalizat
auto inputTensor = mpsGraphRankedPlaceHolder(mpsGraph, self);
auto descriptor = [MPSGraphFFTDescriptor descriptor];
descriptor.scalingMode = normalization_to_ScalingMode(normalization);
- auto outputTensor = [mpsGraph realToHermiteanFFTWithTensor:inputTensor
- axes:IntArrayToNSArray(dim)
- descriptor:descriptor
- name:nil];
+ MPSGraphTensor* outputTensor;
+ if (onesided) {
+ // Return only unique results:
+ outputTensor = [mpsGraph realToHermiteanFFTWithTensor:inputTensor
+ axes:IntArrayToNSArray(dim)
+ descriptor:descriptor
+ name:nil];
+ } else {
+ // Return with Hermitean conjugate results:
+ auto useDataType =
+ (inputTensor.dataType == MPSDataTypeFloat16) ? MPSDataTypeComplexFloat16 : MPSDataTypeComplexFloat32;
+ auto cTensor = [mpsGraph castTensor:inputTensor toType:useDataType name:nil];
+ outputTensor = [mpsGraph fastFourierTransformWithTensor:cTensor
+ axes:IntArrayToNSArray(dim)
+ descriptor:descriptor
+ name:nil];
+ }
newCachedGraph->inputTensor_ = inputTensor;
newCachedGraph->outputTensor_ = outputTensor;
});
|
2.41.0
|
f1e3ff5a503a520c1a310c8e72a383657f9a4bc
|
Tue, 16 Apr 2024 02:20:58 +0000
|
[PATCH 0183/1000] [reland] `_foreach_copy` with different src/dst dtypes (#123844)
|
Attempt to reland https://github.com/pytorch/pytorch/pull/121717. The difference from the original PR is the added array bounds check. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123844 Approved by: https://github.com/janeyx99
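A usage sketch of what now stays on the fast path (assumes a CUDA device; shapes and dtypes are arbitrary illustrations, and `_foreach_copy_` is a private API):

```python
import torch

# Destinations and sources deliberately have different dtypes.
dst = [torch.zeros(4, device="cuda") for _ in range(3)]                        # float32
src = [torch.randn(4, device="cuda", dtype=torch.bfloat16) for _ in range(3)]  # bfloat16

# Reference result: one Tensor.copy_ per pair, which also casts bf16 -> fp32.
expected = [torch.empty_like(d).copy_(s) for d, s in zip(dst, src)]

# With this change the mixed-dtype case can use the fused multi_tensor_apply
# kernel instead of falling back to the per-tensor slow path.
torch._foreach_copy_(dst, src)
torch.testing.assert_close(dst, expected)
```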
|
diff --git a/aten/src/ATen/native/ForeachUtils.h b/aten/src/ATen/native/ForeachUtils.h
index 9c22c35ee9..d7a1449463 100644
--- a/aten/src/ATen/native/ForeachUtils.h
+++ b/aten/src/ATen/native/ForeachUtils.h
@@ -102,12 +102,13 @@ inline void check_foreach_api_restrictions(
// corresponding tensors (aligning in index across the tensorLists) share the
// same device and dtype.
inline bool _check_tensors_share_device_and_dtype(
- ArrayRef<TensorList> tensorLists) {
+ ArrayRef<TensorList> tensorLists,
+ const bool skip_dtype_check = false) {
const auto expected_dtype = tensorLists[0][0].dtype();
const auto expected_device = tensorLists[0][0].device();
auto is_tensor_okay = [&](const Tensor& tensor) {
- return tensor.dtype() == expected_dtype &&
+ return (skip_dtype_check || tensor.dtype() == expected_dtype) &&
tensor.device() == expected_device && tensor.layout() == at::kStrided &&
tensor.is_non_overlapping_and_dense();
};
diff --git a/aten/src/ATen/native/cuda/ForeachBinaryOpList.cu b/aten/src/ATen/native/cuda/ForeachBinaryOpList.cu
index 366049a540..7329360230 100644
--- a/aten/src/ATen/native/cuda/ForeachBinaryOpList.cu
+++ b/aten/src/ATen/native/cuda/ForeachBinaryOpList.cu
@@ -1,9 +1,11 @@
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/Dispatch.h>
+#include <ATen/cuda/CUDAContext.h>
#include <ATen/native/ForeachUtils.h>
#include <ATen/native/cuda/ForeachFunctors.cuh>
#include <ATen/native/cuda/ForeachMinMaxFunctors.cuh>
#include <functional>
+#include <type_traits>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/NativeFunctions.h>
@@ -250,20 +252,154 @@ FOREACH_BINARY_OP_LIST(
power_functor,
/*division_op*/ true);
-template <typename T>
-struct Identity {
- __device__ __forceinline__ T operator()(const T& x) {
- return x;
+template <typename dst_t, typename src_t = dst_t>
+struct Copy {
+ __device__ __forceinline__ dst_t operator()(const src_t& x) {
+ return static_cast<dst_t>(x);
}
};
+template <typename dst_t>
+struct Copy<dst_t, c10::complex<double>> {
+ __device__ __forceinline__ dst_t operator()(const c10::complex<double>& x) {
+ if constexpr (!(std::is_same_v<dst_t, c10::complex<double>> ||
+ std::is_same_v<dst_t, c10::complex<float>>)) {
+ return static_cast<dst_t>(x.real());
+ } else {
+ return static_cast<dst_t>(x);
+ }
+ }
+};
+
+template <typename dst_t>
+struct Copy<dst_t, c10::complex<float>> {
+ __device__ __forceinline__ dst_t operator()(const c10::complex<float>& x) {
+ if constexpr (!(std::is_same_v<dst_t, c10::complex<double>> ||
+ std::is_same_v<dst_t, c10::complex<float>>)) {
+ return static_cast<dst_t>(x.real());
+ } else {
+ return static_cast<dst_t>(x);
+ }
+ }
+};
+
+#define AT_DISPATCH_SOURCE_TYPES(TYPE, NAME, ...) \
+ AT_DISPATCH_SWITCH( \
+ TYPE, \
+ NAME, \
+ AT_PRIVATE_CASE_TYPE_USING_HINT( \
+ at::ScalarType::Byte, src_t, __VA_ARGS__) \
+ AT_PRIVATE_CASE_TYPE_USING_HINT( \
+ at::ScalarType::Char, src_t, __VA_ARGS__) \
+ AT_PRIVATE_CASE_TYPE_USING_HINT( \
+ at::ScalarType::Long, src_t, __VA_ARGS__) \
+ AT_PRIVATE_CASE_TYPE_USING_HINT( \
+ at::ScalarType::Short, src_t, __VA_ARGS__) \
+ AT_PRIVATE_CASE_TYPE_USING_HINT( \
+ at::ScalarType::Double, src_t, __VA_ARGS__) \
+ AT_PRIVATE_CASE_TYPE_USING_HINT( \
+ at::ScalarType::Float, src_t, __VA_ARGS__) \
+ AT_PRIVATE_CASE_TYPE_USING_HINT( \
+ at::ScalarType::ComplexDouble, \
+ src_t, \
+ __VA_ARGS__) \
+ AT_PRIVATE_CASE_TYPE_USING_HINT( \
+ at::ScalarType::ComplexFloat, \
+ src_t, \
+ __VA_ARGS__) \
+ AT_PRIVATE_CASE_TYPE_USING_HINT( \
+ at::ScalarType::Half, \
+ src_t, \
+ __VA_ARGS__) \
+ AT_PRIVATE_CASE_TYPE_USING_HINT( \
+ at::ScalarType::BFloat16, \
+ src_t, \
+ __VA_ARGS__) \
+ AT_PRIVATE_CASE_TYPE_USING_HINT( \
+ at::ScalarType::Bool, \
+ src_t, \
+ __VA_ARGS__))
+
+namespace {
+
+template <
+ typename T,
+ typename src_t,
+ int depth,
+ int r_args_depth,
+ int res_arg_index>
+struct CopyFunctor {
+ static_assert(depth == 2 && r_args_depth == 1 && res_arg_index == 1);
+ template <typename Op>
+ __device__ __forceinline__ void operator()(
+ int chunk_size,
+ TensorListMetadata<depth>& tl,
+ Op op) {
+ const auto tensor_loc = tl.block_to_tensor[blockIdx.x];
+ const auto chunk_idx = tl.block_to_chunk[blockIdx.x];
+ auto n = tl.numel_for_tensor[tensor_loc];
+
+ src_t* src_ptr = (src_t*)tl.addresses[0][tensor_loc];
+ src_ptr += chunk_idx * chunk_size;
+ T* self_ptr = (T*)tl.addresses[1][tensor_loc];
+ self_ptr += chunk_idx * chunk_size;
+
+ const bool all_aligned{is_aligned(src_ptr) && is_aligned(self_ptr)};
+
+ n -= chunk_idx * chunk_size;
+ src_t src_args[kILP];
+ T r_args[kILP];
+
+ // to make things simple, we put aligned case in a different code path
+ if (n % kILP == 0 && chunk_size % kILP == 0 && all_aligned) {
+ for (int64_t i_start = threadIdx.x;
+ i_start * kILP < n && i_start * kILP < chunk_size;
+ i_start += blockDim.x) {
+ // load
+ load_store(src_args, src_ptr, 0, i_start);
+#pragma unroll
+ for (int ii = 0; ii < kILP; ii++) {
+ r_args[ii] = static_cast<T>(op(src_args[ii]));
+ }
+ // store
+ load_store(self_ptr, r_args, i_start, 0);
+ }
+ } else {
+ for (int64_t i_start = 0; i_start < n && i_start < chunk_size;
+ i_start += blockDim.x * kILP) {
+#pragma unroll
+ for (int ii = 0; ii < kILP; ii++) {
+ const auto i = i_start + threadIdx.x + ii * blockDim.x;
+ if (i < n && i < chunk_size) {
+ src_args[ii] = src_ptr[i];
+ }
+ }
+#pragma unroll
+ for (int ii = 0; ii < kILP; ii++) {
+ r_args[ii] = static_cast<T>(op(src_args[ii]));
+ }
+ store_args(self_ptr, r_args, i_start, chunk_size, n);
+ }
+ }
+ }
+};
+
+} // anonymous namespace
+
void foreach_tensor_copy_list_kernel_cuda_(
TensorList self,
TensorList src,
const bool non_blocking) {
check_foreach_api_restrictions(self, src);
- if (!can_use_fast_route(
- self, src, /* does_op_promote_integer_inputs_to_float */ false)) {
+ if (!(_check_tensors_share_device_and_dtype(
+ {self, src}, /* skip_dtype_check */ true) &&
+ std::all_of(
+ src.cbegin(),
+ src.cend(),
+ [&](const auto& t) -> bool {
+ return t.dtype() == src[0].dtype();
+ }) &&
+ _check_tensors_share_sizes_and_strides({self, src}))) {
return at::native::foreach_tensor_copy_list_kernel_slow_(
self, src, non_blocking);
}
@@ -278,16 +414,38 @@ void foreach_tensor_copy_list_kernel_cuda_(
"foreach_tensor_copy",
[&]() {
using opmath_t = at::opmath_type<scalar_t>;
- multi_tensor_apply<2>(
- tensor_lists,
- UnaryOpFunctor<
- scalar_t,
- /* depth */ 2,
- /* r_args_depth */ 1,
- /* res_arg_index */ 1>(),
- Identity<opmath_t>());
+ AT_DISPATCH_SOURCE_TYPES(src[0].scalar_type(), "foreach_tensor_copy", [&] {
+ if constexpr (std::is_same_v<scalar_t, src_t>) {
+ multi_tensor_apply<2>(
+ tensor_lists,
+ UnaryOpFunctor<
+ scalar_t,
+ /* depth */ 2,
+ /* r_args_depth */ 1,
+ /* res_arg_index */ 1>(),
+ Copy<opmath_t, opmath_t>());
+ } else {
+ // Ref:
+ // https://github.com/pytorch/pytorch/blob/656134c38f4737d13c3f43fc5c59470bc23c1d2f/aten/src/ATen/native/Copy.cpp#L299-L301
+ if (!self[0].is_complex() && src[0].is_complex()) {
+ TORCH_WARN_ONCE(
+ "Casting complex values to real discards the imaginary part");
+ }
+ multi_tensor_apply<2>(
+ tensor_lists,
+ CopyFunctor<
+ scalar_t,
+ src_t,
+ /* depth */ 2,
+ /* r_args_depth */ 1,
+ /* res_arg_index */ 1>(),
+ Copy<scalar_t, src_t>());
+ }
+ });
});
increment_version(self);
}
+#undef AT_DISPATCH_SOURCE_TYPES
+
} // namespace at::native
diff --git a/test/test_foreach.py b/test/test_foreach.py
index 19d695762c..27867a4ace 100644
--- a/test/test_foreach.py
+++ b/test/test_foreach.py
@@ -1206,6 +1206,28 @@ class TestForeach(TestCase):
copy_(t, s, non_blocking)
self.assertEqual(ref_input, sample.input)
+ @onlyCUDA
+ @ops(filter(lambda op: op.name == "_foreach_copy", foreach_binary_op_db))
+ def test_foreach_copy_with_multi_dtypes(self, device, dtype, op):
+ # check (a) multi_tensor_apply is called and (b) numerical parity with for-loop and Tensor.copy_
+ foreach_copy_ = ForeachFuncWrapper(op.inplace_variant)
+ for sample in op.sample_inputs(device, dtype, noncontiguous=False):
+ for src_dtype in floating_types_and(torch.half, torch.bfloat16):
+ if src_dtype == dtype:
+ continue
+ self_tensors = [t.clone() for t in sample.input]
+ src_tensors = [t.to(src_dtype) for t in self_tensors]
+ out = foreach_copy_(
+ (self_tensors, src_tensors), is_cuda=True, expect_fastpath=True
+ )
+ self.assertEqual(
+ out,
+ [
+ torch.empty_like(t).copy_(s)
+ for t, s in zip(self_tensors, src_tensors)
+ ],
+ )
+
# Test reverse-mode & forward-mode AD if supported.
@onlyCUDA
@ops(
|
2.41.0
|
babf00014d160b29fea4f00435754e677af33a3
|
Mon, 15 Apr 2024 10:56:16 -0700
|
[PATCH 0184/1000] [inductor] Bypass FX graph cache when we have HigherOrderOperators (#123325)
|
Summary: The initial motivation was to avoid caching when we have triton higher order ops, but it's probably safer to avoid the cache for all higher order ops and allow/implement if/when we find it necessary. Test Plan: Unit test cribbed from: https://docs-preview.pytorch.org/pytorch/tutorials/2783/recipes/torch_compile_user_defined_triton_kernel_tutorial.html?highlight=triton Pull Request resolved: https://github.com/pytorch/pytorch/pull/123325 Approved by: https://github.com/eellison
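The new bypass condition, restated as a standalone helper for readers who want to probe their own graphs (the function name is ours, not part of the PR):

```python
import torch

def has_higher_order_op(gm: torch.fx.GraphModule) -> bool:
    # Any node whose target is a HigherOrderOperator (e.g. the wrapper created
    # for a user-defined triton kernel) disqualifies the graph from the FX
    # graph cache and triggers BypassFxGraphCache.
    return any(
        isinstance(node.target, torch._ops.HigherOrderOperator)
        for node in gm.graph.nodes
    )
```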
|
diff --git a/test/inductor/test_codecache.py b/test/inductor/test_codecache.py
index 04ab69debc..e8ace5cd86 100644
--- a/test/inductor/test_codecache.py
+++ b/test/inductor/test_codecache.py
@@ -37,6 +37,10 @@ from torch.utils._triton import has_triton
HAS_TRITON = has_triton()
+if HAS_TRITON:
+ import triton
+ from torch.testing._internal.triton_utils import add_kernel
+
requires_gpu = functools.partial(unittest.skipIf, not HAS_GPU, "requires gpu")
requires_triton = functools.partial(unittest.skipIf, not HAS_TRITON, "requires triton")
@@ -289,6 +293,33 @@ class TestFxGraphCache(TestCase):
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 2)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
+ @requires_gpu()
+ @requires_triton()
+ @config.patch({"fx_graph_cache": True})
+ def test_higher_order_op_bypass(self):
+ """
+ Verify that we bypass the cache when we have higher order ops.
+ """
+
+ def fn(x, y):
+ output = torch.zeros_like(x)
+ n_elements = output.numel()
+ grid = lambda meta: ( # noqa: E731
+ triton.cdiv(n_elements, meta["BLOCK_SIZE"]),
+ )
+ add_kernel[grid](x, y, output, n_elements, BLOCK_SIZE=4)
+ return output
+
+ compiled_fn = torch.compile(fn, fullgraph=True)
+
+ x = torch.randn(4, device="cuda")
+ y = torch.randn(4, device="cuda")
+ compiled_fn(x, y)
+
+ self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 0)
+ self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
+ self.assertGreater(counters["inductor"]["fxgraph_cache_bypass"], 0)
+
@config.patch({"fx_graph_cache": True})
def test_generated_kernel_count(self):
"""
diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py
index ede59ab828..7287aa2e80 100644
--- a/torch/_inductor/codecache.py
+++ b/torch/_inductor/codecache.py
@@ -836,21 +836,27 @@ class FxGraphCache:
write_atomic(path, content)
@staticmethod
- def _check_can_cache():
+ def _check_can_cache(gm: torch.fx.GraphModule):
"""
Check some conditions that would preclude caching and raise BypassFxGraphCache
to bypass in case caching is not possible.
"""
+ # Freezing can embed constants that wouldn't be static across runs.
if config.freezing or config.aot_inductor.use_runtime_constant_folding:
- # Freezing can embed constants that wouldn't be static across runs.
raise BypassFxGraphCache()
+ # The treatment of guards in the caching implementation requires that
+ # we have a shape env.
if FxGraphCache._get_shape_env() is None:
- # The treatment of guards in the caching implementation requires that
- # we have a shape env.
log.debug("fx graph cache no shape env")
raise BypassFxGraphCache()
+ # HigherOrderOperators should be handled on a case-by-case basis.
+ # Currently, we just skip caching if we have any.
+ for node in gm.graph.nodes:
+ if isinstance(node.target, torch._ops.HigherOrderOperator):
+ raise BypassFxGraphCache()
+
@staticmethod
def load(
compile_fx_fn: Callable[..., Any],
@@ -866,7 +872,7 @@ class FxGraphCache:
compiled_graph = None
try:
- FxGraphCache._check_can_cache()
+ FxGraphCache._check_can_cache(gm)
key = compiled_fx_graph_hash(gm, example_inputs, fx_kwargs)
lock_path = os.path.join(get_lock_dir(), key + ".lock")
|
2.41.0
|
60efaa471f5b53f45ff67cc18fc67e02ea41b3a
|
Mon, 15 Apr 2024 17:32:39 +0800
|
[PATCH 0186/1000] Part 1: UFMT partial files in torch/optim due to the pr-sanity-checks (#124053)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124053 Approved by: https://github.com/ezyang ghstack dependencies: #124048
|
diff --git a/.lintrunner.toml b/.lintrunner.toml
index 73e225e9ee..d94ce427b7 100644
--- a/.lintrunner.toml
+++ b/.lintrunner.toml
@@ -2112,16 +2112,6 @@ exclude_patterns = [
'torch/nn/utils/rnn.py',
'torch/nn/utils/spectral_norm.py',
'torch/nn/utils/weight_norm.py',
- 'torch/optim/__init__.py',
- 'torch/optim/_functional.py',
- 'torch/optim/_multi_tensor/__init__.py',
- 'torch/optim/adadelta.py',
- 'torch/optim/adagrad.py',
- 'torch/optim/adam.py',
- 'torch/optim/adamax.py',
- 'torch/optim/adamw.py',
- 'torch/optim/asgd.py',
- 'torch/optim/lbfgs.py',
'torch/optim/lr_scheduler.py',
'torch/optim/nadam.py',
'torch/optim/optimizer.py',
diff --git a/torch/optim/__init__.py b/torch/optim/__init__.py
index 878842bfa4..5e836b4047 100644
--- a/torch/optim/__init__.py
+++ b/torch/optim/__init__.py
@@ -6,22 +6,21 @@ enough, so that more sophisticated ones can also be easily integrated in the
future.
"""
+from . import lr_scheduler, swa_utils
from .adadelta import Adadelta
from .adagrad import Adagrad
from .adam import Adam
-from .adamw import AdamW
-from .sparse_adam import SparseAdam
from .adamax import Adamax
+from .adamw import AdamW
from .asgd import ASGD
-from .sgd import SGD
+from .lbfgs import LBFGS
+from .nadam import NAdam
+from .optimizer import Optimizer
from .radam import RAdam
-from .rprop import Rprop
from .rmsprop import RMSprop
-from .optimizer import Optimizer
-from .nadam import NAdam
-from .lbfgs import LBFGS
-from . import lr_scheduler
-from . import swa_utils
+from .rprop import Rprop
+from .sgd import SGD
+from .sparse_adam import SparseAdam
del adadelta # noqa: F821
del adagrad # noqa: F821
diff --git a/torch/optim/_functional.py b/torch/optim/_functional.py
index 7ea361d8ef..4a6198956f 100644
--- a/torch/optim/_functional.py
+++ b/torch/optim/_functional.py
@@ -1,35 +1,38 @@
r"""Functional interface."""
import math
-from torch import Tensor
from typing import List
-from .adadelta import adadelta # type: ignore[attr-defined] # noqa: F401
-from .adagrad import adagrad, _make_sparse # type: ignore[attr-defined] # noqa: F401
-from .adam import adam # type: ignore[attr-defined] # noqa: F401
-from .adamw import adamw # type: ignore[attr-defined] # noqa: F401
-from .adamax import adamax # type: ignore[attr-defined] # noqa: F401
-from .asgd import asgd # type: ignore[attr-defined] # noqa: F401
-from .nadam import nadam # type: ignore[attr-defined] # noqa: F401
-from .radam import radam # type: ignore[attr-defined] # noqa: F401
-from .rmsprop import rmsprop # type: ignore[attr-defined] # noqa: F401
-from .rprop import rprop # type: ignore[attr-defined] # noqa: F401
-from .sgd import sgd # type: ignore[attr-defined] # noqa: F401
+from torch import Tensor
+
+from .adadelta import adadelta # type: ignore[attr-defined] # noqa: F401
+from .adagrad import _make_sparse, adagrad # type: ignore[attr-defined] # noqa: F401
+from .adam import adam # type: ignore[attr-defined] # noqa: F401
+from .adamax import adamax # type: ignore[attr-defined] # noqa: F401
+from .adamw import adamw # type: ignore[attr-defined] # noqa: F401
+from .asgd import asgd # type: ignore[attr-defined] # noqa: F401
+from .nadam import nadam # type: ignore[attr-defined] # noqa: F401
+from .radam import radam # type: ignore[attr-defined] # noqa: F401
+from .rmsprop import rmsprop # type: ignore[attr-defined] # noqa: F401
+from .rprop import rprop # type: ignore[attr-defined] # noqa: F401
+from .sgd import sgd # type: ignore[attr-defined] # noqa: F401
# TODO: use foreach API in optim._functional to do all the computation
-def sparse_adam(params: List[Tensor],
- grads: List[Tensor],
- exp_avgs: List[Tensor],
- exp_avg_sqs: List[Tensor],
- state_steps: List[int],
- *,
- eps: float,
- beta1: float,
- beta2: float,
- lr: float,
- maximize: bool):
+def sparse_adam(
+ params: List[Tensor],
+ grads: List[Tensor],
+ exp_avgs: List[Tensor],
+ exp_avg_sqs: List[Tensor],
+ state_steps: List[int],
+ *,
+ eps: float,
+ beta1: float,
+ beta2: float,
+ lr: float,
+ maximize: bool,
+):
r"""Functional API that performs Sparse Adam algorithm computation.
See :class:`~torch.optim.SparseAdam` for details.
@@ -49,7 +52,6 @@ def sparse_adam(params: List[Tensor],
exp_avg_sq = exp_avg_sqs[i]
step = state_steps[i]
-
def make_sparse(values):
constructor = grad.new
if grad_indices.dim() == 0 or values.dim() == 0:
@@ -63,7 +65,9 @@ def sparse_adam(params: List[Tensor],
exp_avg_update_values = grad_values.sub(old_exp_avg_values).mul_(1 - beta1)
exp_avg.add_(make_sparse(exp_avg_update_values))
old_exp_avg_sq_values = exp_avg_sq.sparse_mask(grad)._values()
- exp_avg_sq_update_values = grad_values.pow(2).sub_(old_exp_avg_sq_values).mul_(1 - beta2)
+ exp_avg_sq_update_values = (
+ grad_values.pow(2).sub_(old_exp_avg_sq_values).mul_(1 - beta2)
+ )
exp_avg_sq.add_(make_sparse(exp_avg_sq_update_values))
# Dense addition again is intended, avoiding another sparse_mask
@@ -72,8 +76,8 @@ def sparse_adam(params: List[Tensor],
denom = exp_avg_sq_update_values.sqrt_().add_(eps)
del exp_avg_update_values, exp_avg_sq_update_values
- bias_correction1 = 1 - beta1 ** step
- bias_correction2 = 1 - beta2 ** step
+ bias_correction1 = 1 - beta1**step
+ bias_correction2 = 1 - beta2**step
step_size = lr * math.sqrt(bias_correction2) / bias_correction1
param.add_(make_sparse(-step_size * numer.div_(denom)))
diff --git a/torch/optim/_multi_tensor/__init__.py b/torch/optim/_multi_tensor/__init__.py
index 32ea419566..8918197447 100644
--- a/torch/optim/_multi_tensor/__init__.py
+++ b/torch/optim/_multi_tensor/__init__.py
@@ -5,10 +5,11 @@ enough, so that more sophisticated ones can be also easily integrated in the
future.
"""
from functools import partialmethod
+
from torch import optim
-def partialclass(cls, *args, **kwargs):
+def partialclass(cls, *args, **kwargs):
class NewCls(cls):
__init__ = partialmethod(cls.__init__, *args, **kwargs)
diff --git a/torch/optim/adadelta.py b/torch/optim/adadelta.py
index 2f53aa78f5..4061a6b68f 100644
--- a/torch/optim/adadelta.py
+++ b/torch/optim/adadelta.py
@@ -1,9 +1,19 @@
+from typing import List, Optional
+
import torch
from torch import Tensor
-from .optimizer import (Optimizer, _use_grad_for_differentiable, _default_to_fused_or_foreach,
- _differentiable_doc, _foreach_doc, _maximize_doc, _capturable_doc, _view_as_real, _get_scalar_dtype)
-from typing import List, Optional
+from .optimizer import (
+ _capturable_doc,
+ _default_to_fused_or_foreach,
+ _differentiable_doc,
+ _foreach_doc,
+ _get_scalar_dtype,
+ _maximize_doc,
+ _use_grad_for_differentiable,
+ _view_as_real,
+ Optimizer,
+)
__all__ = ["Adadelta", "adadelta"]
@@ -52,13 +62,19 @@ class Adadelta(Optimizer):
group.setdefault("capturable", False)
for p in group["params"]:
p_state = self.state.get(p, [])
- if len(p_state) != 0 and not torch.is_tensor(p_state['step']):
+ if len(p_state) != 0 and not torch.is_tensor(p_state["step"]):
step_val = float(p_state["step"])
- p_state["step"] = (torch.tensor(step_val, dtype=_get_scalar_dtype(), device=p.device)
- if group['capturable']
- else torch.tensor(step_val, dtype=_get_scalar_dtype()))
-
- def _init_group(self, group, params_with_grad, grads, square_avgs, acc_deltas, state_steps):
+ p_state["step"] = (
+ torch.tensor(
+ step_val, dtype=_get_scalar_dtype(), device=p.device
+ )
+ if group["capturable"]
+ else torch.tensor(step_val, dtype=_get_scalar_dtype())
+ )
+
+ def _init_group(
+ self, group, params_with_grad, grads, square_avgs, acc_deltas, state_steps
+ ):
has_complex = False
for p in group["params"]:
if p.grad is None:
@@ -73,8 +89,11 @@ class Adadelta(Optimizer):
# Lazy state initialization
if len(state) == 0:
- state["step"] = (torch.zeros((), dtype=_get_scalar_dtype(), device=p.device)
- if group["capturable"] else torch.zeros((), dtype=_get_scalar_dtype()))
+ state["step"] = (
+ torch.zeros((), dtype=_get_scalar_dtype(), device=p.device)
+ if group["capturable"]
+ else torch.zeros((), dtype=_get_scalar_dtype())
+ )
state["square_avg"] = torch.zeros_like(
p, memory_format=torch.preserve_format
@@ -110,7 +129,16 @@ class Adadelta(Optimizer):
square_avgs = []
acc_deltas = []
state_steps = []
- lr, rho, eps, weight_decay, foreach, maximize, differentiable, capturable = (
+ (
+ lr,
+ rho,
+ eps,
+ weight_decay,
+ foreach,
+ maximize,
+ differentiable,
+ capturable,
+ ) = (
group["lr"],
group["rho"],
group["eps"],
@@ -118,10 +146,12 @@ class Adadelta(Optimizer):
group["foreach"],
group["maximize"],
group["differentiable"],
- group["capturable"]
+ group["capturable"],
)
- has_complex = self._init_group(group, params_with_grad, grads, square_avgs, acc_deltas, state_steps)
+ has_complex = self._init_group(
+ group, params_with_grad, grads, square_avgs, acc_deltas, state_steps
+ )
adadelta(
params_with_grad,
@@ -143,7 +173,8 @@ class Adadelta(Optimizer):
return loss
-Adadelta.__doc__ = r"""Implements Adadelta algorithm.
+Adadelta.__doc__ = (
+ r"""Implements Adadelta algorithm.
.. math::
\begin{aligned}
@@ -170,7 +201,8 @@ Adadelta.__doc__ = r"""Implements Adadelta algorithm.
\end{aligned}
For further details regarding the algorithm we refer to `ADADELTA: An Adaptive Learning Rate Method`_.
- """ + fr"""
+ """
+ + rf"""
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
@@ -192,6 +224,7 @@ Adadelta.__doc__ = r"""Implements Adadelta algorithm.
https://arxiv.org/abs/1212.5701
"""
+)
def adadelta(
@@ -220,12 +253,18 @@ def adadelta(
# this check is slow during compilation, so we skip it
# if it's strictly needed we can add this check back in dynamo
- if not torch._utils.is_compiling() and not all(isinstance(t, torch.Tensor) for t in state_steps):
- raise RuntimeError("API has changed, `state_steps` argument must contain a list of singleton tensors")
+ if not torch._utils.is_compiling() and not all(
+ isinstance(t, torch.Tensor) for t in state_steps
+ ):
+ raise RuntimeError(
+ "API has changed, `state_steps` argument must contain a list of singleton tensors"
+ )
# We still respect when the user inputs False for foreach.
if foreach is None:
- _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False)
+ _, foreach = _default_to_fused_or_foreach(
+ params, differentiable, use_fused=False
+ )
if foreach and torch.jit.is_scripting():
raise RuntimeError("torch.jit.script not supported with foreach optimizers")
@@ -270,10 +309,11 @@ def _single_tensor_adadelta(
):
# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
if not torch._utils.is_compiling() and capturable:
- assert all(p.is_cuda and step.is_cuda for p, step in zip(params, state_steps)), \
- "If capturable=True, params and state_steps must be CUDA tensors."
+ assert all(
+ p.is_cuda and step.is_cuda for p, step in zip(params, state_steps)
+ ), "If capturable=True, params and state_steps must be CUDA tensors."
- for (param, grad, square_avg, acc_delta, step) in zip(
+ for param, grad, square_avg, acc_delta, step in zip(
params, grads, square_avgs, acc_deltas, state_steps
):
step += 1
@@ -316,28 +356,40 @@ def _multi_tensor_adadelta(
capturable: bool,
has_complex: bool,
):
-
assert not differentiable, "_foreach ops don't support autograd"
# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
if not torch._utils.is_compiling() and capturable:
- assert all(p.is_cuda and step.is_cuda for p, step in zip(params, state_steps)), \
- "If capturable=True, params and state_steps must be CUDA tensors."
+ assert all(
+ p.is_cuda and step.is_cuda for p, step in zip(params, state_steps)
+ ), "If capturable=True, params and state_steps must be CUDA tensors."
if len(params) == 0:
return
- grouped_tensors = Optimizer._group_tensors_by_device_and_dtype([params, grads, square_avgs, acc_deltas, state_steps])
- for ((device_params, device_grads, device_square_avgs, device_acc_deltas, device_state_steps), _) in grouped_tensors.values():
+ grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
+ [params, grads, square_avgs, acc_deltas, state_steps]
+ )
+ for (
+ device_params,
+ device_grads,
+ device_square_avgs,
+ device_acc_deltas,
+ device_state_steps,
+ ), _ in grouped_tensors.values():
if has_complex:
- _view_as_real(device_params, device_grads, device_square_avgs, device_acc_deltas)
+ _view_as_real(
+ device_params, device_grads, device_square_avgs, device_acc_deltas
+ )
# Update steps
# If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over
# and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just
# wrapped it once now. The alpha is required to assure we go to the right overload.
if device_state_steps[0].is_cpu:
- torch._foreach_add_(device_state_steps, torch.tensor(1.0, device='cpu'), alpha=1.0)
+ torch._foreach_add_(
+ device_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
+ )
else:
torch._foreach_add_(device_state_steps, 1)
@@ -349,10 +401,14 @@ def _multi_tensor_adadelta(
if maximize:
torch._foreach_add_(device_grads, device_params, alpha=weight_decay)
else:
- device_grads = torch._foreach_add(device_grads, device_params, alpha=weight_decay)
+ device_grads = torch._foreach_add(
+ device_grads, device_params, alpha=weight_decay
+ )
torch._foreach_mul_(device_square_avgs, rho)
- torch._foreach_addcmul_(device_square_avgs, device_grads, device_grads, value=1 - rho)
+ torch._foreach_addcmul_(
+ device_square_avgs, device_grads, device_grads, value=1 - rho
+ )
std = torch._foreach_add(device_square_avgs, eps)
torch._foreach_sqrt_(std)
diff --git a/torch/optim/adagrad.py b/torch/optim/adagrad.py
index e839142a8f..e1d0422b4d 100644
--- a/torch/optim/adagrad.py
+++ b/torch/optim/adagrad.py
@@ -1,10 +1,19 @@
+from typing import List, Optional
+
import torch
from torch import Tensor
-from .optimizer import (Optimizer, _use_grad_for_differentiable, _get_value, _view_as_real,
- _default_to_fused_or_foreach, _get_scalar_dtype, _differentiable_doc,
- _foreach_doc, _maximize_doc)
-from typing import List, Optional
+from .optimizer import (
+ _default_to_fused_or_foreach,
+ _differentiable_doc,
+ _foreach_doc,
+ _get_scalar_dtype,
+ _get_value,
+ _maximize_doc,
+ _use_grad_for_differentiable,
+ _view_as_real,
+ Optimizer,
+)
__all__ = ["Adagrad", "adagrad"]
@@ -116,7 +125,9 @@ class Adagrad(Optimizer):
state_sums = []
state_steps = []
- has_sparse_grad, has_complex = self._init_group(group, params_with_grad, grads, state_sums, state_steps)
+ has_sparse_grad, has_complex = self._init_group(
+ group, params_with_grad, grads, state_sums, state_steps
+ )
adagrad(
params_with_grad,
@@ -137,7 +148,8 @@ class Adagrad(Optimizer):
return loss
-Adagrad.__doc__ = r"""Implements Adagrad algorithm.
+Adagrad.__doc__ = (
+ r"""Implements Adagrad algorithm.
.. math::
\begin{aligned}
@@ -162,7 +174,8 @@ Adagrad.__doc__ = r"""Implements Adagrad algorithm.
For further details regarding the algorithm we refer to `Adaptive Subgradient Methods for Online Learning
and Stochastic Optimization`_.
- """ + fr"""
+ """
+ + rf"""
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
@@ -179,6 +192,7 @@ Adagrad.__doc__ = r"""Implements Adagrad algorithm.
Optimization: http://jmlr.org/papers/v12/duchi11a.html
"""
+)
def adagrad(
@@ -209,7 +223,9 @@ def adagrad(
)
if foreach is None:
- _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False)
+ _, foreach = _default_to_fused_or_foreach(
+ params, differentiable, use_fused=False
+ )
if foreach and torch.jit.is_scripting():
raise RuntimeError("torch.jit.script not supported with foreach optimizers")
@@ -257,8 +273,7 @@ def _single_tensor_adagrad(
differentiable: bool,
has_complex: bool,
):
-
- for (param, grad, state_sum, step_t) in zip(params, grads, state_sums, state_steps):
+ for param, grad, state_sum, step_t in zip(params, grads, state_sums, state_steps):
# update step
step_t += 1
step = _get_value(step_t)
@@ -316,16 +331,24 @@ def _multi_tensor_adagrad(
differentiable: bool,
has_complex: bool,
):
-
assert not differentiable, "_foreach ops don't support autograd"
# Foreach functions will throw errors if given empty lists
if len(params) == 0:
return
- grouped_tensorlists = Optimizer._group_tensors_by_device_and_dtype([params, grads, state_sums, state_steps])
- for ((device_params, device_grads, device_state_sums, device_state_steps), _) in grouped_tensorlists.values():
- device_has_sparse_grad = has_sparse_grad and any(grad.is_sparse for grad in device_grads)
+ grouped_tensorlists = Optimizer._group_tensors_by_device_and_dtype(
+ [params, grads, state_sums, state_steps]
+ )
+ for (
+ device_params,
+ device_grads,
+ device_state_sums,
+ device_state_steps,
+ ), _ in grouped_tensorlists.values():
+ device_has_sparse_grad = has_sparse_grad and any(
+ grad.is_sparse for grad in device_grads
+ )
if device_has_sparse_grad:
_single_tensor_adagrad(
@@ -356,7 +379,9 @@ def _multi_tensor_adagrad(
# and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just
# wrapped it once now. The alpha is required to assure we go to the right overload.
if device_state_steps[0].is_cpu:
- torch._foreach_add_(device_state_steps, torch.tensor(1.0, device='cpu'), alpha=1.0)
+ torch._foreach_add_(
+ device_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
+ )
else:
torch._foreach_add_(device_state_steps, 1)
@@ -365,9 +390,13 @@ def _multi_tensor_adagrad(
if maximize:
torch._foreach_add_(device_grads, device_params, alpha=weight_decay)
else:
- device_grads = torch._foreach_add(device_grads, device_params, alpha=weight_decay)
+ device_grads = torch._foreach_add(
+ device_grads, device_params, alpha=weight_decay
+ )
- minus_clr = [-lr / (1 + (_get_value(step) - 1) * lr_decay) for step in device_state_steps]
+ minus_clr = [
+ -lr / (1 + (_get_value(step) - 1) * lr_decay) for step in device_state_steps
+ ]
torch._foreach_addcmul_(device_state_sums, device_grads, device_grads, value=1)
diff --git a/torch/optim/adam.py b/torch/optim/adam.py
index 386bbc2705..cd45a197b3 100644
--- a/torch/optim/adam.py
+++ b/torch/optim/adam.py
@@ -1,34 +1,50 @@
-from typing import List, Optional, Union, Tuple
+from typing import List, Optional, Tuple, Union
import torch
from torch import Tensor
-from .optimizer import (Optimizer, ParamsT, _use_grad_for_differentiable, _get_value,
- _stack_if_compiling, _dispatch_sqrt, _default_to_fused_or_foreach,
- _get_scalar_dtype, _capturable_doc, _differentiable_doc, _foreach_doc,
- _fused_doc, _maximize_doc, _view_as_real)
from torch.utils._foreach_utils import _get_fused_kernels_supported_devices
-
-__all__ = ['Adam', 'adam']
+from .optimizer import (
+ _capturable_doc,
+ _default_to_fused_or_foreach,
+ _differentiable_doc,
+ _dispatch_sqrt,
+ _foreach_doc,
+ _fused_doc,
+ _get_scalar_dtype,
+ _get_value,
+ _maximize_doc,
+ _stack_if_compiling,
+ _use_grad_for_differentiable,
+ _view_as_real,
+ Optimizer,
+ ParamsT,
+)
+
+__all__ = ["Adam", "adam"]
class Adam(Optimizer):
- def __init__(self,
- params: ParamsT,
- lr: Union[float, Tensor] = 1e-3,
- betas: Tuple[float, float] = (0.9, 0.999),
- eps: float = 1e-8,
- weight_decay: float = 0,
- amsgrad: bool = False,
- *,
- foreach: Optional[bool] = None,
- maximize: bool = False,
- capturable: bool = False,
- differentiable: bool = False,
- fused: Optional[bool] = None):
+ def __init__(
+ self,
+ params: ParamsT,
+ lr: Union[float, Tensor] = 1e-3,
+ betas: Tuple[float, float] = (0.9, 0.999),
+ eps: float = 1e-8,
+ weight_decay: float = 0,
+ amsgrad: bool = False,
+ *,
+ foreach: Optional[bool] = None,
+ maximize: bool = False,
+ capturable: bool = False,
+ differentiable: bool = False,
+ fused: Optional[bool] = None,
+ ):
if not 0.0 <= lr:
raise ValueError(f"Invalid learning rate: {lr}")
if isinstance(lr, Tensor) and foreach and not capturable:
- raise ValueError("lr as a Tensor is not supported for capturable=False and foreach=True")
+ raise ValueError(
+ "lr as a Tensor is not supported for capturable=False and foreach=True"
+ )
if not 0.0 <= eps:
raise ValueError(f"Invalid epsilon value: {eps}")
if not 0.0 <= betas[0] < 1.0:
@@ -38,10 +54,18 @@ class Adam(Optimizer):
if not 0.0 <= weight_decay:
raise ValueError(f"Invalid weight_decay value: {weight_decay}")
- defaults = dict(lr=lr, betas=betas, eps=eps,
- weight_decay=weight_decay, amsgrad=amsgrad,
- maximize=maximize, foreach=foreach, capturable=capturable,
- differentiable=differentiable, fused=fused)
+ defaults = dict(
+ lr=lr,
+ betas=betas,
+ eps=eps,
+ weight_decay=weight_decay,
+ amsgrad=amsgrad,
+ maximize=maximize,
+ foreach=foreach,
+ capturable=capturable,
+ differentiable=differentiable,
+ fused=fused,
+ )
super().__init__(params, defaults)
if fused:
@@ -54,30 +78,39 @@ class Adam(Optimizer):
# alleviate the loss of information.
fused_supported_devices = _get_fused_kernels_supported_devices()
if not all(
- p.device.type in fused_supported_devices and
- torch.is_floating_point(p) for pg in self.param_groups for p in pg['params']
+ p.device.type in fused_supported_devices and torch.is_floating_point(p)
+ for pg in self.param_groups
+ for p in pg["params"]
):
- raise RuntimeError("`fused=True` requires all the params to be floating point Tensors of "
- f"supported devices: {fused_supported_devices}.")
+ raise RuntimeError(
+ "`fused=True` requires all the params to be floating point Tensors of "
+ f"supported devices: {fused_supported_devices}."
+ )
if foreach:
raise RuntimeError("`fused` and `foreach` cannot be `True` together.")
def __setstate__(self, state):
super().__setstate__(state)
for group in self.param_groups:
- group.setdefault('amsgrad', False)
- group.setdefault('maximize', False)
- group.setdefault('foreach', None)
- group.setdefault('capturable', False)
- group.setdefault('differentiable', False)
- fused = group.setdefault('fused', None)
+ group.setdefault("amsgrad", False)
+ group.setdefault("maximize", False)
+ group.setdefault("foreach", None)
+ group.setdefault("capturable", False)
+ group.setdefault("differentiable", False)
+ fused = group.setdefault("fused", None)
for p in group["params"]:
p_state = self.state.get(p, [])
- if len(p_state) != 0 and not torch.is_tensor(p_state['step']):
+ if len(p_state) != 0 and not torch.is_tensor(p_state["step"]):
step_val = float(p_state["step"])
- p_state["step"] = (torch.tensor(step_val, dtype=_get_scalar_dtype(is_fused=fused), device=p.device)
- if group['capturable'] or group['fused']
- else torch.tensor(step_val, dtype=_get_scalar_dtype()))
+ p_state["step"] = (
+ torch.tensor(
+ step_val,
+ dtype=_get_scalar_dtype(is_fused=fused),
+ device=p.device,
+ )
+ if group["capturable"] or group["fused"]
+ else torch.tensor(step_val, dtype=_get_scalar_dtype())
+ )
def _init_group(
self,
@@ -87,15 +120,17 @@ class Adam(Optimizer):
exp_avgs,
exp_avg_sqs,
max_exp_avg_sqs,
- state_steps
+ state_steps,
):
has_complex = False
- for p in group['params']:
+ for p in group["params"]:
if p.grad is not None:
has_complex |= torch.is_complex(p)
params_with_grad.append(p)
if p.grad.is_sparse:
- raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
+ raise RuntimeError(
+ "Adam does not support sparse gradients, please consider SparseAdam instead"
+ )
grads.append(p.grad)
state = self.state[p]
@@ -104,32 +139,50 @@ class Adam(Optimizer):
# note(crcrpar): [special device hosting for step]
# Deliberately host `step` on CPU if both capturable and fused are off.
# This is because kernel launches are costly on CUDA and XLA.
- state['step'] = (
- torch.zeros((), dtype=_get_scalar_dtype(is_fused=group['fused']), device=p.device)
- if group['capturable'] or group['fused']
+ state["step"] = (
+ torch.zeros(
+ (),
+ dtype=_get_scalar_dtype(is_fused=group["fused"]),
+ device=p.device,
+ )
+ if group["capturable"] or group["fused"]
else torch.tensor(0.0, dtype=_get_scalar_dtype())
)
# Exponential moving average of gradient values
- state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
+ state["exp_avg"] = torch.zeros_like(
+ p, memory_format=torch.preserve_format
+ )
# Exponential moving average of squared gradient values
- state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
- if group['amsgrad']:
+ state["exp_avg_sq"] = torch.zeros_like(
+ p, memory_format=torch.preserve_format
+ )
+ if group["amsgrad"]:
# Maintains max of all exp. moving avg. of sq. grad. values
- state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
-
- exp_avgs.append(state['exp_avg'])
- exp_avg_sqs.append(state['exp_avg_sq'])
-
- if group['amsgrad']:
- max_exp_avg_sqs.append(state['max_exp_avg_sq'])
- if group['differentiable'] and state['step'].requires_grad:
- raise RuntimeError('`requires_grad` is not supported for `step` in differentiable mode')
+ state["max_exp_avg_sq"] = torch.zeros_like(
+ p, memory_format=torch.preserve_format
+ )
+
+ exp_avgs.append(state["exp_avg"])
+ exp_avg_sqs.append(state["exp_avg_sq"])
+
+ if group["amsgrad"]:
+ max_exp_avg_sqs.append(state["max_exp_avg_sq"])
+ if group["differentiable"] and state["step"].requires_grad:
+ raise RuntimeError(
+ "`requires_grad` is not supported for `step` in differentiable mode"
+ )
# Foreach without capturable does not support a tensor lr
- if group['foreach'] and torch.is_tensor(group['lr']) and not group['capturable']:
- raise RuntimeError('lr as a Tensor is not supported for capturable=False and foreach=True')
+ if (
+ group["foreach"]
+ and torch.is_tensor(group["lr"])
+ and not group["capturable"]
+ ):
+ raise RuntimeError(
+ "lr as a Tensor is not supported for capturable=False and foreach=True"
+ )
- state_steps.append(state['step'])
+ state_steps.append(state["step"])
return has_complex
@_use_grad_for_differentiable
@@ -154,7 +207,7 @@ class Adam(Optimizer):
exp_avg_sqs = []
max_exp_avg_sqs = []
state_steps = []
- beta1, beta2 = group['betas']
+ beta1, beta2 = group["betas"]
has_complex = self._init_group(
group,
@@ -163,7 +216,8 @@ class Adam(Optimizer):
exp_avgs,
exp_avg_sqs,
max_exp_avg_sqs,
- state_steps)
+ state_steps,
+ )
adam(
params_with_grad,
@@ -172,18 +226,18 @@ class Adam(Optimizer):
exp_avg_sqs,
max_exp_avg_sqs,
state_steps,
- amsgrad=group['amsgrad'],
+ amsgrad=group["amsgrad"],
has_complex=has_complex,
beta1=beta1,
beta2=beta2,
- lr=group['lr'],
- weight_decay=group['weight_decay'],
- eps=group['eps'],
- maximize=group['maximize'],
- foreach=group['foreach'],
- capturable=group['capturable'],
- differentiable=group['differentiable'],
- fused=group['fused'],
+ lr=group["lr"],
+ weight_decay=group["weight_decay"],
+ eps=group["eps"],
+ maximize=group["maximize"],
+ foreach=group["foreach"],
+ capturable=group["capturable"],
+ differentiable=group["differentiable"],
+ fused=group["fused"],
grad_scale=getattr(self, "grad_scale", None),
found_inf=getattr(self, "found_inf", None),
)
@@ -191,7 +245,8 @@ class Adam(Optimizer):
return loss
-Adam.__doc__ = r"""Implements Adam algorithm.
+Adam.__doc__ = (
+ r"""Implements Adam algorithm.
.. math::
\begin{aligned}
@@ -229,7 +284,8 @@ Adam.__doc__ = r"""Implements Adam algorithm.
\end{aligned}
For further details regarding the algorithm we refer to `Adam: A Method for Stochastic Optimization`_.
- """ + fr"""
+ """
+ + rf"""
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
@@ -255,31 +311,34 @@ Adam.__doc__ = r"""Implements Adam algorithm.
https://openreview.net/forum?id=ryQu7f-RZ
"""
+)
-def adam(params: List[Tensor],
- grads: List[Tensor],
- exp_avgs: List[Tensor],
- exp_avg_sqs: List[Tensor],
- max_exp_avg_sqs: List[Tensor],
- state_steps: List[Tensor],
- # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
- # setting this as kwarg for now as functional API is compiled by torch/distributed/optim
- foreach: Optional[bool] = None,
- capturable: bool = False,
- differentiable: bool = False,
- fused: Optional[bool] = None,
- grad_scale: Optional[Tensor] = None,
- found_inf: Optional[Tensor] = None,
- has_complex: bool = False,
- *,
- amsgrad: bool,
- beta1: float,
- beta2: float,
- lr: Union[float, Tensor],
- weight_decay: float,
- eps: float,
- maximize: bool):
+def adam(
+ params: List[Tensor],
+ grads: List[Tensor],
+ exp_avgs: List[Tensor],
+ exp_avg_sqs: List[Tensor],
+ max_exp_avg_sqs: List[Tensor],
+ state_steps: List[Tensor],
+ # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
+ # setting this as kwarg for now as functional API is compiled by torch/distributed/optim
+ foreach: Optional[bool] = None,
+ capturable: bool = False,
+ differentiable: bool = False,
+ fused: Optional[bool] = None,
+ grad_scale: Optional[Tensor] = None,
+ found_inf: Optional[Tensor] = None,
+ has_complex: bool = False,
+ *,
+ amsgrad: bool,
+ beta1: float,
+ beta2: float,
+ lr: Union[float, Tensor],
+ weight_decay: float,
+ eps: float,
+ maximize: bool,
+):
r"""Functional API that performs Adam algorithm computation.
See :class:`~torch.optim.Adam` for details.
@@ -289,7 +348,9 @@ def adam(params: List[Tensor],
# and pass False to use_fused. This is not a mistake--we want to give the fused impl
# bake-in time before making it the default, even if it is typically faster.
if fused is None and foreach is None:
- _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False)
+ _, foreach = _default_to_fused_or_foreach(
+ params, differentiable, use_fused=False
+ )
# Do not flip on foreach for the unsupported case where lr is a Tensor and capturable=False.
if foreach and isinstance(lr, Tensor) and not capturable:
foreach = False
@@ -300,11 +361,15 @@ def adam(params: List[Tensor],
# this check is slow during compilation, so we skip it
# if it's strictly needed we can add this check back in dynamo
- if not torch._utils.is_compiling() and not all(isinstance(t, torch.Tensor) for t in state_steps):
- raise RuntimeError("API has changed, `state_steps` argument must contain a list of singleton tensors")
+ if not torch._utils.is_compiling() and not all(
+ isinstance(t, torch.Tensor) for t in state_steps
+ ):
+ raise RuntimeError(
+ "API has changed, `state_steps` argument must contain a list of singleton tensors"
+ )
if foreach and torch.jit.is_scripting():
- raise RuntimeError('torch.jit.script not supported with foreach optimizers')
+ raise RuntimeError("torch.jit.script not supported with foreach optimizers")
if fused and torch.jit.is_scripting():
raise RuntimeError("torch.jit.script not supported with fused optimizers")
@@ -315,46 +380,49 @@ def adam(params: List[Tensor],
else:
func = _single_tensor_adam
- func(params,
- grads,
- exp_avgs,
- exp_avg_sqs,
- max_exp_avg_sqs,
- state_steps,
- amsgrad=amsgrad,
- has_complex=has_complex,
- beta1=beta1,
- beta2=beta2,
- lr=lr,
- weight_decay=weight_decay,
- eps=eps,
- maximize=maximize,
- capturable=capturable,
- differentiable=differentiable,
- grad_scale=grad_scale,
- found_inf=found_inf)
-
-
-def _single_tensor_adam(params: List[Tensor],
- grads: List[Tensor],
- exp_avgs: List[Tensor],
- exp_avg_sqs: List[Tensor],
- max_exp_avg_sqs: List[Tensor],
- state_steps: List[Tensor],
- grad_scale: Optional[Tensor],
- found_inf: Optional[Tensor],
- *,
- amsgrad: bool,
- has_complex: bool,
- beta1: float,
- beta2: float,
- lr: Union[float, Tensor],
- weight_decay: float,
- eps: float,
- maximize: bool,
- capturable: bool,
- differentiable: bool):
-
+ func(
+ params,
+ grads,
+ exp_avgs,
+ exp_avg_sqs,
+ max_exp_avg_sqs,
+ state_steps,
+ amsgrad=amsgrad,
+ has_complex=has_complex,
+ beta1=beta1,
+ beta2=beta2,
+ lr=lr,
+ weight_decay=weight_decay,
+ eps=eps,
+ maximize=maximize,
+ capturable=capturable,
+ differentiable=differentiable,
+ grad_scale=grad_scale,
+ found_inf=found_inf,
+ )
+
+
+def _single_tensor_adam(
+ params: List[Tensor],
+ grads: List[Tensor],
+ exp_avgs: List[Tensor],
+ exp_avg_sqs: List[Tensor],
+ max_exp_avg_sqs: List[Tensor],
+ state_steps: List[Tensor],
+ grad_scale: Optional[Tensor],
+ found_inf: Optional[Tensor],
+ *,
+ amsgrad: bool,
+ has_complex: bool,
+ beta1: float,
+ beta2: float,
+ lr: Union[float, Tensor],
+ weight_decay: float,
+ eps: float,
+ maximize: bool,
+ capturable: bool,
+ differentiable: bool,
+):
assert grad_scale is None and found_inf is None
if torch.jit.is_scripting():
@@ -371,8 +439,8 @@ def _single_tensor_adam(params: List[Tensor],
# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
if not torch._utils.is_compiling() and capturable:
- assert (
- (param.is_cuda and step_t.is_cuda) or (param.is_xla and step_t.is_xla)
+ assert (param.is_cuda and step_t.is_cuda) or (
+ param.is_xla and step_t.is_xla
), "If capturable=True, params and state_steps must be CUDA or XLA tensors."
# update step
@@ -396,8 +464,8 @@ def _single_tensor_adam(params: List[Tensor],
if capturable or differentiable:
step = step_t
- bias_correction1 = 1 - beta1 ** step
- bias_correction2 = 1 - beta2 ** step
+ bias_correction1 = 1 - beta1**step
+ bias_correction2 = 1 - beta2**step
step_size = lr / bias_correction1
step_size_neg = step_size.neg()
@@ -416,16 +484,20 @@ def _single_tensor_adam(params: List[Tensor],
# Uses the max. for normalizing running avg. of gradient
# Folds in (admittedly ugly) 1-elem step_size math here to avoid extra param-set-sized read+write
# (can't fold it into addcdiv_ below because addcdiv_ requires value is a Number, not a Tensor)
- denom = (max_exp_avg_sqs[i].sqrt() / (bias_correction2_sqrt * step_size_neg)).add_(eps / step_size_neg)
+ denom = (
+ max_exp_avg_sqs[i].sqrt() / (bias_correction2_sqrt * step_size_neg)
+ ).add_(eps / step_size_neg)
else:
- denom = (exp_avg_sq.sqrt() / (bias_correction2_sqrt * step_size_neg)).add_(eps / step_size_neg)
+ denom = (
+ exp_avg_sq.sqrt() / (bias_correction2_sqrt * step_size_neg)
+ ).add_(eps / step_size_neg)
param.addcdiv_(exp_avg, denom)
else:
step = _get_value(step_t)
- bias_correction1 = 1 - beta1 ** step
- bias_correction2 = 1 - beta2 ** step
+ bias_correction1 = 1 - beta1**step
+ bias_correction2 = 1 - beta2**step
step_size = lr / bias_correction1
@@ -447,57 +519,70 @@ def _single_tensor_adam(params: List[Tensor],
max_exp_avg_sqs[i] = torch.view_as_complex(max_exp_avg_sqs[i])
-def _multi_tensor_adam(params: List[Tensor],
- grads: List[Tensor],
- exp_avgs: List[Tensor],
- exp_avg_sqs: List[Tensor],
- max_exp_avg_sqs: List[Tensor],
- state_steps: List[Tensor],
- grad_scale: Optional[Tensor],
- found_inf: Optional[Tensor],
- *,
- amsgrad: bool,
- has_complex: bool,
- beta1: float,
- beta2: float,
- lr: Union[float, Tensor],
- weight_decay: float,
- eps: float,
- maximize: bool,
- capturable: bool,
- differentiable: bool):
+def _multi_tensor_adam(
+ params: List[Tensor],
+ grads: List[Tensor],
+ exp_avgs: List[Tensor],
+ exp_avg_sqs: List[Tensor],
+ max_exp_avg_sqs: List[Tensor],
+ state_steps: List[Tensor],
+ grad_scale: Optional[Tensor],
+ found_inf: Optional[Tensor],
+ *,
+ amsgrad: bool,
+ has_complex: bool,
+ beta1: float,
+ beta2: float,
+ lr: Union[float, Tensor],
+ weight_decay: float,
+ eps: float,
+ maximize: bool,
+ capturable: bool,
+ differentiable: bool,
+):
if len(params) == 0:
return
if isinstance(lr, Tensor) and not capturable:
- raise RuntimeError("lr as a Tensor is not supported for capturable=False and foreach=True")
+ raise RuntimeError(
+ "lr as a Tensor is not supported for capturable=False and foreach=True"
+ )
# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
if not torch._utils.is_compiling() and capturable:
- assert all(p.is_cuda and step.is_cuda for p, step in zip(params, state_steps)), \
- "If capturable=True, params and state_steps must be CUDA tensors."
+ assert all(
+ p.is_cuda and step.is_cuda for p, step in zip(params, state_steps)
+ ), "If capturable=True, params and state_steps must be CUDA tensors."
assert grad_scale is None and found_inf is None
assert not differentiable, "_foreach ops don't support autograd"
grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
- [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps])
- for ((
+ [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps]
+ )
+ for (
device_params,
device_grads,
device_exp_avgs,
device_exp_avg_sqs,
device_max_exp_avg_sqs,
device_state_steps,
- ), _) in grouped_tensors.values():
-
+ ), _ in grouped_tensors.values():
# Handle complex parameters
if has_complex:
if amsgrad:
- _view_as_real(device_params, device_grads, device_exp_avgs, device_exp_avg_sqs, device_max_exp_avg_sqs)
+ _view_as_real(
+ device_params,
+ device_grads,
+ device_exp_avgs,
+ device_exp_avg_sqs,
+ device_max_exp_avg_sqs,
+ )
else:
- _view_as_real(device_params, device_grads, device_exp_avgs, device_exp_avg_sqs)
+ _view_as_real(
+ device_params, device_grads, device_exp_avgs, device_exp_avg_sqs
+ )
if maximize:
device_grads = torch._foreach_neg(device_grads)
@@ -507,7 +592,9 @@ def _multi_tensor_adam(params: List[Tensor],
# and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just
# wrapped it once now. The alpha is required to assure we go to the right overload.
if device_state_steps[0].is_cpu:
- torch._foreach_add_(device_state_steps, torch.tensor(1.0, device='cpu'), alpha=1.0)
+ torch._foreach_add_(
+ device_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
+ )
else:
torch._foreach_add_(device_state_steps, 1)
@@ -516,13 +603,17 @@ def _multi_tensor_adam(params: List[Tensor],
if maximize:
torch._foreach_add_(device_grads, device_params, alpha=weight_decay)
else:
- device_grads = torch._foreach_add(device_grads, device_params, alpha=weight_decay)
+ device_grads = torch._foreach_add(
+ device_grads, device_params, alpha=weight_decay
+ )
# Decay the first and second moment running average coefficient
torch._foreach_lerp_(device_exp_avgs, device_grads, 1 - beta1)
torch._foreach_mul_(device_exp_avg_sqs, beta2)
- torch._foreach_addcmul_(device_exp_avg_sqs, device_grads, device_grads, 1 - beta2)
+ torch._foreach_addcmul_(
+ device_exp_avg_sqs, device_grads, device_grads, 1 - beta2
+ )
# Delete the local intermediate since it won't be used anymore to save on peak memory
del device_grads
@@ -564,8 +655,12 @@ def _multi_tensor_adam(params: List[Tensor],
# at this point, exp_avg_sq_sqrt = - (1 - beta^t) * [sqrt(exp_avg_sq / (1 - beta2^t)) + eps] / lr
torch._foreach_addcdiv_(device_params, device_exp_avgs, exp_avg_sq_sqrt)
else:
- bias_correction1 = [1 - beta1 ** _get_value(step) for step in device_state_steps]
- bias_correction2 = [1 - beta2 ** _get_value(step) for step in device_state_steps]
+ bias_correction1 = [
+ 1 - beta1 ** _get_value(step) for step in device_state_steps
+ ]
+ bias_correction2 = [
+ 1 - beta2 ** _get_value(step) for step in device_state_steps
+ ]
step_size = _stack_if_compiling([(lr / bc) * -1 for bc in bias_correction1])
@@ -582,7 +677,9 @@ def _multi_tensor_adam(params: List[Tensor],
torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt)
torch._foreach_add_(exp_avg_sq_sqrt, eps)
- torch._foreach_addcdiv_(device_params, device_exp_avgs, exp_avg_sq_sqrt, step_size)
+ torch._foreach_addcdiv_(
+ device_params, device_exp_avgs, exp_avg_sq_sqrt, step_size
+ )
def _fused_adam(
@@ -611,21 +708,31 @@ def _fused_adam(
if differentiable:
raise RuntimeError("Adam with fused=True does not support differentiable=True")
- grad_scale_dict = {grad_scale.device: grad_scale} if grad_scale is not None else None
+ grad_scale_dict = (
+ {grad_scale.device: grad_scale} if grad_scale is not None else None
+ )
found_inf_dict = {found_inf.device: found_inf} if found_inf is not None else None
# We only shuffle around the lr when it is a Tensor and on CUDA, otherwise, we prefer
# treating it as a scalar.
- lr_dict = {lr.device: lr} if isinstance(lr, Tensor) and str(lr.device) != "cpu" else None
+ lr_dict = (
+ {lr.device: lr} if isinstance(lr, Tensor) and str(lr.device) != "cpu" else None
+ )
grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
- [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps])
- for (device, _), ((device_params,
- device_grads,
- device_exp_avgs,
- device_exp_avg_sqs,
- device_max_exp_avg_sqs,
- device_state_steps,), _) in grouped_tensors.items():
+ [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps]
+ )
+ for (device, _), (
+ (
+ device_params,
+ device_grads,
+ device_exp_avgs,
+ device_exp_avg_sqs,
+ device_max_exp_avg_sqs,
+ device_state_steps,
+ ),
+ _,
+ ) in grouped_tensors.items():
device_grad_scale, device_found_inf = None, None
if grad_scale is not None:
if device not in grad_scale_dict:
@@ -657,4 +764,6 @@ def _fused_adam(
found_inf=device_found_inf,
)
if device_found_inf is not None:
- torch._foreach_sub_(device_state_steps, [device_found_inf] * len(device_state_steps))
+ torch._foreach_sub_(
+ device_state_steps, [device_found_inf] * len(device_state_steps)
+ )
diff --git a/torch/optim/adamax.py b/torch/optim/adamax.py
index a5406ce8e9..2269c8484d 100644
--- a/torch/optim/adamax.py
+++ b/torch/optim/adamax.py
@@ -1,10 +1,20 @@
+from typing import List, Optional
+
import torch
from torch import Tensor
-from .optimizer import (Optimizer, _use_grad_for_differentiable, _get_value, _default_to_fused_or_foreach,
- _get_scalar_dtype, _differentiable_doc, _maximize_doc, _foreach_doc, _view_as_real,
- _capturable_doc)
-from typing import List, Optional
+from .optimizer import (
+ _capturable_doc,
+ _default_to_fused_or_foreach,
+ _differentiable_doc,
+ _foreach_doc,
+ _get_scalar_dtype,
+ _get_value,
+ _maximize_doc,
+ _use_grad_for_differentiable,
+ _view_as_real,
+ Optimizer,
+)
__all__ = ["Adamax", "adamax"]
@@ -55,12 +65,19 @@ class Adamax(Optimizer):
group.setdefault("capturable", False)
for p in group["params"]:
p_state = self.state.get(p, [])
- if len(p_state) != 0 and not torch.is_tensor(p_state['step']):
+ if len(p_state) != 0 and not torch.is_tensor(p_state["step"]):
step_val = float(p_state["step"])
- p_state["step"] = (torch.tensor(step_val, dtype=_get_scalar_dtype(), device=p.device) if group['capturable']
- else torch.tensor(step_val, dtype=_get_scalar_dtype()))
-
- def _init_group(self, group, params_with_grad, grads, exp_avgs, exp_infs, state_steps):
+ p_state["step"] = (
+ torch.tensor(
+ step_val, dtype=_get_scalar_dtype(), device=p.device
+ )
+ if group["capturable"]
+ else torch.tensor(step_val, dtype=_get_scalar_dtype())
+ )
+
+ def _init_group(
+ self, group, params_with_grad, grads, exp_avgs, exp_infs, state_steps
+ ):
has_complex = False
for p in group["params"]:
if p.grad is None:
@@ -75,8 +92,11 @@ class Adamax(Optimizer):
# State initialization
if len(state) == 0:
- state['step'] = (torch.zeros((), dtype=_get_scalar_dtype(), device=p.device)
- if group['capturable'] else torch.tensor(0.0, dtype=_get_scalar_dtype()))
+ state["step"] = (
+ torch.zeros((), dtype=_get_scalar_dtype(), device=p.device)
+ if group["capturable"]
+ else torch.tensor(0.0, dtype=_get_scalar_dtype())
+ )
state["exp_avg"] = torch.zeros_like(
p, memory_format=torch.preserve_format
)
@@ -121,7 +141,9 @@ class Adamax(Optimizer):
differentiable = group["differentiable"]
capturable = group["capturable"]
- has_complex = self._init_group(group, params_with_grad, grads, exp_avgs, exp_infs, state_steps)
+ has_complex = self._init_group(
+ group, params_with_grad, grads, exp_avgs, exp_infs, state_steps
+ )
adamax(
params_with_grad,
@@ -144,7 +166,8 @@ class Adamax(Optimizer):
return loss
-Adamax.__doc__ = r"""Implements Adamax algorithm (a variant of Adam based on infinity norm).
+Adamax.__doc__ = (
+ r"""Implements Adamax algorithm (a variant of Adam based on infinity norm).
.. math::
\begin{aligned}
@@ -169,7 +192,8 @@ Adamax.__doc__ = r"""Implements Adamax algorithm (a variant of Adam based on inf
\end{aligned}
For further details regarding the algorithm we refer to `Adam: A Method for Stochastic Optimization`_.
- """ + fr"""
+ """
+ + rf"""
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
@@ -188,6 +212,7 @@ Adamax.__doc__ = r"""Implements Adamax algorithm (a variant of Adam based on inf
https://arxiv.org/abs/1412.6980
"""
+)
def adamax(
@@ -221,7 +246,9 @@ def adamax(
)
if foreach is None:
- _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False)
+ _, foreach = _default_to_fused_or_foreach(
+ params, differentiable, use_fused=False
+ )
if foreach and torch.jit.is_scripting():
raise RuntimeError("torch.jit.script not supported with foreach optimizers")
@@ -302,14 +329,15 @@ def _single_tensor_adamax(
)
else:
norm_buf = torch.cat(
- [exp_inf.mul_(beta2).unsqueeze(0), grad.abs().add_(eps).unsqueeze_(0)], 0
+ [exp_inf.mul_(beta2).unsqueeze(0), grad.abs().add_(eps).unsqueeze_(0)],
+ 0,
)
exp_inf.copy_(torch.amax(norm_buf, 0, keepdim=False))
if capturable:
# why jump through extra hoops and negate bias_correction? check out #121238
# once fixed, we should use bias_correction with addcdiv value=-1 for readability
- neg_bias_correction = beta1 ** step_t - 1
+ neg_bias_correction = beta1**step_t - 1
neg_bias_correction.div_(lr)
denom = exp_inf * neg_bias_correction
param.addcdiv_(exp_avg, denom)
@@ -337,21 +365,35 @@ def _multi_tensor_adamax(
capturable: bool,
has_complex: bool,
):
-
assert not differentiable, "_foreach ops don't support autograd"
if len(params) == 0:
return
# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
- if (not torch._utils.is_compiling() and capturable
- and not all(p.is_cuda and step.is_cuda for p, step in zip(params, state_steps))):
- raise RuntimeError("If capturable=True, params and state_steps must be CUDA tensors.")
+ if (
+ not torch._utils.is_compiling()
+ and capturable
+ and not all(p.is_cuda and step.is_cuda for p, step in zip(params, state_steps))
+ ):
+ raise RuntimeError(
+ "If capturable=True, params and state_steps must be CUDA tensors."
+ )
- grouped_tensors = Optimizer._group_tensors_by_device_and_dtype([params, grads, exp_avgs, exp_infs, state_steps])
- for ((grouped_params, grouped_grads, grouped_exp_avgs, grouped_exp_infs, grouped_state_steps), _) in grouped_tensors.values():
+ grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
+ [params, grads, exp_avgs, exp_infs, state_steps]
+ )
+ for (
+ grouped_params,
+ grouped_grads,
+ grouped_exp_avgs,
+ grouped_exp_infs,
+ grouped_state_steps,
+ ), _ in grouped_tensors.values():
if has_complex:
- _view_as_real(grouped_params, grouped_grads, grouped_exp_avgs, grouped_exp_infs)
+ _view_as_real(
+ grouped_params, grouped_grads, grouped_exp_avgs, grouped_exp_infs
+ )
if maximize:
grouped_grads = torch._foreach_neg(grouped_grads)
@@ -361,7 +403,9 @@ def _multi_tensor_adamax(
# and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just
# wrapped it once now. The alpha is required to assure we go to the right overload.
if grouped_state_steps[0].is_cpu:
- torch._foreach_add_(grouped_state_steps, torch.tensor(1.0, device='cpu'), alpha=1.0)
+ torch._foreach_add_(
+ grouped_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
+ )
else:
torch._foreach_add_(grouped_state_steps, 1)
@@ -370,8 +414,9 @@ def _multi_tensor_adamax(
# Re-use the intermediate memory (grouped_grads) already allocated for maximize
torch._foreach_add_(grouped_grads, grouped_params, alpha=weight_decay)
else:
- grouped_grads = torch._foreach_add(grouped_grads, grouped_params, alpha=weight_decay)
-
+ grouped_grads = torch._foreach_add(
+ grouped_grads, grouped_params, alpha=weight_decay
+ )
# Update biased first moment estimate.
torch._foreach_lerp_(grouped_exp_avgs, grouped_grads, 1 - beta1)
@@ -398,6 +443,10 @@ def _multi_tensor_adamax(
denom = torch._foreach_mul(grouped_exp_infs, bias_corrections)
torch._foreach_addcdiv_(grouped_params, grouped_exp_avgs, denom)
else:
- bias_corrections = [1 - beta1 ** _get_value(step) for step in grouped_state_steps]
+ bias_corrections = [
+ 1 - beta1 ** _get_value(step) for step in grouped_state_steps
+ ]
step_size = [(lr / bc) * -1 for bc in bias_corrections]
- torch._foreach_addcdiv_(grouped_params, grouped_exp_avgs, grouped_exp_infs, step_size)
+ torch._foreach_addcdiv_(
+ grouped_params, grouped_exp_avgs, grouped_exp_infs, step_size
+ )
diff --git a/torch/optim/adamw.py b/torch/optim/adamw.py
index f97e66e6f3..bbe03c1ce5 100644
--- a/torch/optim/adamw.py
+++ b/torch/optim/adamw.py
@@ -1,11 +1,24 @@
+from typing import List, Optional, Tuple, Union
+
import torch
from torch import Tensor
-from .optimizer import (Optimizer, _use_grad_for_differentiable, _get_value, _dispatch_sqrt,
- _stack_if_compiling, _get_scalar_dtype, _capturable_doc, _differentiable_doc,
- _foreach_doc, _fused_doc, _maximize_doc, _default_to_fused_or_foreach,
- ParamsT, _view_as_real)
-from typing import List, Optional, Tuple, Union
from torch.utils._foreach_utils import _get_fused_kernels_supported_devices
+from .optimizer import (
+ _capturable_doc,
+ _default_to_fused_or_foreach,
+ _differentiable_doc,
+ _dispatch_sqrt,
+ _foreach_doc,
+ _fused_doc,
+ _get_scalar_dtype,
+ _get_value,
+ _maximize_doc,
+ _stack_if_compiling,
+ _use_grad_for_differentiable,
+ _view_as_real,
+ Optimizer,
+ ParamsT,
+)
__all__ = ["AdamW", "adamw"]
@@ -29,7 +42,9 @@ class AdamW(Optimizer):
if not 0.0 <= lr:
raise ValueError(f"Invalid learning rate: {lr}")
if isinstance(lr, Tensor) and foreach and not capturable:
- raise ValueError("lr as a Tensor is not supported for capturable=False and foreach=True")
+ raise ValueError(
+ "lr as a Tensor is not supported for capturable=False and foreach=True"
+ )
if not 0.0 <= eps:
raise ValueError(f"Invalid epsilon value: {eps}")
if not 0.0 <= betas[0] < 1.0:
@@ -62,12 +77,14 @@ class AdamW(Optimizer):
# alleviate the loss of information.
fused_supported_devices = _get_fused_kernels_supported_devices()
if not all(
- p.device.type in fused_supported_devices and
- torch.is_floating_point(p)
- for pg in self.param_groups for p in pg['params']
+ p.device.type in fused_supported_devices and torch.is_floating_point(p)
+ for pg in self.param_groups
+ for p in pg["params"]
):
- raise RuntimeError("`fused=True` requires all the params to be floating point Tensors of "
- f"supported devices: {fused_supported_devices}.")
+ raise RuntimeError(
+ "`fused=True` requires all the params to be floating point Tensors of "
+ f"supported devices: {fused_supported_devices}."
+ )
if foreach:
raise RuntimeError("`fused` and `foreach` cannot be `True` together.")
@@ -82,11 +99,17 @@ class AdamW(Optimizer):
fused = group.setdefault("fused", None)
for p in group["params"]:
p_state = self.state.get(p, [])
- if len(p_state) != 0 and not torch.is_tensor(p_state['step']):
+ if len(p_state) != 0 and not torch.is_tensor(p_state["step"]):
step_val = float(p_state["step"])
- p_state["step"] = (torch.tensor(step_val, dtype=_get_scalar_dtype(is_fused=fused), device=p.device)
- if group['capturable'] or group['fused']
- else torch.tensor(step_val, dtype=_get_scalar_dtype()))
+ p_state["step"] = (
+ torch.tensor(
+ step_val,
+ dtype=_get_scalar_dtype(is_fused=fused),
+ device=p.device,
+ )
+ if group["capturable"] or group["fused"]
+ else torch.tensor(step_val, dtype=_get_scalar_dtype())
+ )
def _init_group(
self,
@@ -116,7 +139,11 @@ class AdamW(Optimizer):
# note(crcrpar): Deliberately host `step` on CPU if both capturable and fused are off.
# This is because kernel launches are costly on CUDA and XLA.
state["step"] = (
- torch.zeros((), dtype=_get_scalar_dtype(is_fused=group["fused"]), device=p.device)
+ torch.zeros(
+ (),
+ dtype=_get_scalar_dtype(is_fused=group["fused"]),
+ device=p.device,
+ )
if group["capturable"] or group["fused"]
else torch.tensor(0.0, dtype=_get_scalar_dtype())
)
@@ -137,14 +164,22 @@ class AdamW(Optimizer):
exp_avgs.append(state["exp_avg"])
exp_avg_sqs.append(state["exp_avg_sq"])
- if group['amsgrad']:
+ if group["amsgrad"]:
max_exp_avg_sqs.append(state["max_exp_avg_sq"])
- if group['differentiable'] and state['step'].requires_grad:
- raise RuntimeError('`requires_grad` is not supported for `step` in differentiable mode')
+ if group["differentiable"] and state["step"].requires_grad:
+ raise RuntimeError(
+ "`requires_grad` is not supported for `step` in differentiable mode"
+ )
# Foreach without capturable does not support a tensor lr
- if group['foreach'] and isinstance(group['lr'], Tensor) and not group['capturable']:
- raise RuntimeError('lr as a Tensor is not supported for capturable=False and foreach=True')
+ if (
+ group["foreach"]
+ and isinstance(group["lr"], Tensor)
+ and not group["capturable"]
+ ):
+ raise RuntimeError(
+ "lr as a Tensor is not supported for capturable=False and foreach=True"
+ )
state_steps.append(state["step"])
return has_complex
@@ -211,7 +246,8 @@ class AdamW(Optimizer):
return loss
-AdamW.__doc__ = r"""Implements AdamW algorithm.
+AdamW.__doc__ = (
+ r"""Implements AdamW algorithm.
.. math::
\begin{aligned}
@@ -249,7 +285,8 @@ AdamW.__doc__ = r"""Implements AdamW algorithm.
\end{aligned}
For further details regarding the algorithm we refer to `Decoupled Weight Decay Regularization`_.
- """ + fr"""
+ """
+ + rf"""
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
@@ -275,6 +312,7 @@ AdamW.__doc__ = r"""Implements AdamW algorithm.
https://openreview.net/forum?id=ryQu7f-RZ
"""
+)
def adamw(
@@ -306,7 +344,9 @@ def adamw(
See :class:`~torch.optim.AdamW` for details.
"""
- if not torch._utils.is_compiling() and not all(isinstance(t, torch.Tensor) for t in state_steps):
+ if not torch._utils.is_compiling() and not all(
+ isinstance(t, torch.Tensor) for t in state_steps
+ ):
raise RuntimeError(
"API has changed, `state_steps` argument must contain a list of singleton tensors"
)
@@ -316,7 +356,9 @@ def adamw(
# and pass False to use_fused. This is not a mistake--we want to give the fused impl
# bake-in time before making it the default, even if it is typically faster.
if fused is None and foreach is None:
- _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False)
+ _, foreach = _default_to_fused_or_foreach(
+ params, differentiable, use_fused=False
+ )
# Do not flip on foreach for the unsupported case where lr is a Tensor and capturable=False.
if foreach and isinstance(lr, Tensor) and not capturable:
foreach = False
@@ -380,7 +422,6 @@ def _single_tensor_adamw(
differentiable: bool,
has_complex: bool,
):
-
assert grad_scale is None and found_inf is None
if torch.jit.is_scripting():
@@ -397,8 +438,8 @@ def _single_tensor_adamw(
# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
if not torch._utils.is_compiling() and capturable:
- assert (
- (param.is_cuda and step_t.is_cuda) or (param.is_xla and step_t.is_xla)
+ assert (param.is_cuda and step_t.is_cuda) or (
+ param.is_xla and step_t.is_xla
), "If capturable=True, params and state_steps must be CUDA or XLA tensors."
if torch.is_complex(param):
@@ -422,8 +463,8 @@ def _single_tensor_adamw(
if capturable or differentiable:
step = step_t
- bias_correction1 = 1 - beta1 ** step
- bias_correction2 = 1 - beta2 ** step
+ bias_correction1 = 1 - beta1**step
+ bias_correction2 = 1 - beta2**step
step_size = lr / bias_correction1
step_size_neg = step_size.neg()
@@ -454,8 +495,8 @@ def _single_tensor_adamw(
else:
step = _get_value(step_t)
- bias_correction1 = 1 - beta1 ** step
- bias_correction2 = 1 - beta2 ** step
+ bias_correction1 = 1 - beta1**step
+ bias_correction2 = 1 - beta2**step
step_size = lr / bias_correction1
@@ -502,7 +543,9 @@ def _multi_tensor_adamw(
return
if isinstance(lr, Tensor) and not capturable:
- raise RuntimeError("lr as a Tensor is not supported for capturable=False and foreach=True")
+ raise RuntimeError(
+ "lr as a Tensor is not supported for capturable=False and foreach=True"
+ )
# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
if not torch._utils.is_compiling() and capturable:
@@ -514,21 +557,30 @@ def _multi_tensor_adamw(
assert grad_scale is None and found_inf is None
- grouped_tensors = Optimizer._group_tensors_by_device_and_dtype([
- params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps])
- for ((
+ grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
+ [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps]
+ )
+ for (
device_params,
device_grads,
device_exp_avgs,
device_exp_avg_sqs,
device_max_exp_avg_sqs,
device_state_steps,
- ), _) in grouped_tensors.values():
+ ), _ in grouped_tensors.values():
if has_complex:
if amsgrad:
- _view_as_real(device_params, device_grads, device_exp_avgs, device_exp_avg_sqs, device_max_exp_avg_sqs)
+ _view_as_real(
+ device_params,
+ device_grads,
+ device_exp_avgs,
+ device_exp_avg_sqs,
+ device_max_exp_avg_sqs,
+ )
else:
- _view_as_real(device_params, device_grads, device_exp_avgs, device_exp_avg_sqs)
+ _view_as_real(
+ device_params, device_grads, device_exp_avgs, device_exp_avg_sqs
+ )
if maximize:
device_grads = torch._foreach_neg(device_grads)
@@ -538,7 +590,9 @@ def _multi_tensor_adamw(
# and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just
# wrapped it once now. The alpha is required to assure we go to the right overload.
if device_state_steps[0].is_cpu:
- torch._foreach_add_(device_state_steps, torch.tensor(1.0, device='cpu'), alpha=1.0)
+ torch._foreach_add_(
+ device_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
+ )
else:
torch._foreach_add_(device_state_steps, 1)
@@ -550,7 +604,9 @@ def _multi_tensor_adamw(
torch._foreach_lerp_(device_exp_avgs, device_grads, 1 - beta1)
torch._foreach_mul_(device_exp_avg_sqs, beta2)
- torch._foreach_addcmul_(device_exp_avg_sqs, device_grads, device_grads, 1 - beta2)
+ torch._foreach_addcmul_(
+ device_exp_avg_sqs, device_grads, device_grads, 1 - beta2
+ )
# Delete the local intermediate since it won't be used anymore to save on peak memory
del device_grads
@@ -592,8 +648,12 @@ def _multi_tensor_adamw(
# at this point, exp_avg_sq_sqrt = - (1 - beta^t) * [sqrt(exp_avg_sq / (1 - beta2^t)) + eps] / lr
torch._foreach_addcdiv_(device_params, device_exp_avgs, exp_avg_sq_sqrt)
else:
- bias_correction1 = [1 - beta1 ** _get_value(step) for step in device_state_steps]
- bias_correction2 = [1 - beta2 ** _get_value(step) for step in device_state_steps]
+ bias_correction1 = [
+ 1 - beta1 ** _get_value(step) for step in device_state_steps
+ ]
+ bias_correction2 = [
+ 1 - beta2 ** _get_value(step) for step in device_state_steps
+ ]
step_size = _stack_if_compiling([(lr / bc) * -1 for bc in bias_correction1])
@@ -610,7 +670,9 @@ def _multi_tensor_adamw(
torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt)
torch._foreach_add_(exp_avg_sq_sqrt, eps)
- torch._foreach_addcdiv_(device_params, device_exp_avgs, exp_avg_sq_sqrt, step_size)
+ torch._foreach_addcdiv_(
+ device_params, device_exp_avgs, exp_avg_sq_sqrt, step_size
+ )
def _fused_adamw(
@@ -639,21 +701,31 @@ def _fused_adamw(
if differentiable:
raise RuntimeError("Adam with fused=True does not support differentiable=True")
- grad_scale_dict = {grad_scale.device: grad_scale} if grad_scale is not None else None
+ grad_scale_dict = (
+ {grad_scale.device: grad_scale} if grad_scale is not None else None
+ )
found_inf_dict = {found_inf.device: found_inf} if found_inf is not None else None
# We only shuffle around the lr when it is a Tensor and on CUDA, otherwise, we prefer
# treating it as a scalar.
- lr_dict = {lr.device: lr} if isinstance(lr, Tensor) and str(lr.device) != "cpu" else None
+ lr_dict = (
+ {lr.device: lr} if isinstance(lr, Tensor) and str(lr.device) != "cpu" else None
+ )
grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
- [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps])
- for (device, _), ((device_params,
- device_grads,
- device_exp_avgs,
- device_exp_avg_sqs,
- device_max_exp_avg_sqs,
- device_state_steps,), _) in grouped_tensors.items():
+ [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps]
+ )
+ for (device, _), (
+ (
+ device_params,
+ device_grads,
+ device_exp_avgs,
+ device_exp_avg_sqs,
+ device_max_exp_avg_sqs,
+ device_state_steps,
+ ),
+ _,
+ ) in grouped_tensors.items():
device_grad_scale, device_found_inf = None, None
if grad_scale is not None:
if device not in grad_scale_dict:
@@ -685,4 +757,6 @@ def _fused_adamw(
found_inf=device_found_inf,
)
if device_found_inf is not None:
- torch._foreach_sub_(device_state_steps, [device_found_inf] * len(device_state_steps))
+ torch._foreach_sub_(
+ device_state_steps, [device_found_inf] * len(device_state_steps)
+ )
diff --git a/torch/optim/asgd.py b/torch/optim/asgd.py
index 247c8388e9..3d16ef7119 100644
--- a/torch/optim/asgd.py
+++ b/torch/optim/asgd.py
@@ -1,19 +1,31 @@
+from typing import List, Optional
+
import torch
from torch import Tensor
-from .optimizer import (Optimizer, _use_grad_for_differentiable, _get_value, _default_to_fused_or_foreach,
- _get_scalar_dtype, _view_as_real, _differentiable_doc, _foreach_doc, _maximize_doc,
- _capturable_doc)
-from typing import List, Optional
+from .optimizer import (
+ _capturable_doc,
+ _default_to_fused_or_foreach,
+ _differentiable_doc,
+ _foreach_doc,
+ _get_scalar_dtype,
+ _get_value,
+ _maximize_doc,
+ _use_grad_for_differentiable,
+ _view_as_real,
+ Optimizer,
+)
__all__ = ["ASGD", "asgd"]
+
def _to_tensor(x, device=None):
if not isinstance(x, torch.Tensor):
return torch.tensor(x, device=device)
return x
+
class ASGD(Optimizer):
def __init__(
self,
@@ -56,14 +68,19 @@ class ASGD(Optimizer):
for p in group["params"]:
p_state = self.state.get(p, [])
if len(p_state) != 0:
- if not torch.is_tensor(p_state['step']):
+ if not torch.is_tensor(p_state["step"]):
step_val = float(p_state["step"])
- p_state["step"] = torch.tensor(step_val, dtype=_get_scalar_dtype(), device=p.device)
+ p_state["step"] = torch.tensor(
+ step_val, dtype=_get_scalar_dtype(), device=p.device
+ )
if not torch.is_tensor(p_state["eta"]):
- p_state["eta"] = torch.tensor(p_state["eta"], dtype=_get_scalar_dtype(), device=p.device)
+ p_state["eta"] = torch.tensor(
+ p_state["eta"], dtype=_get_scalar_dtype(), device=p.device
+ )
if not torch.is_tensor(p_state["mu"]):
- p_state["mu"] = torch.tensor(p_state["mu"], dtype=_get_scalar_dtype(), device=p.device)
-
+ p_state["mu"] = torch.tensor(
+ p_state["mu"], dtype=_get_scalar_dtype(), device=p.device
+ )
def _init_group(self, group, params_with_grad, grads, mus, axs, etas, state_steps):
has_complex = False
@@ -78,9 +95,15 @@ class ASGD(Optimizer):
state = self.state[p]
# State initialization
if len(state) == 0:
- state["step"] = torch.zeros((), device=p.device, dtype=_get_scalar_dtype())
- state["eta"] = torch.tensor(group["lr"], device=p.device, dtype=_get_scalar_dtype())
- state["mu"] = torch.ones((), device=p.device, dtype=_get_scalar_dtype())
+ state["step"] = torch.zeros(
+ (), device=p.device, dtype=_get_scalar_dtype()
+ )
+ state["eta"] = torch.tensor(
+ group["lr"], device=p.device, dtype=_get_scalar_dtype()
+ )
+ state["mu"] = torch.ones(
+ (), device=p.device, dtype=_get_scalar_dtype()
+ )
state["ax"] = torch.zeros_like(
p, memory_format=torch.preserve_format
)
@@ -114,7 +137,9 @@ class ASGD(Optimizer):
etas = []
state_steps = []
- has_complex = self._init_group(group, params_with_grad, grads, mus, axs, etas, state_steps)
+ has_complex = self._init_group(
+ group, params_with_grad, grads, mus, axs, etas, state_steps
+ )
asgd(
params_with_grad,
@@ -138,7 +163,7 @@ class ASGD(Optimizer):
return loss
-ASGD.__doc__ = fr"""Implements Averaged Stochastic Gradient Descent.
+ASGD.__doc__ = rf"""Implements Averaged Stochastic Gradient Descent.
It has been proposed in `Acceleration of stochastic approximation by
averaging`_.
@@ -188,7 +213,9 @@ def asgd(
See :class:`~torch.optim.ASGD` for details.
"""
if foreach is None:
- _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False)
+ _, foreach = _default_to_fused_or_foreach(
+ params, differentiable, use_fused=False
+ )
if foreach and torch.jit.is_scripting():
raise RuntimeError("torch.jit.script not supported with foreach optimizers")
@@ -245,7 +272,9 @@ def _single_tensor_asgd(
# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
if not torch._utils.is_compiling() and capturable:
- assert (param.is_cuda and mu.is_cuda and eta.is_cuda and step_t.is_cuda) or (
+ assert (
+ param.is_cuda and mu.is_cuda and eta.is_cuda and step_t.is_cuda
+ ) or (
param.is_xla and mu.is_xla and eta.is_xla and step_t.is_xla
), "If capturable=True, params, mus, etas, and state_steps must be CUDA or XLA tensors."
@@ -310,13 +339,25 @@ def _multi_tensor_asgd(
# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
if not torch._utils.is_compiling() and capturable:
- assert all(p.is_cuda and mu.is_cuda and eta.is_cuda and step.is_cuda
- for p, mu, eta, step in zip(params, mus, etas, state_steps)), \
- "If capturable=True, params, mus, etas, and state_steps must be CUDA tensors."
+ assert all(
+ p.is_cuda and mu.is_cuda and eta.is_cuda and step.is_cuda
+ for p, mu, eta, step in zip(params, mus, etas, state_steps)
+ ), "If capturable=True, params, mus, etas, and state_steps must be CUDA tensors."
- grouped_tensors = Optimizer._group_tensors_by_device_and_dtype([params, grads, axs, mus, etas, state_steps])
- for ((device, _), ((grouped_params, grouped_grads, grouped_axs, grouped_mus,
- grouped_etas, grouped_state_steps), _)) in grouped_tensors.items():
+ grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
+ [params, grads, axs, mus, etas, state_steps]
+ )
+ for (device, _), (
+ (
+ grouped_params,
+ grouped_grads,
+ grouped_axs,
+ grouped_mus,
+ grouped_etas,
+ grouped_state_steps,
+ ),
+ _,
+ ) in grouped_tensors.items():
if has_complex:
_view_as_real(grouped_params, grouped_grads, grouped_axs)
@@ -328,7 +369,9 @@ def _multi_tensor_asgd(
# and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just
# wrapped it once now. The alpha is required to assure we go to the right overload.
if grouped_state_steps[0].is_cpu:
- torch._foreach_add_(grouped_state_steps, torch.tensor(1.0, device='cpu'), alpha=1.0)
+ torch._foreach_add_(
+ grouped_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
+ )
else:
torch._foreach_add_(grouped_state_steps, 1)
@@ -338,11 +381,15 @@ def _multi_tensor_asgd(
torch._foreach_add_(grouped_grads, grouped_params, alpha=weight_decay)
intermediate = grouped_grads
else:
- intermediate = torch._foreach_add(grouped_grads, grouped_params, alpha=weight_decay)
+ intermediate = torch._foreach_add(
+ grouped_grads, grouped_params, alpha=weight_decay
+ )
torch._foreach_add_(intermediate, grouped_params, alpha=lambd)
else:
- intermediate = torch._foreach_add(grouped_grads, grouped_params, alpha=lambd)
+ intermediate = torch._foreach_add(
+ grouped_grads, grouped_params, alpha=lambd
+ )
# update param
# param * (1 - lambd * eta) - eta * grad
@@ -386,7 +433,7 @@ def _multi_tensor_asgd(
for i in range(len(grouped_mus)):
new_eta = _to_tensor(
- lr / (1 + lambd * lr * step ** alpha), device=device
+ lr / (1 + lambd * lr * step**alpha), device=device
)
new_etas.append(new_eta)
new_mu = _to_tensor(1 / max(1, step - t0), device=device)
diff --git a/torch/optim/lbfgs.py b/torch/optim/lbfgs.py
index 7a65dfcc40..1e0f5738ad 100644
--- a/torch/optim/lbfgs.py
+++ b/torch/optim/lbfgs.py
@@ -1,7 +1,8 @@
import torch
from .optimizer import Optimizer
-__all__ = ['LBFGS']
+__all__ = ["LBFGS"]
+
def _cubic_interpolate(x1, f1, g1, x2, f2, g2, bounds=None):
# ported from https://github.com/torch/optim/blob/master/polyinterp.lua
@@ -28,20 +29,12 @@ def _cubic_interpolate(x1, f1, g1, x2, f2, g2, bounds=None):
min_pos = x1 - (x1 - x2) * ((g1 + d2 - d1) / (g1 - g2 + 2 * d2))
return min(max(min_pos, xmin_bound), xmax_bound)
else:
- return (xmin_bound + xmax_bound) / 2.
-
-
-def _strong_wolfe(obj_func,
- x,
- t,
- d,
- f,
- g,
- gtd,
- c1=1e-4,
- c2=0.9,
- tolerance_change=1e-9,
- max_ls=25):
+ return (xmin_bound + xmax_bound) / 2.0
+
+
+def _strong_wolfe(
+ obj_func, x, t, d, f, g, gtd, c1=1e-4, c2=0.9, tolerance_change=1e-9, max_ls=25
+):
# ported from https://github.com/torch/optim/blob/master/lswolfe.lua
d_norm = d.abs().max()
g = g.clone(memory_format=torch.contiguous_format)
@@ -82,13 +75,8 @@ def _strong_wolfe(obj_func,
max_step = t * 10
tmp = t
t = _cubic_interpolate(
- t_prev,
- f_prev,
- gtd_prev,
- t,
- f_new,
- gtd_new,
- bounds=(min_step, max_step))
+ t_prev, f_prev, gtd_prev, t, f_new, gtd_new, bounds=(min_step, max_step)
+ )
# next step
t_prev = tmp
@@ -118,8 +106,14 @@ def _strong_wolfe(obj_func,
break
# compute new trial value
- t = _cubic_interpolate(bracket[0], bracket_f[0], bracket_gtd[0],
- bracket[1], bracket_f[1], bracket_gtd[1])
+ t = _cubic_interpolate(
+ bracket[0],
+ bracket_f[0],
+ bracket_gtd[0],
+ bracket[1],
+ bracket_f[1],
+ bracket_gtd[1],
+ )
# test that we are making sufficient progress:
# in case `t` is so close to boundary, we mark that we are making
@@ -214,15 +208,17 @@ class LBFGS(Optimizer):
line_search_fn (str): either 'strong_wolfe' or None (default: None).
"""
- def __init__(self,
- params,
- lr=1,
- max_iter=20,
- max_eval=None,
- tolerance_grad=1e-7,
- tolerance_change=1e-9,
- history_size=100,
- line_search_fn=None):
+ def __init__(
+ self,
+ params,
+ lr=1,
+ max_iter=20,
+ max_eval=None,
+ tolerance_grad=1e-7,
+ tolerance_change=1e-9,
+ history_size=100,
+ line_search_fn=None,
+ ):
if max_eval is None:
max_eval = max_iter * 5 // 4
defaults = dict(
@@ -232,19 +228,24 @@ class LBFGS(Optimizer):
tolerance_grad=tolerance_grad,
tolerance_change=tolerance_change,
history_size=history_size,
- line_search_fn=line_search_fn)
+ line_search_fn=line_search_fn,
+ )
super().__init__(params, defaults)
if len(self.param_groups) != 1:
- raise ValueError("LBFGS doesn't support per-parameter options "
- "(parameter groups)")
+ raise ValueError(
+ "LBFGS doesn't support per-parameter options " "(parameter groups)"
+ )
- self._params = self.param_groups[0]['params']
+ self._params = self.param_groups[0]["params"]
self._numel_cache = None
def _numel(self):
if self._numel_cache is None:
- self._numel_cache = sum(2 * p.numel() if torch.is_complex(p) else p.numel() for p in self._params)
+ self._numel_cache = sum(
+ 2 * p.numel() if torch.is_complex(p) else p.numel()
+ for p in self._params
+ )
return self._numel_cache
@@ -269,7 +270,7 @@ class LBFGS(Optimizer):
p = torch.view_as_real(p)
numel = p.numel()
# view as to avoid deprecated pointwise semantics
- p.add_(update[offset:offset + numel].view_as(p), alpha=step_size)
+ p.add_(update[offset : offset + numel].view_as(p), alpha=step_size)
offset += numel
assert offset == self._numel()
@@ -301,25 +302,25 @@ class LBFGS(Optimizer):
closure = torch.enable_grad()(closure)
group = self.param_groups[0]
- lr = group['lr']
- max_iter = group['max_iter']
- max_eval = group['max_eval']
- tolerance_grad = group['tolerance_grad']
- tolerance_change = group['tolerance_change']
- line_search_fn = group['line_search_fn']
- history_size = group['history_size']
+ lr = group["lr"]
+ max_iter = group["max_iter"]
+ max_eval = group["max_eval"]
+ tolerance_grad = group["tolerance_grad"]
+ tolerance_change = group["tolerance_change"]
+ line_search_fn = group["line_search_fn"]
+ history_size = group["history_size"]
# NOTE: LBFGS has only global state, but we register it as state for
# the first param, because this helps with casting in load_state_dict
state = self.state[self._params[0]]
- state.setdefault('func_evals', 0)
- state.setdefault('n_iter', 0)
+ state.setdefault("func_evals", 0)
+ state.setdefault("n_iter", 0)
# evaluate initial f(x) and df/dx
orig_loss = closure()
loss = float(orig_loss)
current_evals = 1
- state['func_evals'] += 1
+ state["func_evals"] += 1
flat_grad = self._gather_flat_grad()
opt_cond = flat_grad.abs().max() <= tolerance_grad
@@ -329,26 +330,26 @@ class LBFGS(Optimizer):
return orig_loss
# tensors cached in state (for tracing)
- d = state.get('d')
- t = state.get('t')
- old_dirs = state.get('old_dirs')
- old_stps = state.get('old_stps')
- ro = state.get('ro')
- H_diag = state.get('H_diag')
- prev_flat_grad = state.get('prev_flat_grad')
- prev_loss = state.get('prev_loss')
+ d = state.get("d")
+ t = state.get("t")
+ old_dirs = state.get("old_dirs")
+ old_stps = state.get("old_stps")
+ ro = state.get("ro")
+ H_diag = state.get("H_diag")
+ prev_flat_grad = state.get("prev_flat_grad")
+ prev_loss = state.get("prev_loss")
n_iter = 0
# optimize for a max of max_iter iterations
while n_iter < max_iter:
# keep track of nb of iterations
n_iter += 1
- state['n_iter'] += 1
+ state["n_iter"] += 1
############################################################
# compute gradient descent direction
############################################################
- if state['n_iter'] == 1:
+ if state["n_iter"] == 1:
d = flat_grad.neg()
old_dirs = []
old_stps = []
@@ -370,7 +371,7 @@ class LBFGS(Optimizer):
# store new direction/step
old_dirs.append(y)
old_stps.append(s)
- ro.append(1. / ys)
+ ro.append(1.0 / ys)
# update scale of initial Hessian approximation
H_diag = ys / y.dot(y) # (y*y)
@@ -379,9 +380,9 @@ class LBFGS(Optimizer):
# multiplied by the gradient
num_old = len(old_dirs)
- if 'al' not in state:
- state['al'] = [None] * history_size
- al = state['al']
+ if "al" not in state:
+ state["al"] = [None] * history_size
+ al = state["al"]
# iteration in L-BFGS loop collapsed to use just one buffer
q = flat_grad.neg()
@@ -406,8 +407,8 @@ class LBFGS(Optimizer):
# compute step length
############################################################
# reset initial guess for step size
- if state['n_iter'] == 1:
- t = min(1., 1. / flat_grad.abs().sum()) * lr
+ if state["n_iter"] == 1:
+ t = min(1.0, 1.0 / flat_grad.abs().sum()) * lr
else:
t = lr
@@ -431,7 +432,8 @@ class LBFGS(Optimizer):
return self._directional_evaluate(closure, x, t, d)
loss, flat_grad, t, ls_func_evals = _strong_wolfe(
- obj_func, x_init, t, d, loss, flat_grad, gtd)
+ obj_func, x_init, t, d, loss, flat_grad, gtd
+ )
self._add_grad(t, d)
opt_cond = flat_grad.abs().max() <= tolerance_grad
else:
@@ -449,7 +451,7 @@ class LBFGS(Optimizer):
# update func eval
current_evals += ls_func_evals
- state['func_evals'] += ls_func_evals
+ state["func_evals"] += ls_func_evals
############################################################
# check conditions
@@ -471,13 +473,13 @@ class LBFGS(Optimizer):
if abs(loss - prev_loss) < tolerance_change:
break
- state['d'] = d
- state['t'] = t
- state['old_dirs'] = old_dirs
- state['old_stps'] = old_stps
- state['ro'] = ro
- state['H_diag'] = H_diag
- state['prev_flat_grad'] = prev_flat_grad
- state['prev_loss'] = prev_loss
+ state["d"] = d
+ state["t"] = t
+ state["old_dirs"] = old_dirs
+ state["old_stps"] = old_stps
+ state["ro"] = ro
+ state["H_diag"] = H_diag
+ state["prev_flat_grad"] = prev_flat_grad
+ state["prev_loss"] = prev_loss
return orig_loss
gitVersion: 2.41.0

commitId: c74a6783b6dc92b74d8d49dce951b469764abd2
datetime: Mon, 15 Apr 2024 17:32:40 +0800
subject: [PATCH 0187/1000] Part 2: UFMT fix 2 files in torch/optim due to the pr-sanity-checks (#124054)
comment: Pull Request resolved: https://github.com/pytorch/pytorch/pull/124054 Approved by: https://github.com/ezyang ghstack dependencies: #124048, #124053
diff --git a/.lintrunner.toml b/.lintrunner.toml
index d94ce427b7..9ddc5def54 100644
--- a/.lintrunner.toml
+++ b/.lintrunner.toml
@@ -2112,8 +2112,6 @@ exclude_patterns = [
'torch/nn/utils/rnn.py',
'torch/nn/utils/spectral_norm.py',
'torch/nn/utils/weight_norm.py',
- 'torch/optim/lr_scheduler.py',
- 'torch/optim/nadam.py',
'torch/optim/optimizer.py',
'torch/optim/radam.py',
'torch/optim/rmsprop.py',
diff --git a/torch/optim/lr_scheduler.py b/torch/optim/lr_scheduler.py
index a77bb378d8..773812b0a4 100644
--- a/torch/optim/lr_scheduler.py
+++ b/torch/optim/lr_scheduler.py
@@ -1,17 +1,33 @@
-import types
import math
-from torch import inf
-from functools import wraps, partial
+import types
import warnings
import weakref
-from collections import Counter
from bisect import bisect_right
+from collections import Counter
+from functools import partial, wraps
+
+from torch import inf
from .optimizer import Optimizer
-__all__ = ['LambdaLR', 'MultiplicativeLR', 'StepLR', 'MultiStepLR', 'ConstantLR', 'LinearLR',
- 'ExponentialLR', 'SequentialLR', 'CosineAnnealingLR', 'ChainedScheduler', 'ReduceLROnPlateau',
- 'CyclicLR', 'CosineAnnealingWarmRestarts', 'OneCycleLR', 'PolynomialLR', 'LRScheduler']
+__all__ = [
+ "LambdaLR",
+ "MultiplicativeLR",
+ "StepLR",
+ "MultiStepLR",
+ "ConstantLR",
+ "LinearLR",
+ "ExponentialLR",
+ "SequentialLR",
+ "CosineAnnealingLR",
+ "ChainedScheduler",
+ "ReduceLROnPlateau",
+ "CyclicLR",
+ "CosineAnnealingWarmRestarts",
+ "OneCycleLR",
+ "PolynomialLR",
+ "LRScheduler",
+]
EPOCH_DEPRECATION_WARNING = (
"The epoch parameter in `scheduler.step()` was not necessary and is being "
@@ -22,40 +38,45 @@ EPOCH_DEPRECATION_WARNING = (
"https://github.com/pytorch/pytorch/issues/new/choose."
)
+
def _check_verbose_deprecated_warning(verbose):
"""Raises a warning when verbose is not the default value."""
if verbose != "deprecated":
- warnings.warn("The verbose parameter is deprecated. Please use get_last_lr() "
- "to access the learning rate.", UserWarning)
+ warnings.warn(
+ "The verbose parameter is deprecated. Please use get_last_lr() "
+ "to access the learning rate.",
+ UserWarning,
+ )
return verbose
return False
-class LRScheduler:
+class LRScheduler:
def __init__(self, optimizer, last_epoch=-1, verbose="deprecated"):
-
# Attach optimizer
if not isinstance(optimizer, Optimizer):
- raise TypeError(f'{type(optimizer).__name__} is not an Optimizer')
+ raise TypeError(f"{type(optimizer).__name__} is not an Optimizer")
self.optimizer = optimizer
# Initialize epoch and base learning rates
if last_epoch == -1:
for group in optimizer.param_groups:
- group.setdefault('initial_lr', group['lr'])
+ group.setdefault("initial_lr", group["lr"])
else:
for i, group in enumerate(optimizer.param_groups):
- if 'initial_lr' not in group:
- raise KeyError("param 'initial_lr' is not specified "
- f"in param_groups[{i}] when resuming an optimizer")
- self.base_lrs = [group['initial_lr'] for group in optimizer.param_groups]
+ if "initial_lr" not in group:
+ raise KeyError(
+ "param 'initial_lr' is not specified "
+ f"in param_groups[{i}] when resuming an optimizer"
+ )
+ self.base_lrs = [group["initial_lr"] for group in optimizer.param_groups]
self.last_epoch = last_epoch
# Following https://github.com/pytorch/pytorch/issues/20124
# We would like to ensure that `lr_scheduler.step()` is called after
# `optimizer.step()`
def with_counter(method):
- if getattr(method, '_with_counter', False):
+ if getattr(method, "_with_counter", False):
# `optimizer.step()` has already been replaced, return.
return method
@@ -96,7 +117,9 @@ class LRScheduler:
It contains an entry for every variable in self.__dict__ which
is not the optimizer.
"""
- return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}
+ return {
+ key: value for key, value in self.__dict__.items() if key != "optimizer"
+ }
def load_state_dict(self, state_dict):
"""Loads the schedulers state.
@@ -108,8 +131,7 @@ class LRScheduler:
self.__dict__.update(state_dict)
def get_last_lr(self):
- """ Return last computed learning rate by current scheduler.
- """
+ """Return last computed learning rate by current scheduler."""
return self._last_lr
def get_lr(self):
@@ -117,35 +139,40 @@ class LRScheduler:
raise NotImplementedError
def print_lr(self, is_verbose, group, lr, epoch=None):
- """Display the current learning rate.
- """
+ """Display the current learning rate."""
if is_verbose:
if epoch is None:
- print(f'Adjusting learning rate of group {group} to {lr:.4e}.')
+ print(f"Adjusting learning rate of group {group} to {lr:.4e}.")
else:
- epoch_str = ("%.2f" if isinstance(epoch, float) else
- "%.5d") % epoch
- print(f'Epoch {epoch_str}: adjusting learning rate of group {group} to {lr:.4e}.')
-
+ epoch_str = ("%.2f" if isinstance(epoch, float) else "%.5d") % epoch
+ print(
+ f"Epoch {epoch_str}: adjusting learning rate of group {group} to {lr:.4e}."
+ )
def step(self, epoch=None):
# Raise a warning if old pattern is detected
# https://github.com/pytorch/pytorch/issues/20124
if self._step_count == 1:
if not hasattr(self.optimizer.step, "_with_counter"):
- warnings.warn("Seems like `optimizer.step()` has been overridden after learning rate scheduler "
- "initialization. Please, make sure to call `optimizer.step()` before "
- "`lr_scheduler.step()`. See more details at "
- "https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate", UserWarning)
+ warnings.warn(
+ "Seems like `optimizer.step()` has been overridden after learning rate scheduler "
+ "initialization. Please, make sure to call `optimizer.step()` before "
+ "`lr_scheduler.step()`. See more details at "
+ "https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate",
+ UserWarning,
+ )
# Just check if there were two first lr_scheduler.step() calls before optimizer.step()
elif self.optimizer._step_count < 1:
- warnings.warn("Detected call of `lr_scheduler.step()` before `optimizer.step()`. "
- "In PyTorch 1.1.0 and later, you should call them in the opposite order: "
- "`optimizer.step()` before `lr_scheduler.step()`. Failure to do this "
- "will result in PyTorch skipping the first value of the learning rate schedule. "
- "See more details at "
- "https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate", UserWarning)
+ warnings.warn(
+ "Detected call of `lr_scheduler.step()` before `optimizer.step()`. "
+ "In PyTorch 1.1.0 and later, you should call them in the opposite order: "
+ "`optimizer.step()` before `lr_scheduler.step()`. Failure to do this "
+ "will result in PyTorch skipping the first value of the learning rate schedule. "
+ "See more details at "
+ "https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate",
+ UserWarning,
+ )
self._step_count += 1
with _enable_get_lr_call(self):
@@ -162,9 +189,9 @@ class LRScheduler:
for i, data in enumerate(zip(self.optimizer.param_groups, values)):
param_group, lr = data
- param_group['lr'] = lr
+ param_group["lr"] = lr
- self._last_lr = [group['lr'] for group in self.optimizer.param_groups]
+ self._last_lr = [group["lr"] for group in self.optimizer.param_groups]
# Including _LRScheduler for backwards compatibility
@@ -174,7 +201,6 @@ class _LRScheduler(LRScheduler):
class _enable_get_lr_call:
-
def __init__(self, o):
self.o = o
@@ -222,7 +248,9 @@ class LambdaLR(LRScheduler):
self.lr_lambdas = [lr_lambda] * len(optimizer.param_groups)
else:
if len(lr_lambda) != len(optimizer.param_groups):
- raise ValueError(f"Expected {len(optimizer.param_groups)} lr_lambdas, but got {len(lr_lambda)}")
+ raise ValueError(
+ f"Expected {len(optimizer.param_groups)} lr_lambdas, but got {len(lr_lambda)}"
+ )
self.lr_lambdas = list(lr_lambda)
super().__init__(optimizer, last_epoch, verbose)
@@ -237,12 +265,16 @@ class LambdaLR(LRScheduler):
When saving or loading the scheduler, please make sure to also save or load the state of the optimizer.
"""
- state_dict = {key: value for key, value in self.__dict__.items() if key not in ('optimizer', 'lr_lambdas')}
- state_dict['lr_lambdas'] = [None] * len(self.lr_lambdas)
+ state_dict = {
+ key: value
+ for key, value in self.__dict__.items()
+ if key not in ("optimizer", "lr_lambdas")
+ }
+ state_dict["lr_lambdas"] = [None] * len(self.lr_lambdas)
for idx, fn in enumerate(self.lr_lambdas):
if not isinstance(fn, types.FunctionType):
- state_dict['lr_lambdas'][idx] = fn.__dict__.copy()
+ state_dict["lr_lambdas"][idx] = fn.__dict__.copy()
return state_dict
@@ -256,11 +288,11 @@ class LambdaLR(LRScheduler):
from a call to :meth:`state_dict`.
"""
- lr_lambdas = state_dict.pop('lr_lambdas')
+ lr_lambdas = state_dict.pop("lr_lambdas")
self.__dict__.update(state_dict)
# Restore state_dict keys in order to prevent side effects
# https://github.com/pytorch/pytorch/issues/32756
- state_dict['lr_lambdas'] = lr_lambdas
+ state_dict["lr_lambdas"] = lr_lambdas
for idx, fn in enumerate(lr_lambdas):
if fn is not None:
@@ -268,11 +300,15 @@ class LambdaLR(LRScheduler):
def get_lr(self):
if not self._get_lr_called_within_step:
- warnings.warn("To get the last learning rate computed by the scheduler, "
- "please use `get_last_lr()`.")
+ warnings.warn(
+ "To get the last learning rate computed by the scheduler, "
+ "please use `get_last_lr()`."
+ )
- return [base_lr * lmbda(self.last_epoch)
- for lmbda, base_lr in zip(self.lr_lambdas, self.base_lrs)]
+ return [
+ base_lr * lmbda(self.last_epoch)
+ for lmbda, base_lr in zip(self.lr_lambdas, self.base_lrs)
+ ]
class MultiplicativeLR(LRScheduler):
@@ -309,7 +345,9 @@ class MultiplicativeLR(LRScheduler):
self.lr_lambdas = [lr_lambda] * len(optimizer.param_groups)
else:
if len(lr_lambda) != len(optimizer.param_groups):
- raise ValueError(f"Expected {len(optimizer.param_groups)} lr_lambdas, but got {len(lr_lambda)}")
+ raise ValueError(
+ f"Expected {len(optimizer.param_groups)} lr_lambdas, but got {len(lr_lambda)}"
+ )
self.lr_lambdas = list(lr_lambda)
super().__init__(optimizer, last_epoch, verbose)
@@ -321,12 +359,16 @@ class MultiplicativeLR(LRScheduler):
The learning rate lambda functions will only be saved if they are callable objects
and not if they are functions or lambdas.
"""
- state_dict = {key: value for key, value in self.__dict__.items() if key not in ('optimizer', 'lr_lambdas')}
- state_dict['lr_lambdas'] = [None] * len(self.lr_lambdas)
+ state_dict = {
+ key: value
+ for key, value in self.__dict__.items()
+ if key not in ("optimizer", "lr_lambdas")
+ }
+ state_dict["lr_lambdas"] = [None] * len(self.lr_lambdas)
for idx, fn in enumerate(self.lr_lambdas):
if not isinstance(fn, types.FunctionType):
- state_dict['lr_lambdas'][idx] = fn.__dict__.copy()
+ state_dict["lr_lambdas"][idx] = fn.__dict__.copy()
return state_dict
@@ -337,11 +379,11 @@ class MultiplicativeLR(LRScheduler):
state_dict (dict): scheduler state. Should be an object returned
from a call to :meth:`state_dict`.
"""
- lr_lambdas = state_dict.pop('lr_lambdas')
+ lr_lambdas = state_dict.pop("lr_lambdas")
self.__dict__.update(state_dict)
# Restore state_dict keys in order to prevent side effects
# https://github.com/pytorch/pytorch/issues/32756
- state_dict['lr_lambdas'] = lr_lambdas
+ state_dict["lr_lambdas"] = lr_lambdas
for idx, fn in enumerate(lr_lambdas):
if fn is not None:
@@ -349,14 +391,19 @@ class MultiplicativeLR(LRScheduler):
def get_lr(self):
if not self._get_lr_called_within_step:
- warnings.warn("To get the last learning rate computed by the scheduler, "
- "please use `get_last_lr()`.", UserWarning)
+ warnings.warn(
+ "To get the last learning rate computed by the scheduler, "
+ "please use `get_last_lr()`.",
+ UserWarning,
+ )
if self.last_epoch > 0:
- return [group['lr'] * lmbda(self.last_epoch)
- for lmbda, group in zip(self.lr_lambdas, self.optimizer.param_groups)]
+ return [
+ group["lr"] * lmbda(self.last_epoch)
+ for lmbda, group in zip(self.lr_lambdas, self.optimizer.param_groups)
+ ]
else:
- return [group['lr'] for group in self.optimizer.param_groups]
+ return [group["lr"] for group in self.optimizer.param_groups]
class StepLR(LRScheduler):
@@ -392,24 +439,30 @@ class StepLR(LRScheduler):
>>> scheduler.step()
"""
- def __init__(self, optimizer, step_size, gamma=0.1, last_epoch=-1, verbose="deprecated"):
+ def __init__(
+ self, optimizer, step_size, gamma=0.1, last_epoch=-1, verbose="deprecated"
+ ):
self.step_size = step_size
self.gamma = gamma
super().__init__(optimizer, last_epoch, verbose)
def get_lr(self):
if not self._get_lr_called_within_step:
- warnings.warn("To get the last learning rate computed by the scheduler, "
- "please use `get_last_lr()`.", UserWarning)
+ warnings.warn(
+ "To get the last learning rate computed by the scheduler, "
+ "please use `get_last_lr()`.",
+ UserWarning,
+ )
if (self.last_epoch == 0) or (self.last_epoch % self.step_size != 0):
- return [group['lr'] for group in self.optimizer.param_groups]
- return [group['lr'] * self.gamma
- for group in self.optimizer.param_groups]
+ return [group["lr"] for group in self.optimizer.param_groups]
+ return [group["lr"] * self.gamma for group in self.optimizer.param_groups]
def _get_closed_form_lr(self):
- return [base_lr * self.gamma ** (self.last_epoch // self.step_size)
- for base_lr in self.base_lrs]
+ return [
+ base_lr * self.gamma ** (self.last_epoch // self.step_size)
+ for base_lr in self.base_lrs
+ ]
class MultiStepLR(LRScheduler):
@@ -444,25 +497,34 @@ class MultiStepLR(LRScheduler):
>>> scheduler.step()
"""
- def __init__(self, optimizer, milestones, gamma=0.1, last_epoch=-1, verbose="deprecated"):
+ def __init__(
+ self, optimizer, milestones, gamma=0.1, last_epoch=-1, verbose="deprecated"
+ ):
self.milestones = Counter(milestones)
self.gamma = gamma
super().__init__(optimizer, last_epoch, verbose)
def get_lr(self):
if not self._get_lr_called_within_step:
- warnings.warn("To get the last learning rate computed by the scheduler, "
- "please use `get_last_lr()`.", UserWarning)
+ warnings.warn(
+ "To get the last learning rate computed by the scheduler, "
+ "please use `get_last_lr()`.",
+ UserWarning,
+ )
if self.last_epoch not in self.milestones:
- return [group['lr'] for group in self.optimizer.param_groups]
- return [group['lr'] * self.gamma ** self.milestones[self.last_epoch]
- for group in self.optimizer.param_groups]
+ return [group["lr"] for group in self.optimizer.param_groups]
+ return [
+ group["lr"] * self.gamma ** self.milestones[self.last_epoch]
+ for group in self.optimizer.param_groups
+ ]
def _get_closed_form_lr(self):
milestones = sorted(self.milestones.elements())
- return [base_lr * self.gamma ** bisect_right(milestones, self.last_epoch)
- for base_lr in self.base_lrs]
+ return [
+ base_lr * self.gamma ** bisect_right(milestones, self.last_epoch)
+ for base_lr in self.base_lrs
+ ]
class ConstantLR(LRScheduler):
@@ -500,9 +562,18 @@ class ConstantLR(LRScheduler):
>>> scheduler.step()
"""
- def __init__(self, optimizer, factor=1.0 / 3, total_iters=5, last_epoch=-1, verbose="deprecated"):
+ def __init__(
+ self,
+ optimizer,
+ factor=1.0 / 3,
+ total_iters=5,
+ last_epoch=-1,
+ verbose="deprecated",
+ ):
if factor > 1.0 or factor < 0:
- raise ValueError('Constant multiplicative factor expected to be between 0 and 1.')
+ raise ValueError(
+ "Constant multiplicative factor expected to be between 0 and 1."
+ )
self.factor = factor
self.total_iters = total_iters
@@ -510,20 +581,28 @@ class ConstantLR(LRScheduler):
def get_lr(self):
if not self._get_lr_called_within_step:
- warnings.warn("To get the last learning rate computed by the scheduler, "
- "please use `get_last_lr()`.", UserWarning)
+ warnings.warn(
+ "To get the last learning rate computed by the scheduler, "
+ "please use `get_last_lr()`.",
+ UserWarning,
+ )
if self.last_epoch == 0:
- return [group['lr'] * self.factor for group in self.optimizer.param_groups]
+ return [group["lr"] * self.factor for group in self.optimizer.param_groups]
if self.last_epoch != self.total_iters:
- return [group['lr'] for group in self.optimizer.param_groups]
+ return [group["lr"] for group in self.optimizer.param_groups]
- return [group['lr'] * (1.0 / self.factor) for group in self.optimizer.param_groups]
+ return [
+ group["lr"] * (1.0 / self.factor) for group in self.optimizer.param_groups
+ ]
def _get_closed_form_lr(self):
- return [base_lr * (self.factor + (self.last_epoch >= self.total_iters) * (1 - self.factor))
- for base_lr in self.base_lrs]
+ return [
+ base_lr
+ * (self.factor + (self.last_epoch >= self.total_iters) * (1 - self.factor))
+ for base_lr in self.base_lrs
+ ]
class LinearLR(LRScheduler):
@@ -564,13 +643,24 @@ class LinearLR(LRScheduler):
>>> scheduler.step()
"""
- def __init__(self, optimizer, start_factor=1.0 / 3, end_factor=1.0, total_iters=5, last_epoch=-1,
- verbose="deprecated"):
+ def __init__(
+ self,
+ optimizer,
+ start_factor=1.0 / 3,
+ end_factor=1.0,
+ total_iters=5,
+ last_epoch=-1,
+ verbose="deprecated",
+ ):
if start_factor > 1.0 or start_factor <= 0:
- raise ValueError('Starting multiplicative factor expected to be greater than 0 and less or equal to 1.')
+ raise ValueError(
+ "Starting multiplicative factor expected to be greater than 0 and less or equal to 1."
+ )
if end_factor > 1.0 or end_factor < 0:
- raise ValueError('Ending multiplicative factor expected to be between 0 and 1.')
+ raise ValueError(
+ "Ending multiplicative factor expected to be between 0 and 1."
+ )
self.start_factor = start_factor
self.end_factor = end_factor
@@ -579,23 +669,44 @@ class LinearLR(LRScheduler):
def get_lr(self):
if not self._get_lr_called_within_step:
- warnings.warn("To get the last learning rate computed by the scheduler, "
- "please use `get_last_lr()`.", UserWarning)
+ warnings.warn(
+ "To get the last learning rate computed by the scheduler, "
+ "please use `get_last_lr()`.",
+ UserWarning,
+ )
if self.last_epoch == 0:
- return [group['lr'] * self.start_factor for group in self.optimizer.param_groups]
+ return [
+ group["lr"] * self.start_factor for group in self.optimizer.param_groups
+ ]
if self.last_epoch > self.total_iters:
- return [group['lr'] for group in self.optimizer.param_groups]
+ return [group["lr"] for group in self.optimizer.param_groups]
- return [group['lr'] * (1. + (self.end_factor - self.start_factor) /
- (self.total_iters * self.start_factor + (self.last_epoch - 1) * (self.end_factor - self.start_factor)))
- for group in self.optimizer.param_groups]
+ return [
+ group["lr"]
+ * (
+ 1.0
+ + (self.end_factor - self.start_factor)
+ / (
+ self.total_iters * self.start_factor
+ + (self.last_epoch - 1) * (self.end_factor - self.start_factor)
+ )
+ )
+ for group in self.optimizer.param_groups
+ ]
def _get_closed_form_lr(self):
- return [base_lr * (self.start_factor +
- (self.end_factor - self.start_factor) * min(self.total_iters, self.last_epoch) / self.total_iters)
- for base_lr in self.base_lrs]
+ return [
+ base_lr
+ * (
+ self.start_factor
+ + (self.end_factor - self.start_factor)
+ * min(self.total_iters, self.last_epoch)
+ / self.total_iters
+ )
+ for base_lr in self.base_lrs
+ ]
class ExponentialLR(LRScheduler):
@@ -620,17 +731,18 @@ class ExponentialLR(LRScheduler):
def get_lr(self):
if not self._get_lr_called_within_step:
- warnings.warn("To get the last learning rate computed by the scheduler, "
- "please use `get_last_lr()`.", UserWarning)
+ warnings.warn(
+ "To get the last learning rate computed by the scheduler, "
+ "please use `get_last_lr()`.",
+ UserWarning,
+ )
if self.last_epoch == 0:
- return [group['lr'] for group in self.optimizer.param_groups]
- return [group['lr'] * self.gamma
- for group in self.optimizer.param_groups]
+ return [group["lr"] for group in self.optimizer.param_groups]
+ return [group["lr"] * self.gamma for group in self.optimizer.param_groups]
def _get_closed_form_lr(self):
- return [base_lr * self.gamma ** self.last_epoch
- for base_lr in self.base_lrs]
+ return [base_lr * self.gamma**self.last_epoch for base_lr in self.base_lrs]
class SequentialLR(LRScheduler):
@@ -666,7 +778,9 @@ class SequentialLR(LRScheduler):
>>> scheduler.step()
"""
- def __init__(self, optimizer, schedulers, milestones, last_epoch=-1, verbose="deprecated"):
+ def __init__(
+ self, optimizer, schedulers, milestones, last_epoch=-1, verbose="deprecated"
+ ):
for scheduler_idx in range(len(schedulers)):
if schedulers[scheduler_idx].optimizer != optimizer:
raise ValueError(
@@ -674,12 +788,12 @@ class SequentialLR(LRScheduler):
f"got schedulers at index {scheduler_idx} to be different than the optimizer passed in."
)
- if (schedulers[scheduler_idx].optimizer != schedulers[0].optimizer):
+ if schedulers[scheduler_idx].optimizer != schedulers[0].optimizer:
raise ValueError(
"Sequential Schedulers expects all schedulers to belong to the same optimizer, but "
f"got schedulers at index {0} and {scheduler_idx} to be different."
)
- if (len(milestones) != len(schedulers) - 1):
+ if len(milestones) != len(schedulers) - 1:
raise ValueError(
"Sequential Schedulers expects number of schedulers provided to be one more "
f"than the number of milestone points, but got number of schedulers {len(schedulers)} and the "
@@ -722,11 +836,15 @@ class SequentialLR(LRScheduler):
is not the optimizer.
The wrapped scheduler states will also be saved.
"""
- state_dict = {key: value for key, value in self.__dict__.items() if key not in ('optimizer', '_schedulers')}
- state_dict['_schedulers'] = [None] * len(self._schedulers)
+ state_dict = {
+ key: value
+ for key, value in self.__dict__.items()
+ if key not in ("optimizer", "_schedulers")
+ }
+ state_dict["_schedulers"] = [None] * len(self._schedulers)
for idx, s in enumerate(self._schedulers):
- state_dict['_schedulers'][idx] = s.state_dict()
+ state_dict["_schedulers"][idx] = s.state_dict()
return state_dict
@@ -737,11 +855,11 @@ class SequentialLR(LRScheduler):
state_dict (dict): scheduler state. Should be an object returned
from a call to :meth:`state_dict`.
"""
- _schedulers = state_dict.pop('_schedulers')
+ _schedulers = state_dict.pop("_schedulers")
self.__dict__.update(state_dict)
# Restore state_dict keys in order to prevent side effects
# https://github.com/pytorch/pytorch/issues/32756
- state_dict['_schedulers'] = _schedulers
+ state_dict["_schedulers"] = _schedulers
for idx, s in enumerate(_schedulers):
self._schedulers[idx].load_state_dict(s)
@@ -776,26 +894,37 @@ class PolynomialLR(LRScheduler):
>>> validate(...)
>>> scheduler.step()
"""
- def __init__(self, optimizer, total_iters=5, power=1.0, last_epoch=-1, verbose="deprecated"):
+
+ def __init__(
+ self, optimizer, total_iters=5, power=1.0, last_epoch=-1, verbose="deprecated"
+ ):
self.total_iters = total_iters
self.power = power
super().__init__(optimizer, last_epoch, verbose)
def get_lr(self):
if not self._get_lr_called_within_step:
- warnings.warn("To get the last learning rate computed by the scheduler, "
- "please use `get_last_lr()`.", UserWarning)
+ warnings.warn(
+ "To get the last learning rate computed by the scheduler, "
+ "please use `get_last_lr()`.",
+ UserWarning,
+ )
if self.last_epoch == 0 or self.last_epoch > self.total_iters:
return [group["lr"] for group in self.optimizer.param_groups]
- decay_factor = ((1.0 - self.last_epoch / self.total_iters) / (1.0 - (self.last_epoch - 1) / self.total_iters)) ** self.power
+ decay_factor = (
+ (1.0 - self.last_epoch / self.total_iters)
+ / (1.0 - (self.last_epoch - 1) / self.total_iters)
+ ) ** self.power
return [group["lr"] * decay_factor for group in self.optimizer.param_groups]
def _get_closed_form_lr(self):
return [
(
- base_lr * (1.0 - min(self.total_iters, self.last_epoch) / self.total_iters) ** self.power
+ base_lr
+ * (1.0 - min(self.total_iters, self.last_epoch) / self.total_iters)
+ ** self.power
)
for base_lr in self.base_lrs
]
@@ -845,37 +974,53 @@ class CosineAnnealingLR(LRScheduler):
https://arxiv.org/abs/1608.03983
"""
- def __init__(self, optimizer, T_max, eta_min=0, last_epoch=-1, verbose="deprecated"):
+ def __init__(
+ self, optimizer, T_max, eta_min=0, last_epoch=-1, verbose="deprecated"
+ ):
self.T_max = T_max
self.eta_min = eta_min
super().__init__(optimizer, last_epoch, verbose)
def get_lr(self):
if not self._get_lr_called_within_step:
- warnings.warn("To get the last learning rate computed by the scheduler, "
- "please use `get_last_lr()`.", UserWarning)
+ warnings.warn(
+ "To get the last learning rate computed by the scheduler, "
+ "please use `get_last_lr()`.",
+ UserWarning,
+ )
if self.last_epoch == 0:
- return [group['lr'] for group in self.optimizer.param_groups]
+ return [group["lr"] for group in self.optimizer.param_groups]
elif self._step_count == 1 and self.last_epoch > 0:
- return [self.eta_min + (base_lr - self.eta_min) *
- (1 + math.cos((self.last_epoch) * math.pi / self.T_max)) / 2
- for base_lr, group in
- zip(self.base_lrs, self.optimizer.param_groups)]
+ return [
+ self.eta_min
+ + (base_lr - self.eta_min)
+ * (1 + math.cos((self.last_epoch) * math.pi / self.T_max))
+ / 2
+ for base_lr, group in zip(self.base_lrs, self.optimizer.param_groups)
+ ]
elif (self.last_epoch - 1 - self.T_max) % (2 * self.T_max) == 0:
- return [group['lr'] + (base_lr - self.eta_min) *
- (1 - math.cos(math.pi / self.T_max)) / 2
- for base_lr, group in
- zip(self.base_lrs, self.optimizer.param_groups)]
- return [(1 + math.cos(math.pi * self.last_epoch / self.T_max)) /
- (1 + math.cos(math.pi * (self.last_epoch - 1) / self.T_max)) *
- (group['lr'] - self.eta_min) + self.eta_min
- for group in self.optimizer.param_groups]
+ return [
+ group["lr"]
+ + (base_lr - self.eta_min) * (1 - math.cos(math.pi / self.T_max)) / 2
+ for base_lr, group in zip(self.base_lrs, self.optimizer.param_groups)
+ ]
+ return [
+ (1 + math.cos(math.pi * self.last_epoch / self.T_max))
+ / (1 + math.cos(math.pi * (self.last_epoch - 1) / self.T_max))
+ * (group["lr"] - self.eta_min)
+ + self.eta_min
+ for group in self.optimizer.param_groups
+ ]
def _get_closed_form_lr(self):
- return [self.eta_min + (base_lr - self.eta_min) *
- (1 + math.cos(math.pi * self.last_epoch / self.T_max)) / 2
- for base_lr in self.base_lrs]
+ return [
+ self.eta_min
+ + (base_lr - self.eta_min)
+ * (1 + math.cos(math.pi * self.last_epoch / self.T_max))
+ / 2
+ for base_lr in self.base_lrs
+ ]
class ChainedScheduler(LRScheduler):
@@ -905,19 +1050,23 @@ class ChainedScheduler(LRScheduler):
def __init__(self, schedulers):
for scheduler_idx in range(1, len(schedulers)):
- if (schedulers[scheduler_idx].optimizer != schedulers[0].optimizer):
+ if schedulers[scheduler_idx].optimizer != schedulers[0].optimizer:
raise ValueError(
"ChainedScheduler expects all schedulers to belong to the same optimizer, but "
f"got schedulers at index {0} and {scheduler_idx} to be different"
)
self._schedulers = list(schedulers)
self.optimizer = schedulers[0].optimizer
- self._last_lr = [group['lr'] for group in self._schedulers[-1].optimizer.param_groups]
+ self._last_lr = [
+ group["lr"] for group in self._schedulers[-1].optimizer.param_groups
+ ]
def step(self):
for scheduler in self._schedulers:
scheduler.step()
- self._last_lr = [group['lr'] for group in self._schedulers[-1].optimizer.param_groups]
+ self._last_lr = [
+ group["lr"] for group in self._schedulers[-1].optimizer.param_groups
+ ]
def state_dict(self):
"""Returns the state of the scheduler as a :class:`dict`.
@@ -926,11 +1075,15 @@ class ChainedScheduler(LRScheduler):
is not the optimizer.
The wrapped scheduler states will also be saved.
"""
- state_dict = {key: value for key, value in self.__dict__.items() if key not in ('optimizer', '_schedulers')}
- state_dict['_schedulers'] = [None] * len(self._schedulers)
+ state_dict = {
+ key: value
+ for key, value in self.__dict__.items()
+ if key not in ("optimizer", "_schedulers")
+ }
+ state_dict["_schedulers"] = [None] * len(self._schedulers)
for idx, s in enumerate(self._schedulers):
- state_dict['_schedulers'][idx] = s.state_dict()
+ state_dict["_schedulers"][idx] = s.state_dict()
return state_dict
@@ -941,11 +1094,11 @@ class ChainedScheduler(LRScheduler):
state_dict (dict): scheduler state. Should be an object returned
from a call to :meth:`state_dict`.
"""
- _schedulers = state_dict.pop('_schedulers')
+ _schedulers = state_dict.pop("_schedulers")
self.__dict__.update(state_dict)
# Restore state_dict keys in order to prevent side effects
# https://github.com/pytorch/pytorch/issues/32756
- state_dict['_schedulers'] = _schedulers
+ state_dict["_schedulers"] = _schedulers
for idx, s in enumerate(_schedulers):
self._schedulers[idx].load_state_dict(s)
@@ -1011,22 +1164,33 @@ class ReduceLROnPlateau(LRScheduler):
>>> scheduler.step(val_loss)
"""
- def __init__(self, optimizer, mode='min', factor=0.1, patience=10,
- threshold=1e-4, threshold_mode='rel', cooldown=0,
- min_lr=0, eps=1e-8, verbose="deprecated"):
-
+ def __init__(
+ self,
+ optimizer,
+ mode="min",
+ factor=0.1,
+ patience=10,
+ threshold=1e-4,
+ threshold_mode="rel",
+ cooldown=0,
+ min_lr=0,
+ eps=1e-8,
+ verbose="deprecated",
+ ):
if factor >= 1.0:
- raise ValueError('Factor should be < 1.0.')
+ raise ValueError("Factor should be < 1.0.")
self.factor = factor
# Attach optimizer
if not isinstance(optimizer, Optimizer):
- raise TypeError(f'{type(optimizer).__name__} is not an Optimizer')
+ raise TypeError(f"{type(optimizer).__name__} is not an Optimizer")
self.optimizer = optimizer
if isinstance(min_lr, (list, tuple)):
if len(min_lr) != len(optimizer.param_groups):
- raise ValueError(f"expected {len(optimizer.param_groups)} min_lrs, got {len(min_lr)}")
+ raise ValueError(
+ f"expected {len(optimizer.param_groups)} min_lrs, got {len(min_lr)}"
+ )
self.min_lrs = list(min_lr)
else:
self.min_lrs = [min_lr] * len(optimizer.param_groups)
@@ -1044,9 +1208,10 @@ class ReduceLROnPlateau(LRScheduler):
self.mode_worse = None # the worse value for the chosen mode
self.eps = eps
self.last_epoch = 0
- self._last_lr = [group['lr'] for group in self.optimizer.param_groups]
- self._init_is_better(mode=mode, threshold=threshold,
- threshold_mode=threshold_mode)
+ self._last_lr = [group["lr"] for group in self.optimizer.param_groups]
+ self._init_is_better(
+ mode=mode, threshold=threshold, threshold_mode=threshold_mode
+ )
self._reset()
def _reset(self):
@@ -1079,41 +1244,41 @@ class ReduceLROnPlateau(LRScheduler):
self.cooldown_counter = self.cooldown
self.num_bad_epochs = 0
- self._last_lr = [group['lr'] for group in self.optimizer.param_groups]
+ self._last_lr = [group["lr"] for group in self.optimizer.param_groups]
def _reduce_lr(self, epoch):
for i, param_group in enumerate(self.optimizer.param_groups):
- old_lr = float(param_group['lr'])
+ old_lr = float(param_group["lr"])
new_lr = max(old_lr * self.factor, self.min_lrs[i])
if old_lr - new_lr > self.eps:
- param_group['lr'] = new_lr
+ param_group["lr"] = new_lr
@property
def in_cooldown(self):
return self.cooldown_counter > 0
def is_better(self, a, best):
- if self.mode == 'min' and self.threshold_mode == 'rel':
- rel_epsilon = 1. - self.threshold
+ if self.mode == "min" and self.threshold_mode == "rel":
+ rel_epsilon = 1.0 - self.threshold
return a < best * rel_epsilon
- elif self.mode == 'min' and self.threshold_mode == 'abs':
+ elif self.mode == "min" and self.threshold_mode == "abs":
return a < best - self.threshold
- elif self.mode == 'max' and self.threshold_mode == 'rel':
- rel_epsilon = self.threshold + 1.
+ elif self.mode == "max" and self.threshold_mode == "rel":
+ rel_epsilon = self.threshold + 1.0
return a > best * rel_epsilon
else: # mode == 'max' and epsilon_mode == 'abs':
return a > best + self.threshold
def _init_is_better(self, mode, threshold, threshold_mode):
- if mode not in {'min', 'max'}:
- raise ValueError('mode ' + mode + ' is unknown!')
- if threshold_mode not in {'rel', 'abs'}:
- raise ValueError('threshold mode ' + threshold_mode + ' is unknown!')
+ if mode not in {"min", "max"}:
+ raise ValueError("mode " + mode + " is unknown!")
+ if threshold_mode not in {"rel", "abs"}:
+ raise ValueError("threshold mode " + threshold_mode + " is unknown!")
- if mode == 'min':
+ if mode == "min":
self.mode_worse = inf
else: # mode == 'max':
self.mode_worse = -inf
@@ -1123,11 +1288,15 @@ class ReduceLROnPlateau(LRScheduler):
self.threshold_mode = threshold_mode
def state_dict(self):
- return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}
+ return {
+ key: value for key, value in self.__dict__.items() if key != "optimizer"
+ }
def load_state_dict(self, state_dict):
self.__dict__.update(state_dict)
- self._init_is_better(mode=self.mode, threshold=self.threshold, threshold_mode=self.threshold_mode)
+ self._init_is_better(
+ mode=self.mode, threshold=self.threshold, threshold_mode=self.threshold_mode
+ )
class CyclicLR(LRScheduler):
@@ -1229,42 +1398,44 @@ class CyclicLR(LRScheduler):
.. _bckenstler/CLR: https://github.com/bckenstler/CLR
"""
- def __init__(self,
- optimizer,
- base_lr,
- max_lr,
- step_size_up=2000,
- step_size_down=None,
- mode='triangular',
- gamma=1.,
- scale_fn=None,
- scale_mode='cycle',
- cycle_momentum=True,
- base_momentum=0.8,
- max_momentum=0.9,
- last_epoch=-1,
- verbose="deprecated"):
-
+ def __init__(
+ self,
+ optimizer,
+ base_lr,
+ max_lr,
+ step_size_up=2000,
+ step_size_down=None,
+ mode="triangular",
+ gamma=1.0,
+ scale_fn=None,
+ scale_mode="cycle",
+ cycle_momentum=True,
+ base_momentum=0.8,
+ max_momentum=0.9,
+ last_epoch=-1,
+ verbose="deprecated",
+ ):
# Attach optimizer
if not isinstance(optimizer, Optimizer):
- raise TypeError(f'{type(optimizer).__name__} is not an Optimizer')
+ raise TypeError(f"{type(optimizer).__name__} is not an Optimizer")
self.optimizer = optimizer
- base_lrs = self._format_param('base_lr', optimizer, base_lr)
+ base_lrs = self._format_param("base_lr", optimizer, base_lr)
if last_epoch == -1:
for lr, group in zip(base_lrs, optimizer.param_groups):
- group['lr'] = lr
+ group["lr"] = lr
- self.max_lrs = self._format_param('max_lr', optimizer, max_lr)
+ self.max_lrs = self._format_param("max_lr", optimizer, max_lr)
step_size_up = float(step_size_up)
- step_size_down = float(step_size_down) if step_size_down is not None else step_size_up
+ step_size_down = (
+ float(step_size_down) if step_size_down is not None else step_size_up
+ )
self.total_size = step_size_up + step_size_down
self.step_ratio = step_size_up / self.total_size
- if mode not in ['triangular', 'triangular2', 'exp_range'] \
- and scale_fn is None:
- raise ValueError('mode is invalid and scale_fn is None')
+ if mode not in ["triangular", "triangular2", "exp_range"] and scale_fn is None:
+ raise ValueError("mode is invalid and scale_fn is None")
self.mode = mode
self.gamma = gamma
@@ -1276,20 +1447,31 @@ class CyclicLR(LRScheduler):
self.cycle_momentum = cycle_momentum
if cycle_momentum:
- if 'momentum' not in optimizer.defaults and 'betas' not in optimizer.defaults:
- raise ValueError('optimizer must support momentum or beta1 with `cycle_momentum` option enabled')
+ if (
+ "momentum" not in optimizer.defaults
+ and "betas" not in optimizer.defaults
+ ):
+ raise ValueError(
+ "optimizer must support momentum or beta1 with `cycle_momentum` option enabled"
+ )
- self.use_beta1 = 'betas' in self.optimizer.defaults
- self.base_momentums = self._format_param('base_momentum', optimizer, base_momentum)
- self.max_momentums = self._format_param('max_momentum', optimizer, max_momentum)
+ self.use_beta1 = "betas" in self.optimizer.defaults
+ self.base_momentums = self._format_param(
+ "base_momentum", optimizer, base_momentum
+ )
+ self.max_momentums = self._format_param(
+ "max_momentum", optimizer, max_momentum
+ )
if last_epoch == -1:
- for m_momentum, b_momentum, group in zip(self.max_momentums, self.base_momentums, optimizer.param_groups):
+ for m_momentum, b_momentum, group in zip(
+ self.max_momentums, self.base_momentums, optimizer.param_groups
+ ):
if self.use_beta1:
- group['betas'] = (m_momentum, *group['betas'][1:])
+ group["betas"] = (m_momentum, *group["betas"][1:])
else:
- group['momentum'] = m_momentum
- group['max_momentum'] = m_momentum
- group['base_momentum'] = b_momentum
+ group["momentum"] = m_momentum
+ group["max_momentum"] = m_momentum
+ group["base_momentum"] = b_momentum
super().__init__(optimizer, last_epoch, verbose)
self.base_lrs = base_lrs
@@ -1297,21 +1479,23 @@ class CyclicLR(LRScheduler):
def _init_scale_fn(self):
if self._scale_fn_custom is not None:
return
- if self.mode == 'triangular':
+ if self.mode == "triangular":
self._scale_fn_ref = self._triangular_scale_fn
- self.scale_mode = 'cycle'
- elif self.mode == 'triangular2':
+ self.scale_mode = "cycle"
+ elif self.mode == "triangular2":
self._scale_fn_ref = self._triangular2_scale_fn
- self.scale_mode = 'cycle'
- elif self.mode == 'exp_range':
+ self.scale_mode = "cycle"
+ elif self.mode == "exp_range":
self._scale_fn_ref = partial(self._exp_range_scale_fn, self.gamma)
- self.scale_mode = 'iterations'
+ self.scale_mode = "iterations"
def _format_param(self, name, optimizer, param):
"""Return correctly formatted lr/momentum for each param group."""
if isinstance(param, (list, tuple)):
if len(param) != len(optimizer.param_groups):
- raise ValueError(f"expected {len(optimizer.param_groups)} values for {name}, got {len(param)}")
+ raise ValueError(
+ f"expected {len(optimizer.param_groups)} values for {name}, got {len(param)}"
+ )
return param
else:
return [param] * len(optimizer.param_groups)
@@ -1324,15 +1508,15 @@ class CyclicLR(LRScheduler):
@staticmethod
def _triangular_scale_fn(x):
- return 1.
+ return 1.0
@staticmethod
def _triangular2_scale_fn(x):
- return 1 / (2. ** (x - 1))
+ return 1 / (2.0 ** (x - 1))
@staticmethod
def _exp_range_scale_fn(gamma, x):
- return gamma ** x
+ return gamma**x
def get_lr(self):
"""Calculates the learning rate at batch index. This function treats
@@ -1343,11 +1527,14 @@ class CyclicLR(LRScheduler):
"""
if not self._get_lr_called_within_step:
- warnings.warn("To get the last learning rate computed by the scheduler, "
- "please use `get_last_lr()`.", UserWarning)
+ warnings.warn(
+ "To get the last learning rate computed by the scheduler, "
+ "please use `get_last_lr()`.",
+ UserWarning,
+ )
cycle = math.floor(1 + self.last_epoch / self.total_size)
- x = 1. + self.last_epoch / self.total_size - cycle
+ x = 1.0 + self.last_epoch / self.total_size - cycle
if x <= self.step_ratio:
scale_factor = x / self.step_ratio
else:
@@ -1356,7 +1543,7 @@ class CyclicLR(LRScheduler):
lrs = []
for base_lr, max_lr in zip(self.base_lrs, self.max_lrs):
base_height = (max_lr - base_lr) * scale_factor
- if self.scale_mode == 'cycle':
+ if self.scale_mode == "cycle":
lr = base_lr + base_height * self.scale_fn(cycle)
else:
lr = base_lr + base_height * self.scale_fn(self.last_epoch)
@@ -1364,18 +1551,22 @@ class CyclicLR(LRScheduler):
if self.cycle_momentum:
momentums = []
- for base_momentum, max_momentum in zip(self.base_momentums, self.max_momentums):
+ for base_momentum, max_momentum in zip(
+ self.base_momentums, self.max_momentums
+ ):
base_height = (max_momentum - base_momentum) * scale_factor
- if self.scale_mode == 'cycle':
+ if self.scale_mode == "cycle":
momentum = max_momentum - base_height * self.scale_fn(cycle)
else:
- momentum = max_momentum - base_height * self.scale_fn(self.last_epoch)
+ momentum = max_momentum - base_height * self.scale_fn(
+ self.last_epoch
+ )
momentums.append(momentum)
for param_group, momentum in zip(self.optimizer.param_groups, momentums):
if self.use_beta1:
- param_group['betas'] = (momentum, *param_group['betas'][1:])
+ param_group["betas"] = (momentum, *param_group["betas"][1:])
else:
- param_group['momentum'] = momentum
+ param_group["momentum"] = momentum
return lrs
@@ -1383,18 +1574,18 @@ class CyclicLR(LRScheduler):
state = super().state_dict()
# We are dropping the `_scale_fn_ref` attribute because it is a
# `weakref.WeakMethod` and can't be pickled.
- state.pop('_scale_fn_ref')
- fn = state.pop('_scale_fn_custom')
- state['_scale_fn_custom'] = None
+ state.pop("_scale_fn_ref")
+ fn = state.pop("_scale_fn_custom")
+ state["_scale_fn_custom"] = None
if fn is not None and not isinstance(fn, types.FunctionType):
# The _scale_fn_custom will only be saved if it is a callable object
# and not if it is a function or lambda.
- state['_scale_fn_custom'] = fn.__dict__.copy()
+ state["_scale_fn_custom"] = fn.__dict__.copy()
return state
def load_state_dict(self, state_dict):
- fn = state_dict.pop('_scale_fn_custom')
+ fn = state_dict.pop("_scale_fn_custom")
super().load_state_dict(state_dict)
if fn is not None:
self._scale_fn_custom.__dict__.update(fn)
@@ -1434,13 +1625,17 @@ class CosineAnnealingWarmRestarts(LRScheduler):
https://arxiv.org/abs/1608.03983
"""
- def __init__(self, optimizer, T_0, T_mult=1, eta_min=0, last_epoch=-1, verbose="deprecated"):
+ def __init__(
+ self, optimizer, T_0, T_mult=1, eta_min=0, last_epoch=-1, verbose="deprecated"
+ ):
if T_0 <= 0 or not isinstance(T_0, int):
raise ValueError(f"Expected positive integer T_0, but got {T_0}")
if T_mult < 1 or not isinstance(T_mult, int):
raise ValueError(f"Expected integer T_mult >= 1, but got {T_mult}")
if not isinstance(eta_min, (float, int)):
- raise ValueError(f"Expected float or int eta_min, but got {eta_min} of type {type(eta_min)}")
+ raise ValueError(
+ f"Expected float or int eta_min, but got {eta_min} of type {type(eta_min)}"
+ )
self.T_0 = T_0
self.T_i = T_0
self.T_mult = T_mult
@@ -1450,11 +1645,19 @@ class CosineAnnealingWarmRestarts(LRScheduler):
def get_lr(self):
if not self._get_lr_called_within_step:
- warnings.warn("To get the last learning rate computed by the scheduler, "
- "please use `get_last_lr()`.", UserWarning)
+ warnings.warn(
+ "To get the last learning rate computed by the scheduler, "
+ "please use `get_last_lr()`.",
+ UserWarning,
+ )
- return [self.eta_min + (base_lr - self.eta_min) * (1 + math.cos(math.pi * self.T_cur / self.T_i)) / 2
- for base_lr in self.base_lrs]
+ return [
+ self.eta_min
+ + (base_lr - self.eta_min)
+ * (1 + math.cos(math.pi * self.T_cur / self.T_i))
+ / 2
+ for base_lr in self.base_lrs
+ ]
def step(self, epoch=None):
"""Step could be called after every batch update
@@ -1500,8 +1703,14 @@ class CosineAnnealingWarmRestarts(LRScheduler):
if self.T_mult == 1:
self.T_cur = epoch % self.T_0
else:
- n = int(math.log((epoch / self.T_0 * (self.T_mult - 1) + 1), self.T_mult))
- self.T_cur = epoch - self.T_0 * (self.T_mult ** n - 1) / (self.T_mult - 1)
+ n = int(
+ math.log(
+ (epoch / self.T_0 * (self.T_mult - 1) + 1), self.T_mult
+ )
+ )
+ self.T_cur = epoch - self.T_0 * (self.T_mult**n - 1) / (
+ self.T_mult - 1
+ )
self.T_i = self.T_0 * self.T_mult ** (n)
else:
self.T_i = self.T_0
@@ -1509,7 +1718,6 @@ class CosineAnnealingWarmRestarts(LRScheduler):
self.last_epoch = math.floor(epoch)
class _enable_get_lr_call:
-
def __init__(self, o):
self.o = o
@@ -1524,9 +1732,9 @@ class CosineAnnealingWarmRestarts(LRScheduler):
with _enable_get_lr_call(self):
for i, data in enumerate(zip(self.optimizer.param_groups, self.get_lr())):
param_group, lr = data
- param_group['lr'] = lr
+ param_group["lr"] = lr
- self._last_lr = [group['lr'] for group in self.optimizer.param_groups]
+ self._last_lr = [group["lr"] for group in self.optimizer.param_groups]
class OneCycleLR(LRScheduler):
@@ -1635,120 +1843,141 @@ class OneCycleLR(LRScheduler):
.. _Super-Convergence\: Very Fast Training of Neural Networks Using Large Learning Rates:
https://arxiv.org/abs/1708.07120
"""
- def __init__(self,
- optimizer,
- max_lr,
- total_steps=None,
- epochs=None,
- steps_per_epoch=None,
- pct_start=0.3,
- anneal_strategy='cos',
- cycle_momentum=True,
- base_momentum=0.85,
- max_momentum=0.95,
- div_factor=25.,
- final_div_factor=1e4,
- three_phase=False,
- last_epoch=-1,
- verbose="deprecated"):
+ def __init__(
+ self,
+ optimizer,
+ max_lr,
+ total_steps=None,
+ epochs=None,
+ steps_per_epoch=None,
+ pct_start=0.3,
+ anneal_strategy="cos",
+ cycle_momentum=True,
+ base_momentum=0.85,
+ max_momentum=0.95,
+ div_factor=25.0,
+ final_div_factor=1e4,
+ three_phase=False,
+ last_epoch=-1,
+ verbose="deprecated",
+ ):
# Validate optimizer
if not isinstance(optimizer, Optimizer):
- raise TypeError(f'{type(optimizer).__name__} is not an Optimizer')
+ raise TypeError(f"{type(optimizer).__name__} is not an Optimizer")
self.optimizer = optimizer
# Validate total_steps
if total_steps is None and epochs is None and steps_per_epoch is None:
- raise ValueError("You must define either total_steps OR (epochs AND steps_per_epoch)")
+ raise ValueError(
+ "You must define either total_steps OR (epochs AND steps_per_epoch)"
+ )
elif total_steps is not None:
if total_steps <= 0 or not isinstance(total_steps, int):
- raise ValueError(f"Expected positive integer total_steps, but got {total_steps}")
+ raise ValueError(
+ f"Expected positive integer total_steps, but got {total_steps}"
+ )
self.total_steps = total_steps
else:
if epochs <= 0 or not isinstance(epochs, int):
raise ValueError(f"Expected positive integer epochs, but got {epochs}")
if steps_per_epoch <= 0 or not isinstance(steps_per_epoch, int):
- raise ValueError(f"Expected positive integer steps_per_epoch, but got {steps_per_epoch}")
+ raise ValueError(
+ f"Expected positive integer steps_per_epoch, but got {steps_per_epoch}"
+ )
self.total_steps = epochs * steps_per_epoch
if three_phase:
self._schedule_phases = [
{
- 'end_step': float(pct_start * self.total_steps) - 1,
- 'start_lr': 'initial_lr',
- 'end_lr': 'max_lr',
- 'start_momentum': 'max_momentum',
- 'end_momentum': 'base_momentum',
+ "end_step": float(pct_start * self.total_steps) - 1,
+ "start_lr": "initial_lr",
+ "end_lr": "max_lr",
+ "start_momentum": "max_momentum",
+ "end_momentum": "base_momentum",
},
{
- 'end_step': float(2 * pct_start * self.total_steps) - 2,
- 'start_lr': 'max_lr',
- 'end_lr': 'initial_lr',
- 'start_momentum': 'base_momentum',
- 'end_momentum': 'max_momentum',
+ "end_step": float(2 * pct_start * self.total_steps) - 2,
+ "start_lr": "max_lr",
+ "end_lr": "initial_lr",
+ "start_momentum": "base_momentum",
+ "end_momentum": "max_momentum",
},
{
- 'end_step': self.total_steps - 1,
- 'start_lr': 'initial_lr',
- 'end_lr': 'min_lr',
- 'start_momentum': 'max_momentum',
- 'end_momentum': 'max_momentum',
+ "end_step": self.total_steps - 1,
+ "start_lr": "initial_lr",
+ "end_lr": "min_lr",
+ "start_momentum": "max_momentum",
+ "end_momentum": "max_momentum",
},
]
else:
self._schedule_phases = [
{
- 'end_step': float(pct_start * self.total_steps) - 1,
- 'start_lr': 'initial_lr',
- 'end_lr': 'max_lr',
- 'start_momentum': 'max_momentum',
- 'end_momentum': 'base_momentum',
+ "end_step": float(pct_start * self.total_steps) - 1,
+ "start_lr": "initial_lr",
+ "end_lr": "max_lr",
+ "start_momentum": "max_momentum",
+ "end_momentum": "base_momentum",
},
{
- 'end_step': self.total_steps - 1,
- 'start_lr': 'max_lr',
- 'end_lr': 'min_lr',
- 'start_momentum': 'base_momentum',
- 'end_momentum': 'max_momentum',
+ "end_step": self.total_steps - 1,
+ "start_lr": "max_lr",
+ "end_lr": "min_lr",
+ "start_momentum": "base_momentum",
+ "end_momentum": "max_momentum",
},
]
# Validate pct_start
if pct_start < 0 or pct_start > 1 or not isinstance(pct_start, float):
- raise ValueError(f"Expected float between 0 and 1 pct_start, but got {pct_start}")
+ raise ValueError(
+ f"Expected float between 0 and 1 pct_start, but got {pct_start}"
+ )
# Validate anneal_strategy
- if anneal_strategy not in ['cos', 'linear']:
- raise ValueError(f"anneal_strategy must by one of 'cos' or 'linear', instead got {anneal_strategy}")
- elif anneal_strategy == 'cos':
+ if anneal_strategy not in ["cos", "linear"]:
+ raise ValueError(
+ f"anneal_strategy must by one of 'cos' or 'linear', instead got {anneal_strategy}"
+ )
+ elif anneal_strategy == "cos":
self.anneal_func = self._annealing_cos
- elif anneal_strategy == 'linear':
+ elif anneal_strategy == "linear":
self.anneal_func = self._annealing_linear
# Initialize learning rate variables
- max_lrs = self._format_param('max_lr', self.optimizer, max_lr)
+ max_lrs = self._format_param("max_lr", self.optimizer, max_lr)
if last_epoch == -1:
for idx, group in enumerate(self.optimizer.param_groups):
- group['initial_lr'] = max_lrs[idx] / div_factor
- group['max_lr'] = max_lrs[idx]
- group['min_lr'] = group['initial_lr'] / final_div_factor
+ group["initial_lr"] = max_lrs[idx] / div_factor
+ group["max_lr"] = max_lrs[idx]
+ group["min_lr"] = group["initial_lr"] / final_div_factor
# Initialize momentum variables
self.cycle_momentum = cycle_momentum
if self.cycle_momentum:
- if 'momentum' not in self.optimizer.defaults and 'betas' not in self.optimizer.defaults:
- raise ValueError('optimizer must support momentum or beta1 with `cycle_momentum` option enabled')
- self.use_beta1 = 'betas' in self.optimizer.defaults
- max_momentums = self._format_param('max_momentum', optimizer, max_momentum)
- base_momentums = self._format_param('base_momentum', optimizer, base_momentum)
+ if (
+ "momentum" not in self.optimizer.defaults
+ and "betas" not in self.optimizer.defaults
+ ):
+ raise ValueError(
+ "optimizer must support momentum or beta1 with `cycle_momentum` option enabled"
+ )
+ self.use_beta1 = "betas" in self.optimizer.defaults
+ max_momentums = self._format_param("max_momentum", optimizer, max_momentum)
+ base_momentums = self._format_param(
+ "base_momentum", optimizer, base_momentum
+ )
if last_epoch == -1:
- for m_momentum, b_momentum, group in zip(max_momentums, base_momentums, optimizer.param_groups):
+ for m_momentum, b_momentum, group in zip(
+ max_momentums, base_momentums, optimizer.param_groups
+ ):
if self.use_beta1:
- group['betas'] = (m_momentum, *group['betas'][1:])
+ group["betas"] = (m_momentum, *group["betas"][1:])
else:
- group['momentum'] = m_momentum
- group['max_momentum'] = m_momentum
- group['base_momentum'] = b_momentum
+ group["momentum"] = m_momentum
+ group["max_momentum"] = m_momentum
+ group["base_momentum"] = b_momentum
super().__init__(optimizer, last_epoch, verbose)
@@ -1756,7 +1985,9 @@ class OneCycleLR(LRScheduler):
"""Return correctly formatted lr/momentum for each param group."""
if isinstance(param, (list, tuple)):
if len(param) != len(optimizer.param_groups):
- raise ValueError(f"expected {len(optimizer.param_groups)} values for {name}, got {len(param)}")
+ raise ValueError(
+ f"expected {len(optimizer.param_groups)} values for {name}, got {len(param)}"
+ )
return param
else:
return [param] * len(optimizer.param_groups)
@@ -1774,8 +2005,11 @@ class OneCycleLR(LRScheduler):
def get_lr(self):
if not self._get_lr_called_within_step:
- warnings.warn("To get the last learning rate computed by the scheduler, "
- "please use `get_last_lr()`.", UserWarning)
+ warnings.warn(
+ "To get the last learning rate computed by the scheduler, "
+ "please use `get_last_lr()`.",
+ UserWarning,
+ )
lrs = []
step_num = self.last_epoch
@@ -1788,20 +2022,26 @@ class OneCycleLR(LRScheduler):
for group in self.optimizer.param_groups:
start_step = 0
for i, phase in enumerate(self._schedule_phases):
- end_step = phase['end_step']
+ end_step = phase["end_step"]
if step_num <= end_step or i == len(self._schedule_phases) - 1:
pct = (step_num - start_step) / (end_step - start_step)
- computed_lr = self.anneal_func(group[phase['start_lr']], group[phase['end_lr']], pct)
+ computed_lr = self.anneal_func(
+ group[phase["start_lr"]], group[phase["end_lr"]], pct
+ )
if self.cycle_momentum:
- computed_momentum = self.anneal_func(group[phase['start_momentum']], group[phase['end_momentum']], pct)
+ computed_momentum = self.anneal_func(
+ group[phase["start_momentum"]],
+ group[phase["end_momentum"]],
+ pct,
+ )
break
- start_step = phase['end_step']
+ start_step = phase["end_step"]
lrs.append(computed_lr)
if self.cycle_momentum:
if self.use_beta1:
- group['betas'] = (computed_momentum, *group['betas'][1:])
+ group["betas"] = (computed_momentum, *group["betas"][1:])
else:
- group['momentum'] = computed_momentum
+ group["momentum"] = computed_momentum
return lrs
diff --git a/torch/optim/nadam.py b/torch/optim/nadam.py
index f05b6b0ae7..190b1a64a0 100644
--- a/torch/optim/nadam.py
+++ b/torch/optim/nadam.py
@@ -1,17 +1,39 @@
+from typing import List, Optional
+
import torch
from torch import Tensor
-from .optimizer import (Optimizer, _use_grad_for_differentiable, _get_value, _dispatch_sqrt,
- _stack_if_compiling, _get_scalar_dtype, _default_to_fused_or_foreach,
- _view_as_real, _capturable_doc, _differentiable_doc, _foreach_doc,)
-from typing import List, Optional
+from .optimizer import (
+ _capturable_doc,
+ _default_to_fused_or_foreach,
+ _differentiable_doc,
+ _dispatch_sqrt,
+ _foreach_doc,
+ _get_scalar_dtype,
+ _get_value,
+ _stack_if_compiling,
+ _use_grad_for_differentiable,
+ _view_as_real,
+ Optimizer,
+)
+
+__all__ = ["NAdam", "nadam"]
-__all__ = ['NAdam', 'nadam']
class NAdam(Optimizer):
- def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8,
- weight_decay=0, momentum_decay=4e-3, decoupled_weight_decay: bool = False,
- *, foreach: Optional[bool] = None, capturable: bool = False,
- differentiable: bool = False):
+ def __init__(
+ self,
+ params,
+ lr=2e-3,
+ betas=(0.9, 0.999),
+ eps=1e-8,
+ weight_decay=0,
+ momentum_decay=4e-3,
+ decoupled_weight_decay: bool = False,
+ *,
+ foreach: Optional[bool] = None,
+ capturable: bool = False,
+ differentiable: bool = False,
+ ):
if not 0.0 <= lr:
raise ValueError(f"Invalid learning rate: {lr}")
if not 0.0 <= eps:
@@ -24,40 +46,65 @@ class NAdam(Optimizer):
raise ValueError(f"Invalid weight_decay value: {weight_decay}")
if not 0.0 <= momentum_decay:
raise ValueError(f"Invalid momentum_decay value: {momentum_decay}")
- defaults = dict(lr=lr, betas=betas, eps=eps,
- weight_decay=weight_decay, momentum_decay=momentum_decay,
- decoupled_weight_decay=decoupled_weight_decay,
- foreach=foreach, capturable=capturable, differentiable=differentiable)
+ defaults = dict(
+ lr=lr,
+ betas=betas,
+ eps=eps,
+ weight_decay=weight_decay,
+ momentum_decay=momentum_decay,
+ decoupled_weight_decay=decoupled_weight_decay,
+ foreach=foreach,
+ capturable=capturable,
+ differentiable=differentiable,
+ )
super().__init__(params, defaults)
def __setstate__(self, state):
super().__setstate__(state)
for group in self.param_groups:
- group.setdefault('foreach', None)
- group.setdefault('capturable', False)
- group.setdefault('differentiable', False)
- group.setdefault('decoupled_weight_decay', False)
+ group.setdefault("foreach", None)
+ group.setdefault("capturable", False)
+ group.setdefault("differentiable", False)
+ group.setdefault("decoupled_weight_decay", False)
for p in group["params"]:
p_state = self.state.get(p, [])
if len(p_state) != 0:
- if not torch.is_tensor(p_state['step']):
+ if not torch.is_tensor(p_state["step"]):
step_val = float(p_state["step"])
- p_state["step"] = (torch.tensor(step_val, dtype=_get_scalar_dtype(), device=p.device)
- if group['capturable'] else torch.tensor(step_val, dtype=_get_scalar_dtype()))
- if not torch.is_tensor(p_state['mu_product']):
+ p_state["step"] = (
+ torch.tensor(
+ step_val, dtype=_get_scalar_dtype(), device=p.device
+ )
+ if group["capturable"]
+ else torch.tensor(step_val, dtype=_get_scalar_dtype())
+ )
+ if not torch.is_tensor(p_state["mu_product"]):
mu_prod_val = p_state["mu_product"]
- p_state["mu_product"] = (torch.tensor(mu_prod_val, dtype=_get_scalar_dtype(), device=p.device)
- if group['capturable'] else torch.tensor(mu_prod_val, dtype=_get_scalar_dtype()))
-
-
- def _init_group(self, group, params_with_grad, grads, exp_avgs, exp_avg_sqs, mu_products, state_steps):
+ p_state["mu_product"] = (
+ torch.tensor(
+ mu_prod_val, dtype=_get_scalar_dtype(), device=p.device
+ )
+ if group["capturable"]
+ else torch.tensor(mu_prod_val, dtype=_get_scalar_dtype())
+ )
+
+ def _init_group(
+ self,
+ group,
+ params_with_grad,
+ grads,
+ exp_avgs,
+ exp_avg_sqs,
+ mu_products,
+ state_steps,
+ ):
has_complex = False
- for p in group['params']:
+ for p in group["params"]:
if p.grad is not None:
has_complex |= torch.is_complex(p)
params_with_grad.append(p)
if p.grad.is_sparse:
- raise RuntimeError('NAdam does not support sparse gradients')
+ raise RuntimeError("NAdam does not support sparse gradients")
grads.append(p.grad)
state = self.state[p]
@@ -66,23 +113,29 @@ class NAdam(Optimizer):
# note(crcrpar): [special device hosting for step]
# Deliberately host `step` and `mu_product` on CPU if capturable is False.
# This is because kernel launches are costly on CUDA and XLA.
- state['step'] = (
+ state["step"] = (
torch.zeros((), dtype=_get_scalar_dtype(), device=p.device)
- if group['capturable'] else torch.tensor(0.0, dtype=_get_scalar_dtype())
+ if group["capturable"]
+ else torch.tensor(0.0, dtype=_get_scalar_dtype())
)
- state['mu_product'] = (
+ state["mu_product"] = (
torch.ones((), dtype=_get_scalar_dtype(), device=p.device)
- if group['capturable'] else torch.tensor(1.0, dtype=_get_scalar_dtype())
+ if group["capturable"]
+ else torch.tensor(1.0, dtype=_get_scalar_dtype())
)
# Exponential moving average of gradient values
- state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
+ state["exp_avg"] = torch.zeros_like(
+ p, memory_format=torch.preserve_format
+ )
# Exponential moving average of squared gradient values
- state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
+ state["exp_avg_sq"] = torch.zeros_like(
+ p, memory_format=torch.preserve_format
+ )
- exp_avgs.append(state['exp_avg'])
- exp_avg_sqs.append(state['exp_avg_sq'])
- mu_products.append(state['mu_product'])
- state_steps.append(state['step'])
+ exp_avgs.append(state["exp_avg"])
+ exp_avg_sqs.append(state["exp_avg_sq"])
+ mu_products.append(state["mu_product"])
+ state_steps.append(state["step"])
return has_complex
@_use_grad_for_differentiable
@@ -107,31 +160,43 @@ class NAdam(Optimizer):
exp_avg_sqs = []
mu_products = []
state_steps = []
- beta1, beta2 = group['betas']
-
- has_complex = self._init_group(group, params_with_grad, grads, exp_avgs, exp_avg_sqs, mu_products, state_steps)
-
- nadam(params_with_grad,
- grads,
- exp_avgs,
- exp_avg_sqs,
- mu_products,
- state_steps,
- beta1=beta1,
- beta2=beta2,
- lr=group['lr'],
- weight_decay=group['weight_decay'],
- momentum_decay=group['momentum_decay'],
- eps=group['eps'],
- decoupled_weight_decay=group['decoupled_weight_decay'],
- foreach=group['foreach'],
- capturable=group['capturable'],
- differentiable=group['differentiable'],
- has_complex=has_complex)
+ beta1, beta2 = group["betas"]
+
+ has_complex = self._init_group(
+ group,
+ params_with_grad,
+ grads,
+ exp_avgs,
+ exp_avg_sqs,
+ mu_products,
+ state_steps,
+ )
+
+ nadam(
+ params_with_grad,
+ grads,
+ exp_avgs,
+ exp_avg_sqs,
+ mu_products,
+ state_steps,
+ beta1=beta1,
+ beta2=beta2,
+ lr=group["lr"],
+ weight_decay=group["weight_decay"],
+ momentum_decay=group["momentum_decay"],
+ eps=group["eps"],
+ decoupled_weight_decay=group["decoupled_weight_decay"],
+ foreach=group["foreach"],
+ capturable=group["capturable"],
+ differentiable=group["differentiable"],
+ has_complex=has_complex,
+ )
return loss
-NAdam.__doc__ = r"""Implements NAdam algorithm.
+
+NAdam.__doc__ = (
+ r"""Implements NAdam algorithm.
.. math::
\begin{aligned}
@@ -166,7 +231,8 @@ NAdam.__doc__ = r"""Implements NAdam algorithm.
\end{aligned}
For further details regarding the algorithm we refer to `Incorporating Nesterov Momentum into Adam`_.
- """ + fr"""
+ """
+ + rf"""
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
@@ -189,87 +255,98 @@ NAdam.__doc__ = r"""Implements NAdam algorithm.
https://arxiv.org/abs/1711.05101
"""
-
-
-def nadam(params: List[Tensor],
- grads: List[Tensor],
- exp_avgs: List[Tensor],
- exp_avg_sqs: List[Tensor],
- mu_products: List[Tensor],
- state_steps: List[Tensor],
- # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
- # setting this as kwarg for now as functional API is compiled by torch/distributed/optim
- decoupled_weight_decay: bool = False,
- foreach: Optional[bool] = None,
- capturable: bool = False,
- differentiable: bool = False,
- has_complex: bool = False,
- *,
- beta1: float,
- beta2: float,
- lr: float,
- weight_decay: float,
- momentum_decay: float,
- eps: float):
+)
+
+
+def nadam(
+ params: List[Tensor],
+ grads: List[Tensor],
+ exp_avgs: List[Tensor],
+ exp_avg_sqs: List[Tensor],
+ mu_products: List[Tensor],
+ state_steps: List[Tensor],
+ # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
+ # setting this as kwarg for now as functional API is compiled by torch/distributed/optim
+ decoupled_weight_decay: bool = False,
+ foreach: Optional[bool] = None,
+ capturable: bool = False,
+ differentiable: bool = False,
+ has_complex: bool = False,
+ *,
+ beta1: float,
+ beta2: float,
+ lr: float,
+ weight_decay: float,
+ momentum_decay: float,
+ eps: float,
+):
r"""Functional API that performs NAdam algorithm computation.
See :class:`~torch.optim.NAdam` for details.
"""
-
if not all(isinstance(t, torch.Tensor) for t in state_steps):
- raise RuntimeError("API has changed, `state_steps` argument must contain a list of singleton tensors")
+ raise RuntimeError(
+ "API has changed, `state_steps` argument must contain a list of singleton tensors"
+ )
if not all(isinstance(t, torch.Tensor) for t in mu_products):
- raise RuntimeError("API has changed, `mu_products` argument must contain a list of singleton tensors")
+ raise RuntimeError(
+ "API has changed, `mu_products` argument must contain a list of singleton tensors"
+ )
if foreach is None:
- _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False)
+ _, foreach = _default_to_fused_or_foreach(
+ params, differentiable, use_fused=False
+ )
if foreach and torch.jit.is_scripting():
- raise RuntimeError('torch.jit.script not supported with foreach optimizers')
+ raise RuntimeError("torch.jit.script not supported with foreach optimizers")
if foreach and not torch.jit.is_scripting():
func = _multi_tensor_nadam
else:
func = _single_tensor_nadam
- func(params,
- grads,
- exp_avgs,
- exp_avg_sqs,
- mu_products,
- state_steps,
- beta1=beta1,
- beta2=beta2,
- lr=lr,
- weight_decay=weight_decay,
- momentum_decay=momentum_decay,
- decoupled_weight_decay=decoupled_weight_decay,
- eps=eps,
- capturable=capturable,
- differentiable=differentiable,
- has_complex=has_complex)
-
-
-def _single_tensor_nadam(params: List[Tensor],
- grads: List[Tensor],
- exp_avgs: List[Tensor],
- exp_avg_sqs: List[Tensor],
- mu_products: List[Tensor],
- state_steps: List[Tensor],
- *,
- beta1: float,
- beta2: float,
- lr: float,
- weight_decay: float,
- momentum_decay: float,
- eps: float,
- decoupled_weight_decay: bool,
- capturable: bool,
- differentiable: bool,
- has_complex: bool):
-
+ func(
+ params,
+ grads,
+ exp_avgs,
+ exp_avg_sqs,
+ mu_products,
+ state_steps,
+ beta1=beta1,
+ beta2=beta2,
+ lr=lr,
+ weight_decay=weight_decay,
+ momentum_decay=momentum_decay,
+ decoupled_weight_decay=decoupled_weight_decay,
+ eps=eps,
+ capturable=capturable,
+ differentiable=differentiable,
+ has_complex=has_complex,
+ )
+
+
+def _single_tensor_nadam(
+ params: List[Tensor],
+ grads: List[Tensor],
+ exp_avgs: List[Tensor],
+ exp_avg_sqs: List[Tensor],
+ mu_products: List[Tensor],
+ state_steps: List[Tensor],
+ *,
+ beta1: float,
+ beta2: float,
+ lr: float,
+ weight_decay: float,
+ momentum_decay: float,
+ eps: float,
+ decoupled_weight_decay: bool,
+ capturable: bool,
+ differentiable: bool,
+ has_complex: bool,
+):
for i, param in enumerate(params):
grad = grads[i]
exp_avg = exp_avgs[i]
@@ -285,8 +362,8 @@ def _single_tensor_nadam(params: List[Tensor],
# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
if not torch._utils.is_compiling() and capturable:
- assert (
- (param.is_cuda and mu_product.is_cuda and step_t.is_cuda) or (param.is_xla and mu_product.is_xla and step_t.is_xla)
+ assert (param.is_cuda and mu_product.is_cuda and step_t.is_cuda) or (
+ param.is_xla and mu_product.is_xla and step_t.is_xla
), "If capturable=True, params, mu_products, and state_steps must be CUDA or XLA tensors."
# update step
@@ -297,7 +374,7 @@ def _single_tensor_nadam(params: List[Tensor],
else:
step = _get_value(step_t)
- bias_correction2 = 1 - beta2 ** step
+ bias_correction2 = 1 - beta2**step
if weight_decay != 0:
if decoupled_weight_decay:
@@ -307,8 +384,8 @@ def _single_tensor_nadam(params: List[Tensor],
grad = grad.add(param, alpha=weight_decay)
# calculate the momentum cache \mu^{t} and \mu^{t+1}
- mu = beta1 * (1. - 0.5 * (0.96 ** (step * momentum_decay)))
- mu_next = beta1 * (1. - 0.5 * (0.96 ** ((step + 1) * momentum_decay)))
+ mu = beta1 * (1.0 - 0.5 * (0.96 ** (step * momentum_decay)))
+ mu_next = beta1 * (1.0 - 0.5 * (0.96 ** ((step + 1) * momentum_decay)))
# update mu_product
mu_product *= mu
@@ -324,35 +401,40 @@ def _single_tensor_nadam(params: List[Tensor],
# by updating the grad and exp_avg directly and not using the
# scalar "value" argument of addcdiv.
mu_product_next = mu_product * mu_next
- grad = grad * (-lr * (1. - mu) / (1. - mu_product))
- exp_avg = exp_avg * (-lr * mu_next / (1. - mu_product_next))
+ grad = grad * (-lr * (1.0 - mu) / (1.0 - mu_product))
+ exp_avg = exp_avg * (-lr * mu_next / (1.0 - mu_product_next))
param.addcdiv_(grad, denom)
param.addcdiv_(exp_avg, denom)
else:
mu_product_next = _get_value(mu_product) * mu_next
denom.add_(eps)
- param.addcdiv_(grad, denom, value=(-lr * (1. - mu) / (1. - _get_value(mu_product))))
- param.addcdiv_(exp_avg, denom, value=(-lr * mu_next) / (1. - mu_product_next))
-
-
-def _multi_tensor_nadam(params: List[Tensor],
- grads: List[Tensor],
- exp_avgs: List[Tensor],
- exp_avg_sqs: List[Tensor],
- mu_products: List[Tensor],
- state_steps: List[Tensor],
- *,
- beta1: float,
- beta2: float,
- lr: float,
- weight_decay: float,
- momentum_decay: float,
- eps: float,
- decoupled_weight_decay: bool,
- capturable: bool,
- differentiable: bool,
- has_complex: bool):
-
+ param.addcdiv_(
+ grad, denom, value=(-lr * (1.0 - mu) / (1.0 - _get_value(mu_product)))
+ )
+ param.addcdiv_(
+ exp_avg, denom, value=(-lr * mu_next) / (1.0 - mu_product_next)
+ )
+
+
+def _multi_tensor_nadam(
+ params: List[Tensor],
+ grads: List[Tensor],
+ exp_avgs: List[Tensor],
+ exp_avg_sqs: List[Tensor],
+ mu_products: List[Tensor],
+ state_steps: List[Tensor],
+ *,
+ beta1: float,
+ beta2: float,
+ lr: float,
+ weight_decay: float,
+ momentum_decay: float,
+ eps: float,
+ decoupled_weight_decay: bool,
+ capturable: bool,
+ differentiable: bool,
+ has_complex: bool,
+):
if len(params) == 0:
return
@@ -360,25 +442,36 @@ def _multi_tensor_nadam(params: List[Tensor],
# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
if not torch._utils.is_compiling() and capturable:
- assert all(p.is_cuda and mp.is_cuda and step.is_cuda
- for p, mp, step in zip(params, mu_products, state_steps)), \
- "If capturable=True, params, mu_products, and state_steps must be CUDA tensors."
-
-
- grouped_tensors = Optimizer._group_tensors_by_device_and_dtype([params, grads, exp_avgs, exp_avg_sqs, mu_products, state_steps])
- for ((grouped_params, grouped_grads, grouped_exp_avgs,
- grouped_exp_avg_sqs, grouped_mu_products, grouped_state_steps), _) in grouped_tensors.values():
-
+ assert all(
+ p.is_cuda and mp.is_cuda and step.is_cuda
+ for p, mp, step in zip(params, mu_products, state_steps)
+ ), "If capturable=True, params, mu_products, and state_steps must be CUDA tensors."
+
+ grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
+ [params, grads, exp_avgs, exp_avg_sqs, mu_products, state_steps]
+ )
+ for (
+ grouped_params,
+ grouped_grads,
+ grouped_exp_avgs,
+ grouped_exp_avg_sqs,
+ grouped_mu_products,
+ grouped_state_steps,
+ ), _ in grouped_tensors.values():
# handle complex
if has_complex:
- _view_as_real(grouped_params, grouped_grads, grouped_exp_avgs, grouped_exp_avg_sqs)
+ _view_as_real(
+ grouped_params, grouped_grads, grouped_exp_avgs, grouped_exp_avg_sqs
+ )
# Update steps
# If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over
# and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just
# wrapped it once now. The alpha is required to assure we go to the right overload.
if grouped_state_steps[0].is_cpu:
- torch._foreach_add_(grouped_state_steps, torch.tensor(1.0, device='cpu'), alpha=1.0)
+ torch._foreach_add_(
+ grouped_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
+ )
else:
torch._foreach_add_(grouped_state_steps, 1)
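# Aside (illustration, not part of the patch): this branch exists because the
# foreach kernels fall back to a per-tensor loop on CPU, so wrapping the scalar
# once avoids re-wrapping `1` for every parameter. Roughly:
#   one = torch.tensor(1.0, device="cpu")
#   torch._foreach_add_(cpu_state_steps, one, alpha=1.0)    # CPU steps: pre-wrapped scalar
#   torch._foreach_add_(cuda_state_steps, 1)                # device steps: plain Python int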
@@ -387,13 +480,17 @@ def _multi_tensor_nadam(params: List[Tensor],
# Perform stepweight decay
torch._foreach_mul_(grouped_params, 1 - lr * weight_decay)
else:
- grouped_grads = torch._foreach_add(grouped_grads, grouped_params, alpha=weight_decay)
+ grouped_grads = torch._foreach_add(
+ grouped_grads, grouped_params, alpha=weight_decay
+ )
# Decay the first and second moment running average coefficient
torch._foreach_lerp_(grouped_exp_avgs, grouped_grads, 1 - beta1)
torch._foreach_mul_(grouped_exp_avg_sqs, beta2)
- torch._foreach_addcmul_(grouped_exp_avg_sqs, grouped_grads, grouped_grads, 1 - beta2)
+ torch._foreach_addcmul_(
+ grouped_exp_avg_sqs, grouped_grads, grouped_grads, 1 - beta2
+ )
exp_avg_sq_sqrt = torch._foreach_sqrt(grouped_exp_avg_sqs)
@@ -421,10 +518,19 @@ def _multi_tensor_nadam(params: List[Tensor],
torch._foreach_neg_(bias_correction_sqrt)
torch._foreach_sqrt_(bias_correction_sqrt)
else:
- bias_correction_sqrt = [_dispatch_sqrt(1 - beta2 ** _get_value(step)) for step in grouped_state_steps]
- mus = [beta1 * (1. - 0.5 * (0.96 ** (_get_value(step) * momentum_decay))) for step in grouped_state_steps]
- mu_nexts = [beta1 * (1. - 0.5 * (0.96 ** ((_get_value(step) + 1) * momentum_decay)))
- for step in grouped_state_steps]
+ bias_correction_sqrt = [
+ _dispatch_sqrt(1 - beta2 ** _get_value(step))
+ for step in grouped_state_steps
+ ]
+ mus = [
+ beta1 * (1.0 - 0.5 * (0.96 ** (_get_value(step) * momentum_decay)))
+ for step in grouped_state_steps
+ ]
+ mu_nexts = [
+ beta1
+ * (1.0 - 0.5 * (0.96 ** ((_get_value(step) + 1) * momentum_decay)))
+ for step in grouped_state_steps
+ ]
# update mu_products
torch._foreach_mul_(grouped_mu_products, mus)
@@ -468,10 +574,22 @@ def _multi_tensor_nadam(params: List[Tensor],
# finally, update params
torch._foreach_addcdiv_(grouped_params, numerator, exp_avg_sq_sqrt)
else:
- step_size_grads = _stack_if_compiling([(lr * (1. - mu) / (1. - _get_value(mu_product))) * -1
- for mu_product, mu in zip(grouped_mu_products, mus)])
- step_size_expavg = _stack_if_compiling([(lr * mu_next / (1. - _get_value(mu_product) * mu_next)) * -1
- for mu_product, mu_next in zip(grouped_mu_products, mu_nexts)])
-
- torch._foreach_addcdiv_(grouped_params, grouped_grads, exp_avg_sq_sqrt, step_size_grads)
- torch._foreach_addcdiv_(grouped_params, grouped_exp_avgs, exp_avg_sq_sqrt, step_size_expavg)
+ step_size_grads = _stack_if_compiling(
+ [
+ (lr * (1.0 - mu) / (1.0 - _get_value(mu_product))) * -1
+ for mu_product, mu in zip(grouped_mu_products, mus)
+ ]
+ )
+ step_size_expavg = _stack_if_compiling(
+ [
+ (lr * mu_next / (1.0 - _get_value(mu_product) * mu_next)) * -1
+ for mu_product, mu_next in zip(grouped_mu_products, mu_nexts)
+ ]
+ )
+
+ torch._foreach_addcdiv_(
+ grouped_params, grouped_grads, exp_avg_sq_sqrt, step_size_grads
+ )
+ torch._foreach_addcdiv_(
+ grouped_params, grouped_exp_avgs, exp_avg_sq_sqrt, step_size_expavg
+ )
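For orientation, here is a hand-rolled single NAdam step for one parameter, following the non-capturable branch of `_single_tensor_nadam` above (a minimal sketch; the hyperparameters are the usual defaults and weight decay is omitted):

    import torch

    lr, beta1, beta2, eps, momentum_decay = 2e-3, 0.9, 0.999, 1e-8, 4e-3
    param, grad = torch.zeros(3), torch.ones(3)
    exp_avg, exp_avg_sq = torch.zeros(3), torch.zeros(3)
    mu_product, step = 1.0, 1

    bias_correction2 = 1 - beta2**step
    # momentum cache \mu^{t} and \mu^{t+1}
    mu = beta1 * (1.0 - 0.5 * (0.96 ** (step * momentum_decay)))
    mu_next = beta1 * (1.0 - 0.5 * (0.96 ** ((step + 1) * momentum_decay)))
    mu_product *= mu

    exp_avg.lerp_(grad, 1 - beta1)                                # first moment
    exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)  # second moment
    denom = (exp_avg_sq / bias_correction2).sqrt().add_(eps)

    param.addcdiv_(grad, denom, value=-lr * (1.0 - mu) / (1.0 - mu_product))
    param.addcdiv_(exp_avg, denom, value=-lr * mu_next / (1.0 - mu_product * mu_next))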
|
2.41.0
|
91e5db7055337a9ebb9d817af099588e1976847
|
Mon, 15 Apr 2024 17:32:41 +0800
|
[PATCH 0188/1000] Part 3: UFMT-format the remaining files in torch/optim to satisfy the PR sanity checks (#124055)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124055 Approved by: https://github.com/ezyang ghstack dependencies: #124048, #124053, #124054
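The diff below is mechanical ufmt output (usort for import ordering plus black for layout): string quotes are normalized to double quotes, long signatures and call sites are wrapped one argument per line with trailing commas, and imports are re-sorted, with no behavior change. A representative rewrite, condensed from the radam.py hunk below:

    # before
    p_state["step"] = (torch.tensor(step_val, dtype=_get_scalar_dtype(), device=p.device) if group['capturable']
                       else torch.tensor(step_val, dtype=_get_scalar_dtype()))
    # after
    p_state["step"] = (
        torch.tensor(step_val, dtype=_get_scalar_dtype(), device=p.device)
        if group["capturable"]
        else torch.tensor(step_val, dtype=_get_scalar_dtype())
    )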
|
diff --git a/.lintrunner.toml b/.lintrunner.toml
index 9ddc5def54..e29854e0d7 100644
--- a/.lintrunner.toml
+++ b/.lintrunner.toml
@@ -2112,13 +2112,6 @@ exclude_patterns = [
'torch/nn/utils/rnn.py',
'torch/nn/utils/spectral_norm.py',
'torch/nn/utils/weight_norm.py',
- 'torch/optim/optimizer.py',
- 'torch/optim/radam.py',
- 'torch/optim/rmsprop.py',
- 'torch/optim/rprop.py',
- 'torch/optim/sgd.py',
- 'torch/optim/sparse_adam.py',
- 'torch/optim/swa_utils.py',
'torch/overrides.py',
'torch/quasirandom.py',
'torch/random.py',
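Dropping these entries from `exclude_patterns` opts the files into the UFMT lint from now on. A rough local check that a file already satisfies the black half of that format (a sketch only: the real linter runs ufmt, which also sorts imports via usort, and the path is just an example):

    import black  # assumes black is installed and its default mode approximates the repo config

    path = "torch/optim/rprop.py"  # example
    src = open(path).read()
    print("already formatted:", black.format_str(src, mode=black.Mode()) == src)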
diff --git a/torch/optim/optimizer.py b/torch/optim/optimizer.py
index cbbdf1c8f4..ca4092d9d7 100644
--- a/torch/optim/optimizer.py
+++ b/torch/optim/optimizer.py
@@ -1,62 +1,73 @@
-import math
import functools
+import math
import warnings
-from collections import OrderedDict, defaultdict
+from collections import defaultdict, OrderedDict
from copy import deepcopy
from itertools import chain
from typing import (
Any,
Callable,
+ cast,
DefaultDict,
Dict,
Hashable,
Iterable,
List,
Optional,
+ overload,
Set,
Tuple,
TypeVar,
Union,
- cast,
- overload,
)
+
from typing_extensions import ParamSpec, Self, TypeAlias
import torch
import torch.utils.hooks as hooks
-from torch.utils.hooks import RemovableHandle
+from torch._utils import is_compiling
from torch.utils._foreach_utils import (
- Indices,
- TensorListList,
_get_foreach_kernels_supported_devices,
_get_fused_kernels_supported_devices,
+ _group_tensors_by_device_and_dtype,
+ Indices,
+ TensorListList,
)
-from torch._utils import is_compiling
-from torch.utils._foreach_utils import _group_tensors_by_device_and_dtype
+from torch.utils.hooks import RemovableHandle
Args: TypeAlias = Tuple[Any, ...]
Kwargs: TypeAlias = Dict[str, Any]
StateDict: TypeAlias = Dict[str, Any]
-GlobalOptimizerPreHook: TypeAlias = Callable[["Optimizer", Args, Kwargs], Optional[Tuple[Args, Kwargs]]]
+GlobalOptimizerPreHook: TypeAlias = Callable[
+ ["Optimizer", Args, Kwargs], Optional[Tuple[Args, Kwargs]]
+]
GlobalOptimizerPostHook: TypeAlias = Callable[["Optimizer", Args, Kwargs], None]
-__all__ = ['Optimizer', 'register_optimizer_step_pre_hook', 'register_optimizer_step_post_hook']
+__all__ = [
+ "Optimizer",
+ "register_optimizer_step_pre_hook",
+ "register_optimizer_step_post_hook",
+]
_global_optimizer_pre_hooks: Dict[int, GlobalOptimizerPreHook] = OrderedDict()
_global_optimizer_post_hooks: Dict[int, GlobalOptimizerPostHook] = OrderedDict()
_foreach_supported_types = [torch.Tensor, torch.nn.parameter.Parameter]
+
class _RequiredParameter:
"""Singleton class representing a required parameter for an Optimizer."""
+
def __repr__(self) -> str:
return "<required parameter>"
+
required = _RequiredParameter()
def _use_grad_for_differentiable(func):
def _use_grad(self, *args, **kwargs):
import torch._dynamo
+
prev_grad = torch.is_grad_enabled()
try:
# Note on graph break below:
@@ -71,16 +82,18 @@ def _use_grad_for_differentiable(func):
# or 2) have a fully fused forward and backward graph, which will have no_grad by default, and we can remove this
# graph break to allow the fully fused fwd-bwd-optimizer graph to be compiled.
# see https://github.com/pytorch/pytorch/issues/104053
- torch.set_grad_enabled(self.defaults['differentiable'])
+ torch.set_grad_enabled(self.defaults["differentiable"])
torch._dynamo.graph_break()
ret = func(self, *args, **kwargs)
finally:
torch._dynamo.graph_break()
torch.set_grad_enabled(prev_grad)
return ret
+
functools.update_wrapper(_use_grad, func)
return _use_grad
+
def _get_value(x):
# item is significantly faster than a cpu tensor in eager mode
if not torch.jit.is_scripting() and is_compiling():
@@ -88,43 +101,57 @@ def _get_value(x):
else:
return x.item()
+
def _stack_if_compiling(x):
if not torch.jit.is_scripting() and is_compiling():
return torch.stack(x)
else:
return x
-def _dispatch_sqrt(x: float): # float annotation is needed because of torchscript type inference
+
+def _dispatch_sqrt(
+ x: float,
+): # float annotation is needed because of torchscript type inference
if not torch.jit.is_scripting() and isinstance(x, torch.Tensor):
return x.sqrt()
else:
return math.sqrt(x)
+
# For any optimizer with a faster implementation, we attempt to default to the
# fastest + stablest whenever possible. For foreach, the requirements are to have
# native params all on CUDA. For fused, there's currently the additional requirement
# that the tensors' dtypes must be floating point. Neither alternative supports
# torch.jit.script nor differentiable, so we fall back to the single tensor
# implementation in those cases.
-def _default_to_fused_or_foreach(params: List[torch.Tensor],
- differentiable: bool,
- use_fused: bool = False) -> Tuple[bool, bool]:
+def _default_to_fused_or_foreach(
+ params: List[torch.Tensor], differentiable: bool, use_fused: bool = False
+) -> Tuple[bool, bool]:
if torch.jit.is_scripting() or differentiable:
return False, False
fused_supported_devices = _get_fused_kernels_supported_devices()
foreach_supported_devices = _get_foreach_kernels_supported_devices()
fused = use_fused and all(
- p is None or (type(p) in _foreach_supported_types and
- p.device.type in fused_supported_devices and
- torch.is_floating_point(p)) for p in params
+ p is None
+ or (
+ type(p) in _foreach_supported_types
+ and p.device.type in fused_supported_devices
+ and torch.is_floating_point(p)
+ )
+ for p in params
)
foreach = not fused and all(
- p is None or (type(p) in _foreach_supported_types and
- p.device.type in foreach_supported_devices) for p in params
+ p is None
+ or (
+ type(p) in _foreach_supported_types
+ and p.device.type in foreach_supported_devices
+ )
+ for p in params
)
return fused, foreach
+
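# Sketch (not part of the patch) of how the helper above resolves the default,
# assuming "cuda" appears in both lists returned by the
# _get_*_kernels_supported_devices() helpers:
#   p = torch.nn.Parameter(torch.randn(4, device="cuda"))
#   _default_to_fused_or_foreach([p], differentiable=False, use_fused=True)   # -> (True, False)
#   _default_to_fused_or_foreach([p], differentiable=False, use_fused=False)  # -> (False, True)
#   _default_to_fused_or_foreach([p], differentiable=True, use_fused=True)    # -> (False, False)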
def _view_as_real(params, *state_and_grads):
for i, p in enumerate(params):
if torch.is_complex(p):
@@ -132,10 +159,14 @@ def _view_as_real(params, *state_and_grads):
for s in state_and_grads:
s[i] = torch.view_as_real(s[i])
+
def _get_scalar_dtype(is_fused=None):
if is_fused:
return torch.float32
- return torch.float64 if torch.get_default_dtype() == torch.float64 else torch.float32
+ return (
+ torch.float64 if torch.get_default_dtype() == torch.float64 else torch.float32
+ )
+
# Common doc strings among optimizers
_foreach_doc = r"""foreach (bool, optional): whether foreach implementation of optimizer
@@ -215,6 +246,7 @@ def register_optimizer_step_post_hook(hook: GlobalOptimizerPostHook) -> Removabl
_global_optimizer_post_hooks[handle.id] = hook
return handle
+
ParamsT: TypeAlias = Union[Iterable[torch.Tensor], Iterable[Dict[str, Any]]]
_P = ParamSpec("_P")
@@ -260,16 +292,21 @@ class Optimizer:
self._patch_step_function()
if isinstance(params, torch.Tensor):
- if self.__class__.__name__ == 'SparseAdam':
- warnings.warn(("Passing in a raw Tensor as ``params`` to SparseAdam "
- "is deprecated. In the future, this will raise an error. "
- "Please wrap your Tensor in an iterable instead."),
- FutureWarning)
+ if self.__class__.__name__ == "SparseAdam":
+ warnings.warn(
+ (
+ "Passing in a raw Tensor as ``params`` to SparseAdam "
+ "is deprecated. In the future, this will raise an error. "
+ "Please wrap your Tensor in an iterable instead."
+ ),
+ FutureWarning,
+ )
params = [params]
else:
- raise TypeError("params argument given to the optimizer should be "
- "an iterable of Tensors or dicts, but got " +
- torch.typename(params))
+ raise TypeError(
+ "params argument given to the optimizer should be "
+ "an iterable of Tensors or dicts, but got " + torch.typename(params)
+ )
self.state: DefaultDict[torch.Tensor, Any] = defaultdict(dict)
self.param_groups: List[Dict[str, Any]] = []
@@ -278,7 +315,7 @@ class Optimizer:
if len(param_groups) == 0:
raise ValueError("optimizer got an empty parameter list")
if not isinstance(param_groups[0], dict):
- param_groups = [{'params': param_groups}]
+ param_groups = [{"params": param_groups}]
for param_group in param_groups:
self.add_param_group(cast(dict, param_group))
@@ -290,37 +327,37 @@ class Optimizer:
def __getstate__(self) -> Dict[str, Any]:
return {
- 'defaults': self.defaults,
- 'state': self.state,
- 'param_groups': self.param_groups,
+ "defaults": self.defaults,
+ "state": self.state,
+ "param_groups": self.param_groups,
}
def __setstate__(self, state: Dict[str, Any]) -> None:
self.__dict__.update(state)
- if '_optimizer_step_pre_hooks' not in self.__dict__:
+ if "_optimizer_step_pre_hooks" not in self.__dict__:
self._optimizer_step_pre_hooks = OrderedDict()
- if '_optimizer_step_post_hooks' not in self.__dict__:
+ if "_optimizer_step_post_hooks" not in self.__dict__:
self._optimizer_step_post_hooks = OrderedDict()
- if '_optimizer_state_dict_pre_hooks' not in self.__dict__:
+ if "_optimizer_state_dict_pre_hooks" not in self.__dict__:
self._optimizer_state_dict_pre_hooks = OrderedDict()
- if '_optimizer_state_dict_post_hooks' not in self.__dict__:
+ if "_optimizer_state_dict_post_hooks" not in self.__dict__:
self._optimizer_state_dict_post_hooks = OrderedDict()
- if '_optimizer_load_state_dict_pre_hooks' not in self.__dict__:
+ if "_optimizer_load_state_dict_pre_hooks" not in self.__dict__:
self._optimizer_load_state_dict_pre_hooks = OrderedDict()
- if '_optimizer_load_state_dict_post_hooks' not in self.__dict__:
+ if "_optimizer_load_state_dict_post_hooks" not in self.__dict__:
self._optimizer_load_state_dict_post_hooks = OrderedDict()
self._patch_step_function() # To support multiprocessing pickle/unpickle
- self.defaults.setdefault('differentiable', False)
+ self.defaults.setdefault("differentiable", False)
def __repr__(self) -> str:
- format_string = self.__class__.__name__ + ' ('
+ format_string = self.__class__.__name__ + " ("
for i, group in enumerate(self.param_groups):
- format_string += '\n'
- format_string += f'Parameter Group {i}\n'
+ format_string += "\n"
+ format_string += f"Parameter Group {i}\n"
for key in sorted(group.keys()):
- if key != 'params':
- format_string += f' {key}: {group[key]}\n'
- format_string += ')'
+ if key != "params":
+ format_string += f" {key}: {group[key]}\n"
+ format_string += ")"
return format_string
# Currently needed by Adam and AdamW
@@ -335,17 +372,25 @@ class Optimizer:
# https://github.com/pytorch/pytorch/blob/d3ba8901d8640eb16f88b2bfef9df7fa383d4b47/torch/_inductor/compile_fx.py#L390.
# Thus, when compiling, inductor will determine if cudagraphs
# can be enabled based on whether there is input mutation or CPU tensors.
- if not is_compiling() and torch.backends.cuda.is_built() and torch.cuda.is_available():
+ if (
+ not is_compiling()
+ and torch.backends.cuda.is_built()
+ and torch.cuda.is_available()
+ ):
capturing = torch.cuda.is_current_stream_capturing()
- if capturing and not all(group['capturable'] for group in self.param_groups):
- raise RuntimeError("Attempting CUDA graph capture of step() for an instance of " +
- self.__class__.__name__ +
- " but param_groups' capturable is False.")
+ if capturing and not all(
+ group["capturable"] for group in self.param_groups
+ ):
+ raise RuntimeError(
+ "Attempting CUDA graph capture of step() for an instance of "
+ + self.__class__.__name__
+ + " but param_groups' capturable is False."
+ )
if (
(not getattr(self, "_warned_capturable_if_run_uncaptured", False))
- and all(group['capturable'] for group in self.param_groups)
+ and all(group["capturable"] for group in self.param_groups)
and (not capturing)
):
warnings.warn(
@@ -370,7 +415,6 @@ class Optimizer:
@staticmethod
def profile_hook_step(func: Callable[_P, R]) -> Callable[_P, R]:
-
@functools.wraps(func)
def wrapper(*args: _P.args, **kwargs: _P.kwargs) -> R:
self, *_ = args
@@ -378,7 +422,10 @@ class Optimizer:
profile_name = f"Optimizer.step#{self.__class__.__name__}.step"
with torch.autograd.profiler.record_function(profile_name):
# call optimizer step pre hooks
- for pre_hook in chain(_global_optimizer_pre_hooks.values(), self._optimizer_step_pre_hooks.values()):
+ for pre_hook in chain(
+ _global_optimizer_pre_hooks.values(),
+ self._optimizer_step_pre_hooks.values(),
+ ):
result = pre_hook(self, args, kwargs)
if result is not None:
if isinstance(result, tuple) and len(result) == 2:
@@ -392,7 +439,10 @@ class Optimizer:
self._optimizer_step_code()
# call optimizer step post hooks
- for post_hook in chain(self._optimizer_step_post_hooks.values(), _global_optimizer_post_hooks.values()):
+ for post_hook in chain(
+ self._optimizer_step_post_hooks.values(),
+ _global_optimizer_post_hooks.values(),
+ ):
post_hook(self, args, kwargs)
return out
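# Usage sketch for the hook dispatch above (not part of the patch): a pre-hook
# receives (optimizer, args, kwargs) and may return a rewritten (args, kwargs)
# tuple or None; register_optimizer_step_pre_hook does the same globally.
#   import torch
#   def announce(optimizer, args, kwargs):
#       print(f"stepping {type(optimizer).__name__}")
#   opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.1)
#   handle = opt.register_step_pre_hook(announce)
#   opt.step()        # prints "stepping SGD" before the step runs
#   handle.remove()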
@@ -408,14 +458,17 @@ class Optimizer:
Dict[Tuple[torch.device, torch.dtype], Tuple[TensorListList, Indices]],
]:
"""Groups a list of lists of tensors by device and dtype.
- Skips this step if we are compiling since this will occur during inductor lowering."""
+ Skips this step if we are compiling since this will occur during inductor lowering.
+ """
if is_compiling():
return {(None, None): (tensorlistlist, list(range(len(tensorlistlist[0]))))}
else:
return _group_tensors_by_device_and_dtype(tensorlistlist, with_indices)
def _patch_step_function(self) -> None:
- self._zero_grad_profile_name = f"Optimizer.zero_grad#{self.__class__.__name__}.zero_grad"
+ self._zero_grad_profile_name = (
+ f"Optimizer.zero_grad#{self.__class__.__name__}.zero_grad"
+ )
hooked = getattr(self.__class__.step, "hooked", None)
if not hooked:
self.__class__.step = self.profile_hook_step(self.__class__.step) # type: ignore[assignment]
@@ -463,7 +516,6 @@ class Optimizer:
self._optimizer_step_post_hooks[handle.id] = hook
return handle
-
def register_state_dict_pre_hook(
self, hook: Callable[["Optimizer"], None], prepend: bool = False
) -> RemovableHandle:
@@ -496,7 +548,6 @@ class Optimizer:
self._optimizer_state_dict_pre_hooks.move_to_end(handle.id, last=False)
return handle
-
def register_state_dict_post_hook(
self,
hook: Callable[["Optimizer", StateDict], Optional[StateDict]],
@@ -531,7 +582,6 @@ class Optimizer:
self._optimizer_state_dict_post_hooks.move_to_end(handle.id, last=False)
return handle
-
@torch._disable_dynamo
def state_dict(self) -> StateDict:
r"""Returns the state of the optimizer as a :class:`dict`.
@@ -592,20 +642,28 @@ class Optimizer:
def pack_group(group: Dict[str, Any]) -> Dict[str, Any]:
nonlocal start_index
- packed = {k: v for k, v in group.items() if k != 'params'}
- param_mappings.update({id(p): i for i, p in enumerate(group['params'], start_index)
- if id(p) not in param_mappings})
- packed['params'] = [param_mappings[id(p)] for p in group['params']]
- start_index += len(packed['params'])
+ packed = {k: v for k, v in group.items() if k != "params"}
+ param_mappings.update(
+ {
+ id(p): i
+ for i, p in enumerate(group["params"], start_index)
+ if id(p) not in param_mappings
+ }
+ )
+ packed["params"] = [param_mappings[id(p)] for p in group["params"]]
+ start_index += len(packed["params"])
return packed
+
param_groups = [pack_group(g) for g in self.param_groups]
# Remap state to use order indices as keys
- packed_state = {(param_mappings[id(k)] if isinstance(k, torch.Tensor) else k): v
- for k, v in self.state.items()}
+ packed_state = {
+ (param_mappings[id(k)] if isinstance(k, torch.Tensor) else k): v
+ for k, v in self.state.items()
+ }
state_dict = {
- 'state': packed_state,
- 'param_groups': param_groups,
+ "state": packed_state,
+ "param_groups": param_groups,
}
for post_hook in self._optimizer_state_dict_post_hooks.values():
@@ -645,7 +703,6 @@ class Optimizer:
else:
return value.to(device=param.device)
-
def register_load_state_dict_pre_hook(
self,
hook: Callable[["Optimizer", StateDict], Optional[StateDict]],
@@ -685,7 +742,6 @@ class Optimizer:
self._optimizer_load_state_dict_pre_hooks.move_to_end(handle.id, last=False)
return handle
-
def register_load_state_dict_post_hook(
self, hook: Callable[["Optimizer"], None], prepend: bool = False
) -> RemovableHandle:
@@ -720,7 +776,6 @@ class Optimizer:
self._optimizer_load_state_dict_post_hooks.move_to_end(handle.id, last=False) # type: ignore[attr-defined]
return handle
-
@torch._disable_dynamo
def load_state_dict(self, state_dict: StateDict) -> None:
r"""Loads the optimizer state.
@@ -741,27 +796,41 @@ class Optimizer:
groups = self.param_groups
# Deepcopy as we write into saved_groups later to update state
- saved_groups = deepcopy(state_dict['param_groups'])
+ saved_groups = deepcopy(state_dict["param_groups"])
if len(groups) != len(saved_groups):
- raise ValueError("loaded state dict has a different number of "
- "parameter groups")
- param_lens = (len(g['params']) for g in groups)
- saved_lens = (len(g['params']) for g in saved_groups)
+ raise ValueError(
+ "loaded state dict has a different number of " "parameter groups"
+ )
+ param_lens = (len(g["params"]) for g in groups)
+ saved_lens = (len(g["params"]) for g in saved_groups)
if any(p_len != s_len for p_len, s_len in zip(param_lens, saved_lens)):
- raise ValueError("loaded state dict contains a parameter group "
- "that doesn't match the size of optimizer's group")
+ raise ValueError(
+ "loaded state dict contains a parameter group "
+ "that doesn't match the size of optimizer's group"
+ )
# Update the state
- id_map = dict(zip(chain.from_iterable(g['params'] for g in saved_groups),
- chain.from_iterable(g['params'] for g in groups)))
+ id_map = dict(
+ zip(
+ chain.from_iterable(g["params"] for g in saved_groups),
+ chain.from_iterable(g["params"] for g in groups),
+ )
+ )
def _cast(param, value, param_id=None, param_groups=None, key=None):
r"""Make a deep copy of value, casting all tensors to device of param."""
if isinstance(value, torch.Tensor):
- return Optimizer._process_value_according_to_param_policy(param, value, param_id, param_groups, key)
+ return Optimizer._process_value_according_to_param_policy(
+ param, value, param_id, param_groups, key
+ )
elif isinstance(value, dict):
- return {k: _cast(param, v, param_id=param_id, param_groups=param_groups, key=k) for k, v in value.items()}
+ return {
+ k: _cast(
+ param, v, param_id=param_id, param_groups=param_groups, key=k
+ )
+ for k, v in value.items()
+ }
elif isinstance(value, Iterable):
return type(value)(_cast(param, v, param_id=param_id, param_groups=param_groups) for v in value) # type: ignore[call-arg]
else:
@@ -771,25 +840,28 @@ class Optimizer:
# State that is not assigned to params is copied as is (needed for
# backward compatibility).
state: DefaultDict[torch.Tensor, Dict[Any, Any]] = defaultdict(dict)
- for k, v in state_dict['state'].items():
+ for k, v in state_dict["state"].items():
if k in id_map:
param = id_map[k]
- state[param] = _cast(param, v, param_id=k, param_groups=state_dict['param_groups'])
+ state[param] = _cast(
+ param, v, param_id=k, param_groups=state_dict["param_groups"]
+ )
else:
state[k] = v
# Update parameter groups, setting their 'params' value
- def update_group(group: Dict[str, Any], new_group: Dict[str, Any]) -> Dict[str, Any]:
- new_group['params'] = group['params']
+ def update_group(
+ group: Dict[str, Any], new_group: Dict[str, Any]
+ ) -> Dict[str, Any]:
+ new_group["params"] = group["params"]
return new_group
- param_groups = [
- update_group(g, ng) for g, ng in zip(groups, saved_groups)]
- self.__setstate__({'state': state, 'param_groups': param_groups})
+
+ param_groups = [update_group(g, ng) for g, ng in zip(groups, saved_groups)]
+ self.__setstate__({"state": state, "param_groups": param_groups})
for post_hook in self._optimizer_load_state_dict_post_hooks.values():
post_hook(self)
-
@torch._disable_dynamo
def zero_grad(self, set_to_none: bool = True) -> None:
r"""Resets the gradients of all optimized :class:`torch.Tensor` s.
@@ -806,12 +878,16 @@ class Optimizer:
(in one case it does the step with a gradient of 0 and in the other it skips
the step altogether).
"""
- foreach = self.defaults.get('foreach', False) or self.defaults.get('fused', False)
+ foreach = self.defaults.get("foreach", False) or self.defaults.get(
+ "fused", False
+ )
if not hasattr(self, "_zero_grad_profile_name"):
self._patch_step_function()
- per_device_and_dtype_grads: Optional[DefaultDict[torch.device, DefaultDict[torch.dtype, List[torch.Tensor]]]]
+ per_device_and_dtype_grads: Optional[
+ DefaultDict[torch.device, DefaultDict[torch.dtype, List[torch.Tensor]]]
+ ]
if foreach:
per_device_and_dtype_grads = defaultdict(lambda: defaultdict(list))
else:
@@ -819,7 +895,7 @@ class Optimizer:
with torch.autograd.profiler.record_function(self._zero_grad_profile_name):
for group in self.param_groups:
- for p in group['params']:
+ for p in group["params"]:
if p.grad is not None:
if set_to_none:
p.grad = None
@@ -828,11 +904,13 @@ class Optimizer:
p.grad.detach_()
else:
p.grad.requires_grad_(False)
- if (not foreach or p.grad.is_sparse):
+ if not foreach or p.grad.is_sparse:
p.grad.zero_()
else:
assert per_device_and_dtype_grads is not None
- per_device_and_dtype_grads[p.grad.device][p.grad.dtype].append(p.grad)
+ per_device_and_dtype_grads[p.grad.device][
+ p.grad.dtype
+ ].append(p.grad)
if foreach:
assert per_device_and_dtype_grads is not None
for per_dtype_grads in per_device_and_dtype_grads.values():
@@ -874,39 +952,50 @@ class Optimizer:
if not isinstance(param_group, dict):
raise TypeError(f"param_group must be a dict, but got {type(param_group)}")
- params = param_group['params']
+ params = param_group["params"]
if isinstance(params, torch.Tensor):
- param_group['params'] = [params]
+ param_group["params"] = [params]
elif isinstance(params, set):
- raise TypeError('optimizer parameters need to be organized in ordered collections, but '
- 'the ordering of tensors in sets will change between runs. Please use a list instead.')
+ raise TypeError(
+ "optimizer parameters need to be organized in ordered collections, but "
+ "the ordering of tensors in sets will change between runs. Please use a list instead."
+ )
else:
- param_group['params'] = list(params)
+ param_group["params"] = list(params)
- for param in param_group['params']:
+ for param in param_group["params"]:
if not isinstance(param, torch.Tensor):
- raise TypeError("optimizer can only optimize Tensors, "
- "but one of the params is " + torch.typename(param))
- if not self.defaults.get('differentiable', None) and not (param.is_leaf or param.retains_grad):
+ raise TypeError(
+ "optimizer can only optimize Tensors, "
+ "but one of the params is " + torch.typename(param)
+ )
+ if not self.defaults.get("differentiable", None) and not (
+ param.is_leaf or param.retains_grad
+ ):
raise ValueError("can't optimize a non-leaf Tensor")
for name, default in self.defaults.items():
if default is required and name not in param_group:
- raise ValueError(f"parameter group didn't specify a value of required optimization parameter {name}")
+ raise ValueError(
+ f"parameter group didn't specify a value of required optimization parameter {name}"
+ )
else:
param_group.setdefault(name, default)
- params = param_group['params']
+ params = param_group["params"]
if len(params) != len(set(params)):
- warnings.warn("optimizer contains a parameter group with duplicate parameters; "
- "in future, this will cause an error; "
- "see github.com/pytorch/pytorch/issues/40967 for more information", stacklevel=3)
+ warnings.warn(
+ "optimizer contains a parameter group with duplicate parameters; "
+ "in future, this will cause an error; "
+ "see github.com/pytorch/pytorch/issues/40967 for more information",
+ stacklevel=3,
+ )
param_set: Set[torch.Tensor] = set()
for group in self.param_groups:
- param_set.update(set(group['params']))
+ param_set.update(set(group["params"]))
- if not param_set.isdisjoint(set(param_group['params'])):
+ if not param_set.isdisjoint(set(param_group["params"])):
raise ValueError("some parameters appear in more than one parameter group")
self.param_groups.append(param_group)
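As a quick illustration of the index packing `state_dict()` performs above (a minimal, runnable sketch; the integer indices stand in for the parameter tensors):

    import torch

    net = torch.nn.Linear(2, 2)
    opt = torch.optim.SGD(net.parameters(), lr=0.1, momentum=0.9)
    net(torch.randn(1, 2)).sum().backward()
    opt.step()

    sd = opt.state_dict()
    print(sd["param_groups"][0]["params"])  # [0, 1]: indices, not tensors
    print(sorted(sd["state"].keys()))       # the same indices key the per-parameter state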
diff --git a/torch/optim/radam.py b/torch/optim/radam.py
index cfae6a58ab..de746eaaf0 100644
--- a/torch/optim/radam.py
+++ b/torch/optim/radam.py
@@ -4,16 +4,16 @@ import torch
from torch import Tensor
from .optimizer import (
- Optimizer,
+ _capturable_doc,
_default_to_fused_or_foreach,
_differentiable_doc,
- _capturable_doc,
_dispatch_sqrt,
_foreach_doc,
_get_scalar_dtype,
_get_value,
_use_grad_for_differentiable,
_view_as_real,
+ Optimizer,
)
__all__ = ["RAdam", "radam"]
@@ -65,12 +65,19 @@ class RAdam(Optimizer):
group.setdefault("capturable", False)
for p in group["params"]:
p_state = self.state.get(p, [])
- if len(p_state) != 0 and not torch.is_tensor(p_state['step']):
+ if len(p_state) != 0 and not torch.is_tensor(p_state["step"]):
step_val = float(p_state["step"])
- p_state["step"] = (torch.tensor(step_val, dtype=_get_scalar_dtype(), device=p.device) if group['capturable']
- else torch.tensor(step_val, dtype=_get_scalar_dtype()))
+ p_state["step"] = (
+ torch.tensor(
+ step_val, dtype=_get_scalar_dtype(), device=p.device
+ )
+ if group["capturable"]
+ else torch.tensor(step_val, dtype=_get_scalar_dtype())
+ )
- def _init_group(self, group, params_with_grad, grads, exp_avgs, exp_avg_sqs, state_steps):
+ def _init_group(
+ self, group, params_with_grad, grads, exp_avgs, exp_avg_sqs, state_steps
+ ):
has_complex = False
for p in group["params"]:
if p.grad is not None:
@@ -83,9 +90,9 @@ class RAdam(Optimizer):
state = self.state[p]
# Lazy state initialization
if len(state) == 0:
- state['step'] = (
+ state["step"] = (
torch.zeros((), dtype=_get_scalar_dtype(), device=p.device)
- if group['capturable']
+ if group["capturable"]
else torch.tensor(0.0, dtype=_get_scalar_dtype())
)
# Exponential moving average of gradient values
@@ -126,7 +133,9 @@ class RAdam(Optimizer):
state_steps = []
beta1, beta2 = group["betas"]
- has_complex = self._init_group(group, params_with_grad, grads, exp_avgs, exp_avg_sqs, state_steps)
+ has_complex = self._init_group(
+ group, params_with_grad, grads, exp_avgs, exp_avg_sqs, state_steps
+ )
radam(
params_with_grad,
@@ -149,7 +158,8 @@ class RAdam(Optimizer):
return loss
-RAdam.__doc__ = r"""Implements RAdam algorithm.
+RAdam.__doc__ = (
+ r"""Implements RAdam algorithm.
.. math::
\begin{aligned}
@@ -196,7 +206,8 @@ RAdam.__doc__ = r"""Implements RAdam algorithm.
corresponds more closely to the `author's implementation`_ in the RAdam paper. Further information
about decoupled weight decay can be found in `Decoupled Weight Decay Regularization`_.
- """ + fr"""
+ """
+ + rf"""
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
@@ -220,6 +231,7 @@ RAdam.__doc__ = r"""Implements RAdam algorithm.
https://arxiv.org/abs/1711.05101
"""
+)
def radam(
@@ -253,7 +265,9 @@ def radam(
)
if foreach is None:
- _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False)
+ _, foreach = _default_to_fused_or_foreach(
+ params, differentiable, use_fused=False
+ )
if foreach and torch.jit.is_scripting():
raise RuntimeError("torch.jit.script not supported with foreach optimizers")
@@ -330,8 +344,8 @@ def _single_tensor_radam(
exp_avg.lerp_(grad, 1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
- bias_correction1 = 1 - beta1 ** step
- bias_correction2 = 1 - beta2 ** step
+ bias_correction1 = 1 - beta1**step
+ bias_correction2 = 1 - beta2**step
# correcting bias for the first moving moment
bias_corrected_exp_avg = exp_avg / bias_correction1
@@ -339,7 +353,7 @@ def _single_tensor_radam(
# maximum length of the approximated SMA
rho_inf = 2 / (1 - beta2) - 1
# compute the length of the approximated SMA
- rho_t = rho_inf - 2 * step * (beta2 ** step) / bias_correction2
+ rho_t = rho_inf - 2 * step * (beta2**step) / bias_correction2
def _compute_rect():
return (
@@ -356,15 +370,23 @@ def _single_tensor_radam(
else:
exp_avg_sq_sqrt = exp_avg_sq_sqrt.add_(eps)
- return (bias_correction2 ** 0.5) / exp_avg_sq_sqrt
+ return (bias_correction2**0.5) / exp_avg_sq_sqrt
# Compute the variance rectification term and update parameters accordingly
if capturable:
- update = torch.where(rho_t > 5.0, _compute_rect() * _compute_adaptive_lr(), 1.0)
+ update = torch.where(
+ rho_t > 5.0, _compute_rect() * _compute_adaptive_lr(), 1.0
+ )
param.add_(bias_corrected_exp_avg * lr * update, alpha=-1.0)
else:
if rho_t > 5.0:
- param.add_(bias_corrected_exp_avg * lr * _compute_adaptive_lr() * _compute_rect(), alpha=-1.0)
+ param.add_(
+ bias_corrected_exp_avg
+ * lr
+ * _compute_adaptive_lr()
+ * _compute_rect(),
+ alpha=-1.0,
+ )
else:
param.add_(bias_corrected_exp_avg * lr, alpha=-1.0)
@@ -386,7 +408,6 @@ def _multi_tensor_radam(
capturable: bool,
has_complex: bool,
):
-
if len(params) == 0:
return
@@ -394,28 +415,35 @@ def _multi_tensor_radam(
# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
if not torch._utils.is_compiling() and capturable:
- assert all(p.is_cuda and step.is_cuda for p, step in zip(params, state_steps)), \
- "If capturable=True, params and state_steps must be CUDA tensors."
+ assert all(
+ p.is_cuda and step.is_cuda for p, step in zip(params, state_steps)
+ ), "If capturable=True, params and state_steps must be CUDA tensors."
- grouped_tensors = Optimizer._group_tensors_by_device_and_dtype([params, grads, exp_avgs, exp_avg_sqs, state_steps])
- for ((
+ grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
+ [params, grads, exp_avgs, exp_avg_sqs, state_steps]
+ )
+ for (
grouped_params,
grouped_grads,
grouped_exp_avgs,
grouped_exp_avg_sqs,
grouped_state_steps,
- ), _) in grouped_tensors.values():
+ ), _ in grouped_tensors.values():
# Update steps
# If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over
# and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just
# wrapped it once now. The alpha is required to assure we go to the right overload.
if grouped_state_steps[0].is_cpu:
- torch._foreach_add_(grouped_state_steps, torch.tensor(1.0, device='cpu'), alpha=1.0)
+ torch._foreach_add_(
+ grouped_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
+ )
else:
torch._foreach_add_(grouped_state_steps, 1)
if has_complex:
- _view_as_real(grouped_params, grouped_grads, grouped_exp_avgs, grouped_exp_avg_sqs)
+ _view_as_real(
+ grouped_params, grouped_grads, grouped_exp_avgs, grouped_exp_avg_sqs
+ )
# maximum length of the approximated SMA
rho_inf = 2 / (1 - beta2) - 1
@@ -432,21 +460,30 @@ def _multi_tensor_radam(
torch._foreach_add_(bias_correction2, rho_inf)
rho_t_list = bias_correction2
else:
- rho_t_list = [rho_inf - 2 * _get_value(step) * (beta2 ** _get_value(step)) /
- (1 - beta2 ** _get_value(step)) for step in grouped_state_steps]
-
+ rho_t_list = [
+ rho_inf
+ - 2
+ * _get_value(step)
+ * (beta2 ** _get_value(step))
+ / (1 - beta2 ** _get_value(step))
+ for step in grouped_state_steps
+ ]
if weight_decay != 0:
if decoupled_weight_decay:
torch._foreach_mul_(grouped_params, 1 - lr * weight_decay)
else:
- grouped_grads = torch._foreach_add(grouped_grads, grouped_params, alpha=weight_decay)
+ grouped_grads = torch._foreach_add(
+ grouped_grads, grouped_params, alpha=weight_decay
+ )
# Decay the first and second moment running average coefficient
torch._foreach_lerp_(grouped_exp_avgs, grouped_grads, 1 - beta1)
torch._foreach_mul_(grouped_exp_avg_sqs, beta2)
- torch._foreach_addcmul_(grouped_exp_avg_sqs, grouped_grads, grouped_grads, 1 - beta2)
+ torch._foreach_addcmul_(
+ grouped_exp_avg_sqs, grouped_grads, grouped_grads, 1 - beta2
+ )
# Delete the local intermediate since it won't be used anymore to save on peak memory
del grouped_grads
@@ -457,14 +494,16 @@ def _multi_tensor_radam(
torch._foreach_mul_(num, sub2)
del sub2
torch._foreach_mul_(num, rho_inf)
- rho_inf = ((rho_inf - 4) * (rho_inf - 2))
+ rho_inf = (rho_inf - 4) * (rho_inf - 2)
denom = torch._foreach_mul(rho_t_list, rho_inf)
torch._foreach_div_(num, denom)
del denom
torch._foreach_sqrt_(num)
# TODO(mlazos): we should try and get a foreach_where op https://github.com/pytorch/pytorch/issues/117884
- rect = [torch.where(rho_t > 5.0, n, 0.0) for n, rho_t in zip(num, rho_t_list)]
+ rect = [
+ torch.where(rho_t > 5.0, n, 0.0) for n, rho_t in zip(num, rho_t_list)
+ ]
del num
del rho_t_list
unrect_step_size = [torch.where(rect > 0, 0.0, 1.0) for rect in rect]
@@ -501,14 +540,17 @@ def _multi_tensor_radam(
]
unrectified = [0 if rect > 0 else 1.0 for rect in rect]
- bias_correction1 = [1 - beta1 ** _get_value(step) for step in grouped_state_steps]
- unrect_step_size = [(lr * rect / bc) * -1 for rect, bc in zip(unrectified, bias_correction1)]
+ bias_correction1 = [
+ 1 - beta1 ** _get_value(step) for step in grouped_state_steps
+ ]
+ unrect_step_size = [
+ (lr * rect / bc) * -1 for rect, bc in zip(unrectified, bias_correction1)
+ ]
bias_correction2 = [
_dispatch_sqrt(1 - beta2 ** _get_value(step)) * (lr * rect / bc) * -1
for step, rect, bc in zip(grouped_state_steps, rect, bias_correction1)
]
-
buffer = torch._foreach_sqrt(grouped_exp_avg_sqs)
torch._foreach_add_(buffer, eps)
torch._foreach_div_(buffer, bias_correction2)
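To make the rectification threshold above concrete, a small plain-Python check of rho_t for the default beta2 = 0.999 (illustrative only):

    beta2 = 0.999
    rho_inf = 2 / (1 - beta2) - 1  # maximum length of the approximated SMA
    for step in (1, 4, 5, 6, 10):
        rho_t = rho_inf - 2 * step * beta2**step / (1 - beta2**step)
        print(step, round(rho_t, 3), "rectified" if rho_t > 5.0 else "unrectified")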
diff --git a/torch/optim/rmsprop.py b/torch/optim/rmsprop.py
index 84d6061286..ebb0d59244 100644
--- a/torch/optim/rmsprop.py
+++ b/torch/optim/rmsprop.py
@@ -1,8 +1,18 @@
+from typing import List, Optional
+
import torch
from torch import Tensor
-from .optimizer import (Optimizer, _default_to_fused_or_foreach, _use_grad_for_differentiable, _get_scalar_dtype,
- _differentiable_doc, _foreach_doc, _maximize_doc, _capturable_doc, _view_as_real)
-from typing import List, Optional
+from .optimizer import (
+ _capturable_doc,
+ _default_to_fused_or_foreach,
+ _differentiable_doc,
+ _foreach_doc,
+ _get_scalar_dtype,
+ _maximize_doc,
+ _use_grad_for_differentiable,
+ _view_as_real,
+ Optimizer,
+)
__all__ = ["RMSprop", "rmsprop"]
@@ -58,13 +68,26 @@ class RMSprop(Optimizer):
group.setdefault("capturable", False)
for p in group["params"]:
p_state = self.state.get(p, [])
- if len(p_state) != 0 and not torch.is_tensor(p_state['step']):
+ if len(p_state) != 0 and not torch.is_tensor(p_state["step"]):
step_val = float(p_state["step"])
- p_state["step"] = (torch.tensor(step_val, dtype=_get_scalar_dtype(), device=p.device)
- if group['capturable']
- else torch.tensor(step_val, dtype=_get_scalar_dtype()))
+ p_state["step"] = (
+ torch.tensor(
+ step_val, dtype=_get_scalar_dtype(), device=p.device
+ )
+ if group["capturable"]
+ else torch.tensor(step_val, dtype=_get_scalar_dtype())
+ )
- def _init_group(self, group, params_with_grad, grads, square_avgs, momentum_buffer_list, grad_avgs, state_steps):
+ def _init_group(
+ self,
+ group,
+ params_with_grad,
+ grads,
+ square_avgs,
+ momentum_buffer_list,
+ grad_avgs,
+ state_steps,
+ ):
has_complex = False
for p in group["params"]:
if p.grad is None:
@@ -80,8 +103,11 @@ class RMSprop(Optimizer):
# State initialization
if len(state) == 0:
- state["step"] = (torch.zeros((), dtype=_get_scalar_dtype(), device=p.device)
- if group["capturable"] else torch.zeros((), dtype=_get_scalar_dtype()))
+ state["step"] = (
+ torch.zeros((), dtype=_get_scalar_dtype(), device=p.device)
+ if group["capturable"]
+ else torch.zeros((), dtype=_get_scalar_dtype())
+ )
state["square_avg"] = torch.zeros_like(
p, memory_format=torch.preserve_format
)
@@ -126,8 +152,15 @@ class RMSprop(Optimizer):
momentum_buffer_list = []
state_steps = []
- has_complex = self._init_group(group, params_with_grad, grads, square_avgs,
- momentum_buffer_list, grad_avgs, state_steps)
+ has_complex = self._init_group(
+ group,
+ params_with_grad,
+ grads,
+ square_avgs,
+ momentum_buffer_list,
+ grad_avgs,
+ state_steps,
+ )
rmsprop(
params_with_grad,
@@ -152,7 +185,8 @@ class RMSprop(Optimizer):
return loss
-RMSprop.__doc__ = r"""Implements RMSprop algorithm.
+RMSprop.__doc__ = (
+ r"""Implements RMSprop algorithm.
.. math::
\begin{aligned}
@@ -194,7 +228,8 @@ RMSprop.__doc__ = r"""Implements RMSprop algorithm.
learning rate is thus :math:`\gamma/(\sqrt{v} + \epsilon)` where :math:`\gamma`
is the scheduled learning rate and :math:`v` is the weighted moving average
of the squared gradient.
- """ + fr"""
+ """
+ + rf"""
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
@@ -212,6 +247,7 @@ RMSprop.__doc__ = r"""Implements RMSprop algorithm.
{_differentiable_doc}
"""
+)
def rmsprop(
@@ -241,11 +277,17 @@ def rmsprop(
"""
# this check is slow during compilation, so we skip it
# if it's strictly needed we can add this check back in dynamo
- if not torch._utils.is_compiling() and not all(isinstance(t, torch.Tensor) for t in state_steps):
- raise RuntimeError("API has changed, `state_steps` argument must contain a list of singleton tensors")
+ if not torch._utils.is_compiling() and not all(
+ isinstance(t, torch.Tensor) for t in state_steps
+ ):
+ raise RuntimeError(
+ "API has changed, `state_steps` argument must contain a list of singleton tensors"
+ )
if foreach is None:
- _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False)
+ _, foreach = _default_to_fused_or_foreach(
+ params, differentiable, use_fused=False
+ )
if foreach and torch.jit.is_scripting():
raise RuntimeError("torch.jit.script not supported with foreach optimizers")
@@ -294,14 +336,13 @@ def _single_tensor_rmsprop(
capturable: bool,
has_complex: bool,
):
-
for i, param in enumerate(params):
step = state_steps[i]
# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
if not torch._utils.is_compiling() and capturable:
- assert (
- (param.is_cuda and step.is_cuda) or (param.is_xla and step.is_xla)
+ assert (param.is_cuda and step.is_cuda) or (
+ param.is_xla and step.is_xla
), "If capturable=True, params and state_steps must be CUDA or XLA tensors."
grad = grads[i]
@@ -371,14 +412,24 @@ def _multi_tensor_rmsprop(
# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
if not torch._utils.is_compiling() and capturable:
- assert all((p.is_cuda and step.is_cuda) or (p.is_xla and step.is_xla) for p, step in zip(params, state_steps)), \
- "If capturable=True, params and state_steps must be CUDA tensors."
+ assert all(
+ (p.is_cuda and step.is_cuda) or (p.is_xla and step.is_xla)
+ for p, step in zip(params, state_steps)
+ ), "If capturable=True, params and state_steps must be CUDA tensors."
-
- grouped_tensors = Optimizer._group_tensors_by_device_and_dtype([params, grads, square_avgs, grad_avgs,
- momentum_buffer_list, state_steps])
- for (((grouped_params, grouped_grads, grouped_square_avgs, grouped_grad_avgs,
- grouped_momentum_buffer_list, grouped_state_steps)), _) in grouped_tensors.values():
+ grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
+ [params, grads, square_avgs, grad_avgs, momentum_buffer_list, state_steps]
+ )
+ for (
+ (
+ grouped_params,
+ grouped_grads,
+ grouped_square_avgs,
+ grouped_grad_avgs,
+ grouped_momentum_buffer_list,
+ grouped_state_steps,
+ )
+ ), _ in grouped_tensors.values():
if has_complex:
state_and_grads = [grouped_grads, grouped_square_avgs]
if momentum > 0:
@@ -395,7 +446,9 @@ def _multi_tensor_rmsprop(
# and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just
# wrapped it once now. The alpha is required to assure we go to the right overload.
if grouped_state_steps[0].is_cpu:
- torch._foreach_add_(grouped_state_steps, torch.tensor(1.0, device='cpu'), alpha=1.0)
+ torch._foreach_add_(
+ grouped_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
+ )
else:
torch._foreach_add_(grouped_state_steps, 1)
@@ -404,14 +457,20 @@ def _multi_tensor_rmsprop(
if maximize:
torch._foreach_add_(grouped_grads, grouped_params, alpha=weight_decay)
else:
- grouped_grads = torch._foreach_add(grouped_grads, grouped_params, alpha=weight_decay)
+ grouped_grads = torch._foreach_add(
+ grouped_grads, grouped_params, alpha=weight_decay
+ )
torch._foreach_mul_(grouped_square_avgs, alpha)
- torch._foreach_addcmul_(grouped_square_avgs, grouped_grads, grouped_grads, value=1 - alpha)
+ torch._foreach_addcmul_(
+ grouped_square_avgs, grouped_grads, grouped_grads, value=1 - alpha
+ )
if centered:
torch._foreach_lerp_(grouped_grad_avgs, grouped_grads, 1 - alpha)
- avg = torch._foreach_addcmul(grouped_square_avgs, grouped_grad_avgs, grouped_grad_avgs, value=-1)
+ avg = torch._foreach_addcmul(
+ grouped_square_avgs, grouped_grad_avgs, grouped_grad_avgs, value=-1
+ )
torch._foreach_sqrt_(avg)
torch._foreach_add_(avg, eps)
else:
@@ -427,7 +486,9 @@ def _multi_tensor_rmsprop(
momentum_lr = torch._foreach_mul(grouped_momentum_buffer_list, -lr)
torch._foreach_add_(grouped_params, momentum_lr)
else:
- torch._foreach_add_(grouped_params, grouped_momentum_buffer_list, alpha=-lr)
+ torch._foreach_add_(
+ grouped_params, grouped_momentum_buffer_list, alpha=-lr
+ )
else:
# If LR is a tensor, the else branch will internally call item()
# which will cause silent incorrectness if we are capturing
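A single-tensor rendering of the centered-plus-momentum update that the foreach calls above batch across parameters (a sketch with illustrative hyperparameters, not the library code path):

    import torch

    lr, alpha, eps, momentum = 1e-2, 0.99, 1e-8, 0.9
    param, grad = torch.zeros(3), torch.ones(3)
    square_avg, grad_avg, buf = torch.zeros(3), torch.zeros(3), torch.zeros(3)

    square_avg.mul_(alpha).addcmul_(grad, grad, value=1 - alpha)
    grad_avg.lerp_(grad, 1 - alpha)                                   # centered=True path
    avg = square_avg.addcmul(grad_avg, grad_avg, value=-1).sqrt_().add_(eps)

    if momentum > 0:
        buf.mul_(momentum).addcdiv_(grad, avg)
        param.add_(buf, alpha=-lr)
    else:
        param.addcdiv_(grad, avg, value=-lr)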
diff --git a/torch/optim/rprop.py b/torch/optim/rprop.py
index 76af710c6c..1ab6080026 100644
--- a/torch/optim/rprop.py
+++ b/torch/optim/rprop.py
@@ -1,8 +1,18 @@
+from typing import List, Optional
+
import torch
from torch import Tensor
-from .optimizer import (Optimizer, _use_grad_for_differentiable, _default_to_fused_or_foreach, _get_scalar_dtype,
- _differentiable_doc, _foreach_doc, _maximize_doc, _capturable_doc, _view_as_real)
-from typing import List, Optional
+from .optimizer import (
+ _capturable_doc,
+ _default_to_fused_or_foreach,
+ _differentiable_doc,
+ _foreach_doc,
+ _get_scalar_dtype,
+ _maximize_doc,
+ _use_grad_for_differentiable,
+ _view_as_real,
+ Optimizer,
+)
__all__ = ["Rprop", "rprop"]
@@ -45,11 +55,15 @@ class Rprop(Optimizer):
group.setdefault("capturable", False)
for p in group["params"]:
p_state = self.state.get(p, [])
- if len(p_state) != 0 and not torch.is_tensor(p_state['step']):
+ if len(p_state) != 0 and not torch.is_tensor(p_state["step"]):
step_val = float(p_state["step"])
- p_state["step"] = (torch.tensor(step_val, dtype=_get_scalar_dtype(), device=p.device)
- if group['capturable']
- else torch.tensor(step_val, dtype=_get_scalar_dtype()))
+ p_state["step"] = (
+ torch.tensor(
+ step_val, dtype=_get_scalar_dtype(), device=p.device
+ )
+ if group["capturable"]
+ else torch.tensor(step_val, dtype=_get_scalar_dtype())
+ )
def _init_group(self, group, params, grads, prevs, step_sizes, state_steps):
has_complex = False
@@ -67,17 +81,18 @@ class Rprop(Optimizer):
# State initialization
if len(state) == 0:
- state["step"] = (torch.zeros((), dtype=_get_scalar_dtype(), device=p.device)
- if group["capturable"] else torch.zeros((), dtype=_get_scalar_dtype()))
-
- state["prev"] = torch.zeros_like(
- p, memory_format=torch.preserve_format
+ state["step"] = (
+ torch.zeros((), dtype=_get_scalar_dtype(), device=p.device)
+ if group["capturable"]
+ else torch.zeros((), dtype=_get_scalar_dtype())
)
+
+ state["prev"] = torch.zeros_like(p, memory_format=torch.preserve_format)
if p.dtype.is_complex:
# Complex Number should be as if they are two independent real numbers.
# Hence the step_size shouldn't be zero for imaginary part.
- state["step_size"] = (
- torch.full_like(grad, complex(group["lr"], group["lr"]))
+ state["step_size"] = torch.full_like(
+ grad, complex(group["lr"], group["lr"])
)
else:
state["step_size"] = torch.full_like(grad, group["lr"])
@@ -115,7 +130,9 @@ class Rprop(Optimizer):
foreach = group["foreach"]
maximize = group["maximize"]
- has_complex = self._init_group(group, params, grads, prevs, step_sizes, state_steps)
+ has_complex = self._init_group(
+ group, params, grads, prevs, step_sizes, state_steps
+ )
rprop(
params,
@@ -137,7 +154,8 @@ class Rprop(Optimizer):
return loss
-Rprop.__doc__ = r"""Implements the resilient backpropagation algorithm.
+Rprop.__doc__ = (
+ r"""Implements the resilient backpropagation algorithm.
.. math::
\begin{aligned}
@@ -171,7 +189,8 @@ Rprop.__doc__ = r"""Implements the resilient backpropagation algorithm.
For further details regarding the algorithm we refer to the paper
`A Direct Adaptive Method for Faster Backpropagation Learning: The RPROP Algorithm
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.21.1417>`_.
- """ + fr"""
+ """
+ + rf"""
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
@@ -187,6 +206,8 @@ Rprop.__doc__ = r"""Implements the resilient backpropagation algorithm.
{_differentiable_doc}
"""
+)
+
def rprop(
params: List[Tensor],
@@ -213,11 +234,17 @@ def rprop(
"""
# this check is slow during compilation, so we skip it
# if it's strictly needed we can add this check back in dynamo
- if not torch._utils.is_compiling() and not all(isinstance(t, torch.Tensor) for t in state_steps):
- raise RuntimeError("API has changed, `state_steps` argument must contain a list of singleton tensors")
+ if not torch._utils.is_compiling() and not all(
+ isinstance(t, torch.Tensor) for t in state_steps
+ ):
+ raise RuntimeError(
+ "API has changed, `state_steps` argument must contain a list of singleton tensors"
+ )
if foreach is None:
- _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False)
+ _, foreach = _default_to_fused_or_foreach(
+ params, differentiable, use_fused=False
+ )
if foreach and torch.jit.is_scripting():
raise RuntimeError("torch.jit.script not supported with foreach optimizers")
@@ -269,8 +296,8 @@ def _single_tensor_rprop(
# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
if not torch._utils.is_compiling() and capturable:
- assert (
- (param.is_cuda and step.is_cuda) or (param.is_xla and step.is_xla)
+ assert (param.is_cuda and step.is_cuda) or (
+ param.is_xla and step.is_xla
), "If capturable=True, params and state_steps must be CUDA or XLA tensors."
step += 1
@@ -326,7 +353,6 @@ def _multi_tensor_rprop(
differentiable: bool,
has_complex: bool,
):
-
if len(params) == 0:
return
@@ -334,24 +360,37 @@ def _multi_tensor_rprop(
# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
if not torch._utils.is_compiling() and capturable:
- assert all((p.is_cuda and step.is_cuda) or (p.is_xla and step.is_xla) for p, step in zip(params, state_steps)), \
- "If capturable=True, params and state_steps must be CUDA or XLA tensors."
-
- grouped_tensors = Optimizer._group_tensors_by_device_and_dtype([params, grads, prevs, step_sizes, state_steps])
- for ((grouped_params, grouped_grads, grouped_prevs, grouped_step_sizes, grouped_state_steps), _) in grouped_tensors.values():
+ assert all(
+ (p.is_cuda and step.is_cuda) or (p.is_xla and step.is_xla)
+ for p, step in zip(params, state_steps)
+ ), "If capturable=True, params and state_steps must be CUDA or XLA tensors."
+ grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
+ [params, grads, prevs, step_sizes, state_steps]
+ )
+ for (
+ grouped_params,
+ grouped_grads,
+ grouped_prevs,
+ grouped_step_sizes,
+ grouped_state_steps,
+ ), _ in grouped_tensors.values():
# Update steps
# If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over
# and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just
# wrapped it once now. The alpha is required to assure we go to the right overload.
if grouped_state_steps[0].is_cpu:
- torch._foreach_add_(grouped_state_steps, torch.tensor(1.0, device='cpu'), alpha=1.0)
+ torch._foreach_add_(
+ grouped_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
+ )
else:
torch._foreach_add_(grouped_state_steps, 1)
# Handle complex params
if has_complex:
- _view_as_real(grouped_params, grouped_grads, grouped_prevs, grouped_step_sizes)
+ _view_as_real(
+ grouped_params, grouped_grads, grouped_prevs, grouped_step_sizes
+ )
signs = torch._foreach_mul(grouped_grads, grouped_prevs)
if maximize:
@@ -386,14 +425,18 @@ def _multi_tensor_rprop(
# for dir>=0 dfdx=dfdx
grouped_grads = list(grouped_grads)
for i in range(len(grouped_grads)):
- grouped_grads[i].copy_(torch.where(signs[i].eq(etaminus), 0, grouped_grads[i]))
+ grouped_grads[i].copy_(
+ torch.where(signs[i].eq(etaminus), 0, grouped_grads[i])
+ )
# explicitly del signs as it's not used after here to save memory
del signs
# update parameters
grad_signs = [grad.sign() for grad in grouped_grads]
- torch._foreach_addcmul_(grouped_params, grad_signs, grouped_step_sizes, value=-1)
+ torch._foreach_addcmul_(
+ grouped_params, grad_signs, grouped_step_sizes, value=-1
+ )
# Logically, you may expect grouped_prevs to get updated to grouped_grads, but that's
# basically already happened since we've been using grouped_prevs' memory to store
diff --git a/torch/optim/sgd.py b/torch/optim/sgd.py
index 7002d98502..9a21fceb6c 100644
--- a/torch/optim/sgd.py
+++ b/torch/optim/sgd.py
@@ -1,16 +1,35 @@
+from typing import List, Optional
+
import torch
from torch import Tensor
-from .optimizer import (Optimizer, _use_grad_for_differentiable, _default_to_fused_or_foreach,
- _differentiable_doc, _foreach_doc, _maximize_doc, _fused_doc)
-from typing import List, Optional
+from .optimizer import (
+ _default_to_fused_or_foreach,
+ _differentiable_doc,
+ _foreach_doc,
+ _fused_doc,
+ _maximize_doc,
+ _use_grad_for_differentiable,
+ Optimizer,
+)
-__all__ = ['SGD', 'sgd']
+__all__ = ["SGD", "sgd"]
class SGD(Optimizer):
- def __init__(self, params, lr=1e-3, momentum=0, dampening=0,
- weight_decay=0, nesterov=False, *, maximize: bool = False, foreach: Optional[bool] = None,
- differentiable: bool = False, fused: Optional[bool] = None):
+ def __init__(
+ self,
+ params,
+ lr=1e-3,
+ momentum=0,
+ dampening=0,
+ weight_decay=0,
+ nesterov=False,
+ *,
+ maximize: bool = False,
+ foreach: Optional[bool] = None,
+ differentiable: bool = False,
+ fused: Optional[bool] = None,
+ ):
if lr < 0.0:
raise ValueError(f"Invalid learning rate: {lr}")
if momentum < 0.0:
@@ -18,10 +37,17 @@ class SGD(Optimizer):
if weight_decay < 0.0:
raise ValueError(f"Invalid weight_decay value: {weight_decay}")
- defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
- weight_decay=weight_decay, nesterov=nesterov,
- maximize=maximize, foreach=foreach,
- differentiable=differentiable, fused=fused)
+ defaults = dict(
+ lr=lr,
+ momentum=momentum,
+ dampening=dampening,
+ weight_decay=weight_decay,
+ nesterov=nesterov,
+ maximize=maximize,
+ foreach=foreach,
+ differentiable=differentiable,
+ fused=fused,
+ )
if nesterov and (momentum <= 0 or dampening != 0):
raise ValueError("Nesterov momentum requires a momentum and zero dampening")
super().__init__(params, defaults)
@@ -36,16 +62,16 @@ class SGD(Optimizer):
def __setstate__(self, state):
super().__setstate__(state)
for group in self.param_groups:
- group.setdefault('nesterov', False)
- group.setdefault('maximize', False)
- group.setdefault('foreach', None)
- group.setdefault('differentiable', False)
- group.setdefault('fused', False)
+ group.setdefault("nesterov", False)
+ group.setdefault("maximize", False)
+ group.setdefault("foreach", None)
+ group.setdefault("differentiable", False)
+ group.setdefault("fused", False)
def _init_group(self, group, params_with_grad, d_p_list, momentum_buffer_list):
has_sparse_grad = False
- for p in group['params']:
+ for p in group["params"]:
if p.grad is not None:
params_with_grad.append(p)
d_p_list.append(p.grad)
@@ -54,7 +80,7 @@ class SGD(Optimizer):
if group["momentum"] != 0:
state = self.state[p]
- momentum_buffer_list.append(state.get('momentum_buffer'))
+ momentum_buffer_list.append(state.get("momentum_buffer"))
return has_sparse_grad
@@ -76,33 +102,38 @@ class SGD(Optimizer):
d_p_list = []
momentum_buffer_list = []
- has_sparse_grad = self._init_group(group, params_with_grad, d_p_list, momentum_buffer_list)
+ has_sparse_grad = self._init_group(
+ group, params_with_grad, d_p_list, momentum_buffer_list
+ )
- sgd(params_with_grad,
+ sgd(
+ params_with_grad,
d_p_list,
momentum_buffer_list,
- weight_decay=group['weight_decay'],
- momentum=group['momentum'],
- lr=group['lr'],
- dampening=group['dampening'],
- nesterov=group['nesterov'],
- maximize=group['maximize'],
+ weight_decay=group["weight_decay"],
+ momentum=group["momentum"],
+ lr=group["lr"],
+ dampening=group["dampening"],
+ nesterov=group["nesterov"],
+ maximize=group["maximize"],
has_sparse_grad=has_sparse_grad,
- foreach=group['foreach'],
- fused=group['fused'],
+ foreach=group["foreach"],
+ fused=group["fused"],
grad_scale=getattr(self, "grad_scale", None),
- found_inf=getattr(self, "found_inf", None))
+ found_inf=getattr(self, "found_inf", None),
+ )
if group["momentum"] != 0:
# update momentum_buffers in state
for p, momentum_buffer in zip(params_with_grad, momentum_buffer_list):
state = self.state[p]
- state['momentum_buffer'] = momentum_buffer
+ state["momentum_buffer"] = momentum_buffer
return loss
-SGD.__doc__ = r"""Implements stochastic gradient descent (optionally with momentum).
+SGD.__doc__ = (
+ r"""Implements stochastic gradient descent (optionally with momentum).
.. math::
\begin{aligned}
@@ -136,7 +167,8 @@ SGD.__doc__ = r"""Implements stochastic gradient descent (optionally with moment
Nesterov momentum is based on the formula from
`On the importance of initialization and momentum in deep learning`__.
- """ + fr"""
+ """
+ + rf"""
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
@@ -149,7 +181,8 @@ SGD.__doc__ = r"""Implements stochastic gradient descent (optionally with moment
{_foreach_doc}
{_differentiable_doc}
{_fused_doc}
- """ + r"""
+ """
+ + r"""
Example:
>>> # xdoctest: +SKIP
@@ -191,25 +224,28 @@ SGD.__doc__ = r"""Implements stochastic gradient descent (optionally with moment
frameworks that initialize it to all zeros.
"""
+)
-def sgd(params: List[Tensor],
- d_p_list: List[Tensor],
- momentum_buffer_list: List[Optional[Tensor]],
- # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
- # setting this as kwarg for now as functional API is compiled by torch/distributed/optim
- has_sparse_grad: bool = None,
- foreach: Optional[bool] = None,
- fused: Optional[bool] = None,
- grad_scale: Optional[Tensor] = None,
- found_inf: Optional[Tensor] = None,
- *,
- weight_decay: float,
- momentum: float,
- lr: float,
- dampening: float,
- nesterov: bool,
- maximize: bool):
+def sgd(
+ params: List[Tensor],
+ d_p_list: List[Tensor],
+ momentum_buffer_list: List[Optional[Tensor]],
+ # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
+ # setting this as kwarg for now as functional API is compiled by torch/distributed/optim
+ has_sparse_grad: bool = None,
+ foreach: Optional[bool] = None,
+ fused: Optional[bool] = None,
+ grad_scale: Optional[Tensor] = None,
+ found_inf: Optional[Tensor] = None,
+ *,
+ weight_decay: float,
+ momentum: float,
+ lr: float,
+ dampening: float,
+ nesterov: bool,
+ maximize: bool,
+):
r"""Functional API that performs SGD algorithm computation.
See :class:`~torch.optim.SGD` for details.
@@ -223,7 +259,9 @@ def sgd(params: List[Tensor],
# why must we be explicit about an if statement for torch.jit.is_scripting here?
# because JIT can't handle Optionals nor fancy conditionals when scripting
if not torch.jit.is_scripting():
- fused, foreach = _default_to_fused_or_foreach(params, differentiable=False, use_fused=False)
+ fused, foreach = _default_to_fused_or_foreach(
+ params, differentiable=False, use_fused=False
+ )
else:
foreach = False
fused = False
@@ -233,9 +271,9 @@ def sgd(params: List[Tensor],
fused = False
if foreach and torch.jit.is_scripting():
- raise RuntimeError('torch.jit.script not supported with foreach optimizers')
+ raise RuntimeError("torch.jit.script not supported with foreach optimizers")
if fused and torch.jit.is_scripting():
- raise RuntimeError('torch.jit.script not supported with fused optimizers')
+ raise RuntimeError("torch.jit.script not supported with fused optimizers")
if foreach and not torch.jit.is_scripting():
func = _multi_tensor_sgd
@@ -244,32 +282,37 @@ def sgd(params: List[Tensor],
else:
func = _single_tensor_sgd
- func(params,
- d_p_list,
- momentum_buffer_list,
- weight_decay=weight_decay,
- momentum=momentum,
- lr=lr,
- dampening=dampening,
- nesterov=nesterov,
- has_sparse_grad=has_sparse_grad,
- maximize=maximize,
- grad_scale=grad_scale,
- found_inf=found_inf)
-
-def _single_tensor_sgd(params: List[Tensor],
- d_p_list: List[Tensor],
- momentum_buffer_list: List[Optional[Tensor]],
- grad_scale: Optional[Tensor],
- found_inf: Optional[Tensor],
- *,
- weight_decay: float,
- momentum: float,
- lr: float,
- dampening: float,
- nesterov: bool,
- maximize: bool,
- has_sparse_grad: bool):
+ func(
+ params,
+ d_p_list,
+ momentum_buffer_list,
+ weight_decay=weight_decay,
+ momentum=momentum,
+ lr=lr,
+ dampening=dampening,
+ nesterov=nesterov,
+ has_sparse_grad=has_sparse_grad,
+ maximize=maximize,
+ grad_scale=grad_scale,
+ found_inf=found_inf,
+ )
+
+
+def _single_tensor_sgd(
+ params: List[Tensor],
+ d_p_list: List[Tensor],
+ momentum_buffer_list: List[Optional[Tensor]],
+ grad_scale: Optional[Tensor],
+ found_inf: Optional[Tensor],
+ *,
+ weight_decay: float,
+ momentum: float,
+ lr: float,
+ dampening: float,
+ nesterov: bool,
+ maximize: bool,
+ has_sparse_grad: bool,
+):
assert grad_scale is None and found_inf is None
for i, param in enumerate(params):
@@ -295,27 +338,37 @@ def _single_tensor_sgd(params: List[Tensor],
param.add_(d_p, alpha=-lr)
-def _multi_tensor_sgd(params: List[Tensor],
- grads: List[Tensor],
- momentum_buffer_list: List[Optional[Tensor]],
- grad_scale: Optional[Tensor],
- found_inf: Optional[Tensor],
- *,
- weight_decay: float,
- momentum: float,
- lr: float,
- dampening: float,
- nesterov: bool,
- maximize: bool,
- has_sparse_grad: bool):
+def _multi_tensor_sgd(
+ params: List[Tensor],
+ grads: List[Tensor],
+ momentum_buffer_list: List[Optional[Tensor]],
+ grad_scale: Optional[Tensor],
+ found_inf: Optional[Tensor],
+ *,
+ weight_decay: float,
+ momentum: float,
+ lr: float,
+ dampening: float,
+ nesterov: bool,
+ maximize: bool,
+ has_sparse_grad: bool,
+):
assert grad_scale is None and found_inf is None
if len(params) == 0:
return
- grouped_tensors = Optimizer._group_tensors_by_device_and_dtype([params, grads, momentum_buffer_list], with_indices=True)
- for ((device_params, device_grads, device_momentum_buffer_list), indices) in grouped_tensors.values():
- device_has_sparse_grad = has_sparse_grad and any(grad.is_sparse for grad in device_grads)
+ grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
+ [params, grads, momentum_buffer_list], with_indices=True
+ )
+ for (
+ device_params,
+ device_grads,
+ device_momentum_buffer_list,
+ ), indices in grouped_tensors.values():
+ device_has_sparse_grad = has_sparse_grad and any(
+ grad.is_sparse for grad in device_grads
+ )
if maximize:
device_grads = torch._foreach_neg(device_grads)
@@ -325,7 +378,9 @@ def _multi_tensor_sgd(params: List[Tensor],
if maximize:
torch._foreach_add_(device_grads, device_params, alpha=weight_decay)
else:
- device_grads = torch._foreach_add(device_grads, device_params, alpha=weight_decay)
+ device_grads = torch._foreach_add(
+ device_grads, device_params, alpha=weight_decay
+ )
if momentum != 0:
bufs = []
@@ -345,8 +400,9 @@ def _multi_tensor_sgd(params: List[Tensor],
bufs = []
for i in range(len(device_momentum_buffer_list)):
if device_momentum_buffer_list[i] is None:
- buf = device_momentum_buffer_list[i] = momentum_buffer_list[indices[i]] = \
- torch.clone(device_grads[i]).detach()
+ buf = device_momentum_buffer_list[i] = momentum_buffer_list[
+ indices[i]
+ ] = torch.clone(device_grads[i]).detach()
else:
buf = device_momentum_buffer_list[i]
buf.mul_(momentum).add_(device_grads[i], alpha=1 - dampening)
@@ -385,17 +441,25 @@ def _fused_sgd(
return
if has_sparse_grad:
raise RuntimeError("`_fused_sgd` does not support sparse gradients")
- grad_scale_dict = {grad_scale.device: grad_scale} if grad_scale is not None else None
+ grad_scale_dict = (
+ {grad_scale.device: grad_scale} if grad_scale is not None else None
+ )
found_inf_dict = {found_inf.device: found_inf} if found_inf is not None else None
no_momentum_buffer = momentum == 0
- is_first_step = all(t is None for t in momentum_buffer_list) and not no_momentum_buffer
+ is_first_step = (
+ all(t is None for t in momentum_buffer_list) and not no_momentum_buffer
+ )
if is_first_step:
for i, g in enumerate(grads):
momentum_buffer_list[i] = torch.empty_like(g)
grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
- [params, grads, momentum_buffer_list], with_indices=False)
- for (device, dtype), ((device_params, device_grads, device_momentum_buffer_list), _) in grouped_tensors.items():
+ [params, grads, momentum_buffer_list], with_indices=False
+ )
+ for (device, dtype), (
+ (device_params, device_grads, device_momentum_buffer_list),
+ _,
+ ) in grouped_tensors.items():
device_grad_scale, device_found_inf = None, None
if grad_scale is not None:
if device not in grad_scale_dict:
diff --git a/torch/optim/sparse_adam.py b/torch/optim/sparse_adam.py
index 685e279923..e3ee2db820 100644
--- a/torch/optim/sparse_adam.py
+++ b/torch/optim/sparse_adam.py
@@ -1,11 +1,14 @@
import torch
from . import _functional as F
-from .optimizer import Optimizer, _maximize_doc
+from .optimizer import _maximize_doc, Optimizer
+
+__all__ = ["SparseAdam"]
-__all__ = ['SparseAdam']
class SparseAdam(Optimizer):
- def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, maximize: bool = False):
+ def __init__(
+ self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, maximize: bool = False
+ ):
if not 0.0 < lr:
raise ValueError(f"Invalid learning rate: {lr}")
if not 0.0 < eps:
@@ -21,9 +24,11 @@ class SparseAdam(Optimizer):
sparse_params = []
complex_params = []
for index, param_group in enumerate(self.param_groups):
- assert isinstance(param_group, dict), f"param_groups must be a list of dicts, but got {type(param_group)}"
+ assert isinstance(
+ param_group, dict
+ ), f"param_groups must be a list of dicts, but got {type(param_group)}"
# given param group, convert given params to a list first before iterating
- for d_index, d_param in enumerate(param_group['params']):
+ for d_index, d_param in enumerate(param_group["params"]):
if d_param.is_sparse:
sparse_params.append([index, d_index])
if d_param.is_complex():
@@ -37,7 +42,6 @@ class SparseAdam(Optimizer):
f"Complex params at indices {complex_params}: SparseAdam does not support complex parameters"
)
-
@torch.no_grad()
def step(self, closure=None):
"""Perform a single optimization step.
@@ -57,50 +61,59 @@ class SparseAdam(Optimizer):
exp_avgs = []
exp_avg_sqs = []
state_steps = []
- eps = group['eps']
- lr = group['lr']
- beta1, beta2 = group['betas']
- maximize = group.get('maximize', False)
+ eps = group["eps"]
+ lr = group["lr"]
+ beta1, beta2 = group["betas"]
+ maximize = group.get("maximize", False)
- for p in group['params']:
+ for p in group["params"]:
if p.grad is not None:
params_with_grad.append(p)
if not p.grad.is_sparse:
- raise RuntimeError('SparseAdam does not support dense gradients, please consider Adam instead')
+ raise RuntimeError(
+ "SparseAdam does not support dense gradients, please consider Adam instead"
+ )
grads.append(p.grad)
state = self.state[p]
# State initialization
if len(state) == 0:
- state['step'] = 0
+ state["step"] = 0
# Exponential moving average of gradient values
- state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
+ state["exp_avg"] = torch.zeros_like(
+ p, memory_format=torch.preserve_format
+ )
# Exponential moving average of squared gradient values
- state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
+ state["exp_avg_sq"] = torch.zeros_like(
+ p, memory_format=torch.preserve_format
+ )
- exp_avgs.append(state['exp_avg'])
- exp_avg_sqs.append(state['exp_avg_sq'])
+ exp_avgs.append(state["exp_avg"])
+ exp_avg_sqs.append(state["exp_avg_sq"])
# update the steps for each param group update
- state['step'] += 1
+ state["step"] += 1
# record the step after step update
- state_steps.append(state['step'])
-
- F.sparse_adam(params_with_grad,
- grads,
- exp_avgs,
- exp_avg_sqs,
- state_steps,
- beta1=beta1,
- beta2=beta2,
- lr=group['lr'],
- eps=group['eps'],
- maximize=maximize)
+ state_steps.append(state["step"])
+
+ F.sparse_adam(
+ params_with_grad,
+ grads,
+ exp_avgs,
+ exp_avg_sqs,
+ state_steps,
+ beta1=beta1,
+ beta2=beta2,
+ lr=group["lr"],
+ eps=group["eps"],
+ maximize=maximize,
+ )
return loss
-SparseAdam.__doc__ = fr"""SparseAdam implements a masked version of the Adam algorithm
+
+SparseAdam.__doc__ = rf"""SparseAdam implements a masked version of the Adam algorithm
suitable for sparse gradients. Currently, due to implementation constraints (explained
below), SparseAdam is only intended for a narrow subset of use cases, specifically
parameters of a dense layout with gradients of a sparse layout. This occurs in a
diff --git a/torch/optim/swa_utils.py b/torch/optim/swa_utils.py
index 0c685e34fc..62bb93c906 100644
--- a/torch/optim/swa_utils.py
+++ b/torch/optim/swa_utils.py
@@ -1,8 +1,8 @@
import itertools
import math
-from copy import deepcopy
-from typing import Any, Callable, Iterable, List, Optional, Tuple, Union, Dict, cast
import warnings
+from copy import deepcopy
+from typing import Any, Callable, cast, Dict, Iterable, List, Optional, Tuple, Union
import torch
from torch import Tensor
@@ -12,16 +12,20 @@ from torch.utils._foreach_utils import _get_foreach_kernels_supported_devices
from .optimizer import Optimizer
__all__ = [
- 'AveragedModel',
- 'update_bn',
- 'SWALR',
- 'get_ema_multi_avg_fn',
- 'get_swa_multi_avg_fn',
- 'get_ema_avg_fn',
- 'get_swa_avg_fn'
+ "AveragedModel",
+ "update_bn",
+ "SWALR",
+ "get_ema_multi_avg_fn",
+ "get_swa_multi_avg_fn",
+ "get_ema_avg_fn",
+ "get_swa_avg_fn",
]
-from torch.utils._foreach_utils import _group_tensors_by_device_and_dtype, TensorListList, Indices
+from torch.utils._foreach_utils import (
+ _group_tensors_by_device_and_dtype,
+ Indices,
+ TensorListList,
+)
PARAM_LIST = Union[Tuple[Tensor, ...], List[Tensor]]
@@ -30,7 +34,9 @@ def get_ema_multi_avg_fn(decay=0.999):
@torch.no_grad()
def ema_update(ema_param_list: PARAM_LIST, current_param_list: PARAM_LIST, _):
# foreach lerp only handles float and complex
- if torch.is_floating_point(ema_param_list[0]) or torch.is_complex(ema_param_list[0]):
+ if torch.is_floating_point(ema_param_list[0]) or torch.is_complex(
+ ema_param_list[0]
+ ):
torch._foreach_lerp_(ema_param_list, current_param_list, 1 - decay)
else:
for p_ema, p_model in zip(ema_param_list, current_param_list):
@@ -41,16 +47,30 @@ def get_ema_multi_avg_fn(decay=0.999):
def get_swa_multi_avg_fn():
@torch.no_grad()
- def swa_update(averaged_param_list: PARAM_LIST, current_param_list: PARAM_LIST, num_averaged: Union[Tensor, int]):
+ def swa_update(
+ averaged_param_list: PARAM_LIST,
+ current_param_list: PARAM_LIST,
+ num_averaged: Union[Tensor, int],
+ ):
# foreach lerp only handles float and complex
- if torch.is_floating_point(averaged_param_list[0]) or torch.is_complex(averaged_param_list[0]):
- torch._foreach_lerp_(averaged_param_list, current_param_list, 1 / (num_averaged + 1))
+ if torch.is_floating_point(averaged_param_list[0]) or torch.is_complex(
+ averaged_param_list[0]
+ ):
+ torch._foreach_lerp_(
+ averaged_param_list, current_param_list, 1 / (num_averaged + 1)
+ )
else:
diffs = torch._foreach_sub(current_param_list, averaged_param_list)
if isinstance(num_averaged, Tensor):
- torch._foreach_addcdiv_(averaged_param_list, diffs, [num_averaged + 1] * len(averaged_param_list))
+ torch._foreach_addcdiv_(
+ averaged_param_list,
+ diffs,
+ [num_averaged + 1] * len(averaged_param_list),
+ )
else:
- torch._foreach_add_(averaged_param_list, diffs, alpha=1.0 / (num_averaged + 1))
+ torch._foreach_add_(
+ averaged_param_list, diffs, alpha=1.0 / (num_averaged + 1)
+ )
return swa_update
@@ -65,7 +85,9 @@ def get_ema_avg_fn(decay=0.999):
def get_swa_avg_fn():
@torch.no_grad()
- def swa_update(averaged_param: Tensor, current_param: Tensor, num_averaged: Union[Tensor, int]):
+ def swa_update(
+ averaged_param: Tensor, current_param: Tensor, num_averaged: Union[Tensor, int]
+ ):
return averaged_param + (current_param - averaged_param) / (num_averaged + 1)
return swa_update
@@ -175,19 +197,22 @@ class AveragedModel(Module):
self,
model: Module,
device: Optional[Union[int, torch.device]] = None,
- avg_fn: Optional[Callable[[Tensor, Tensor, Union[Tensor, int]],
- Tensor]] = None,
- multi_avg_fn: Optional[Callable[
- [PARAM_LIST, PARAM_LIST, Union[Tensor, int]], None]] = None,
+ avg_fn: Optional[Callable[[Tensor, Tensor, Union[Tensor, int]], Tensor]] = None,
+ multi_avg_fn: Optional[
+ Callable[[PARAM_LIST, PARAM_LIST, Union[Tensor, int]], None]
+ ] = None,
use_buffers=False,
):
super().__init__()
- assert avg_fn is None or multi_avg_fn is None, 'Only one of avg_fn and multi_avg_fn should be provided'
+ assert (
+ avg_fn is None or multi_avg_fn is None
+ ), "Only one of avg_fn and multi_avg_fn should be provided"
self.module = deepcopy(model)
if device is not None:
self.module = self.module.to(device)
- self.register_buffer('n_averaged',
- torch.tensor(0, dtype=torch.long, device=device))
+ self.register_buffer(
+ "n_averaged", torch.tensor(0, dtype=torch.long, device=device)
+ )
self.avg_fn = avg_fn
self.multi_avg_fn = multi_avg_fn
self.use_buffers = use_buffers
@@ -198,11 +223,13 @@ class AveragedModel(Module):
def update_parameters(self, model: Module):
self_param = (
itertools.chain(self.module.parameters(), self.module.buffers())
- if self.use_buffers else self.parameters()
+ if self.use_buffers
+ else self.parameters()
)
model_param = (
itertools.chain(model.parameters(), model.buffers())
- if self.use_buffers else model.parameters()
+ if self.use_buffers
+ else model.parameters()
)
self_param_detached = []
model_param_detached = []
@@ -216,25 +243,41 @@ class AveragedModel(Module):
if self.n_averaged > 0:
if self.multi_avg_fn is not None or self.avg_fn is None:
grouped_tensors = _group_tensors_by_device_and_dtype(
- cast(TensorListList, [self_param_detached, model_param_detached]))
+ cast(TensorListList, [self_param_detached, model_param_detached])
+ )
grouped_tensors = cast(
- Dict[Tuple[torch.device, torch.dtype], Tuple[List[List[Tensor]], Indices]],
- grouped_tensors)
- for ((device, _), ([self_params, model_params], _)) in grouped_tensors.items():
+ Dict[
+ Tuple[torch.device, torch.dtype],
+ Tuple[List[List[Tensor]], Indices],
+ ],
+ grouped_tensors,
+ )
+ for (device, _), (
+ [self_params, model_params],
+ _,
+ ) in grouped_tensors.items():
if self.multi_avg_fn:
- self.multi_avg_fn(self_params, model_params, self.n_averaged.to(device))
+ self.multi_avg_fn(
+ self_params, model_params, self.n_averaged.to(device)
+ )
elif device.type in _get_foreach_kernels_supported_devices():
multi_avg_fn = get_swa_multi_avg_fn()
- multi_avg_fn(self_params, model_params, self.n_averaged.to(device))
+ multi_avg_fn(
+ self_params, model_params, self.n_averaged.to(device)
+ )
else:
avg_fn = get_swa_avg_fn()
n_averaged = self.n_averaged.to(device)
for p_averaged, p_model in zip(self_params, model_params):
p_averaged.copy_(avg_fn(p_averaged, p_model, n_averaged))
else:
- for p_averaged, p_model in zip(self_param_detached, model_param_detached):
+ for p_averaged, p_model in zip(
+ self_param_detached, model_param_detached
+ ):
n_averaged = self.n_averaged.to(p_averaged.device)
- p_averaged.detach().copy_(self.avg_fn(p_averaged.detach(), p_model, n_averaged))
+ p_averaged.detach().copy_(
+ self.avg_fn(p_averaged.detach(), p_model, n_averaged)
+ )
if not self.use_buffers:
# If not apply running averages to the buffers,
@@ -245,7 +288,11 @@ class AveragedModel(Module):
@torch.no_grad()
-def update_bn(loader: Iterable[Any], model: Module, device: Optional[Union[int, torch.device]] = None):
+def update_bn(
+ loader: Iterable[Any],
+ model: Module,
+ device: Optional[Union[int, torch.device]] = None,
+):
r"""Updates BatchNorm running_mean, running_var buffers in the model.
It performs one pass over data in `loader` to estimate the activation
@@ -341,19 +388,31 @@ class SWALR(LRScheduler):
.. _Averaging Weights Leads to Wider Optima and Better Generalization:
https://arxiv.org/abs/1803.05407
"""
- def __init__(self, optimizer: Optimizer, swa_lr: float, anneal_epochs=10, anneal_strategy='cos', last_epoch=-1):
+
+ def __init__(
+ self,
+ optimizer: Optimizer,
+ swa_lr: float,
+ anneal_epochs=10,
+ anneal_strategy="cos",
+ last_epoch=-1,
+ ):
swa_lrs = self._format_param(optimizer, swa_lr)
for swa_lr, group in zip(swa_lrs, optimizer.param_groups):
- group['swa_lr'] = swa_lr
- if anneal_strategy not in ['cos', 'linear']:
- raise ValueError("anneal_strategy must by one of 'cos' or 'linear', "
- f"instead got {anneal_strategy}")
- elif anneal_strategy == 'cos':
+ group["swa_lr"] = swa_lr
+ if anneal_strategy not in ["cos", "linear"]:
+ raise ValueError(
+ "anneal_strategy must by one of 'cos' or 'linear', "
+ f"instead got {anneal_strategy}"
+ )
+ elif anneal_strategy == "cos":
self.anneal_func = self._cosine_anneal
- elif anneal_strategy == 'linear':
+ elif anneal_strategy == "linear":
self.anneal_func = self._linear_anneal
if not isinstance(anneal_epochs, int) or anneal_epochs < 0:
- raise ValueError(f"anneal_epochs must be equal or greater than 0, got {anneal_epochs}")
+ raise ValueError(
+ f"anneal_epochs must be equal or greater than 0, got {anneal_epochs}"
+ )
self.anneal_epochs = anneal_epochs
super().__init__(optimizer, last_epoch)
@@ -361,9 +420,11 @@ class SWALR(LRScheduler):
def _format_param(optimizer, swa_lrs):
if isinstance(swa_lrs, (list, tuple)):
if len(swa_lrs) != len(optimizer.param_groups):
- raise ValueError("swa_lr must have the same length as "
- f"optimizer.param_groups: swa_lr has {len(swa_lrs)}, "
- f"optimizer.param_groups has {len(optimizer.param_groups)}")
+ raise ValueError(
+ "swa_lr must have the same length as "
+ f"optimizer.param_groups: swa_lr has {len(swa_lrs)}, "
+ f"optimizer.param_groups has {len(optimizer.param_groups)}"
+ )
return swa_lrs
else:
return [swa_lrs] * len(optimizer.param_groups)
@@ -386,17 +447,24 @@ class SWALR(LRScheduler):
# `_get_lr_called_within_step` is only available `_enable_get_lr_call`,
# so we ignore the type error here. See `LRScheduler.step()` for more details.
if not self._get_lr_called_within_step: # type: ignore[attr-defined]
- warnings.warn("To get the last learning rate computed by the scheduler, "
- "please use `get_last_lr()`.", UserWarning)
+ warnings.warn(
+ "To get the last learning rate computed by the scheduler, "
+ "please use `get_last_lr()`.",
+ UserWarning,
+ )
# Set in `LRScheduler._initial_step()`
step = self._step_count - 1 # type: ignore[attr-defined]
if self.anneal_epochs == 0:
step = max(1, step)
prev_t = max(0, min(1, (step - 1) / max(1, self.anneal_epochs)))
prev_alpha = self.anneal_func(prev_t)
- prev_lrs = [self._get_initial_lr(group['lr'], group['swa_lr'], prev_alpha)
- for group in self.optimizer.param_groups]
+ prev_lrs = [
+ self._get_initial_lr(group["lr"], group["swa_lr"], prev_alpha)
+ for group in self.optimizer.param_groups
+ ]
t = max(0, min(1, step / max(1, self.anneal_epochs)))
alpha = self.anneal_func(t)
- return [group['swa_lr'] * alpha + lr * (1 - alpha)
- for group, lr in zip(self.optimizer.param_groups, prev_lrs)]
+ return [
+ group["swa_lr"] * alpha + lr * (1 - alpha)
+ for group, lr in zip(self.optimizer.param_groups, prev_lrs)
+ ]
|
2.41.0
|
4efa311f1c692813befc45142d668f35a66392b
|
Mon, 15 Apr 2024 14:59:51 -0700
|
[PATCH 0189/1000] Refactor test_tensor_set_data to be parametrized (#124105)
|
Signed-off-by: Edward Z. Yang <ezyang@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/124105 Approved by: https://github.com/albanD
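For readers unfamiliar with the parametrization utilities this refactor switches to, here is a small self-contained sketch of the pattern (class and test names are invented for illustration, not taken from the PR):
```
import torch
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    parametrize,
    run_tests,
    TestCase,
)


class ExampleTests(TestCase):
    @parametrize("backend", ["eager", "aot_eager"])
    @parametrize("n", [1, 8])
    def test_compiled_add(self, backend, n):
        fn = torch.compile(lambda x: x + 1, backend=backend)
        x = torch.ones(n)
        self.assertEqual(fn(x), x + 1)


# Expands the decorated test into one concrete test case per (backend, n)
# combination, so each configuration runs and reports independently.
instantiate_parametrized_tests(ExampleTests)


if __name__ == "__main__":
    run_tests()
```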
|
diff --git a/test/dynamo/test_repros.py b/test/dynamo/test_repros.py
index b5ae3c9c83..0f8561f6fb 100644
--- a/test/dynamo/test_repros.py
+++ b/test/dynamo/test_repros.py
@@ -38,6 +38,8 @@ from torch.nn import functional as F
from torch.testing._internal.common_cuda import PLATFORM_SUPPORTS_FLASH_ATTENTION
from torch.testing._internal.common_utils import (
disable_translation_validation_if_dynamic_shapes,
+ instantiate_parametrized_tests,
+ parametrize,
TEST_WITH_ROCM,
)
from torch.testing._internal.two_tensor import TwoTensor
@@ -4122,7 +4124,15 @@ class ReproTests(torch._dynamo.test_case.TestCase):
self.assertEqual(opt_fn("10"), fn("10"))
self.assertEqual(cnt.frame_count, 4)
- def test_tensor_set_data(self):
+ @parametrize(
+ "backend",
+ ["eager", "aot_eager", "inductor"],
+ )
+ @parametrize(
+ "func_name",
+ ["func1", "func2", "func3"],
+ )
+ def test_tensor_set_data(self, backend, func_name):
# https://github.com/pytorch/pytorch/issues/113030
def func1(x, y):
x.data = y
@@ -4140,43 +4150,45 @@ class ReproTests(torch._dynamo.test_case.TestCase):
y.data = torch.zeros([0])
return torch.tensor(x is z)
- for backend in ["eager", "aot_eager", "inductor"]:
- for func in [func1, func2, func3]:
- if backend != "eager" and func is func1:
- # add_ not working w/ aot_autograd?
- continue
- torch._dynamo.reset()
- cnt = torch._dynamo.testing.CompileCounterWithBackend(backend)
-
- compiled_fn = torch.compile(func, backend=cnt, fullgraph=True)
- requires_grad = func is not func1
- for i in range(0, 5):
- # Inputs
- eager_a = torch.ones([6], requires_grad=requires_grad)
- compiled_a = torch.ones([6], requires_grad=requires_grad)
-
- eager_b = torch.ones([6], requires_grad=requires_grad)
- compiled_b = torch.ones([6], requires_grad=requires_grad)
-
- # Eager
- out_eager = func(eager_a, eager_b)
- # Compiled
- out_compiled = compiled_fn(compiled_a, compiled_b)
- self.assertEqual(eager_a, compiled_a)
- self.assertEqual(eager_b, compiled_b)
- self.assertTrue(torch.equal(out_eager, out_compiled))
-
- # func1 hits a leaf Variable that requires grad is being used in an in-place operation
- if requires_grad:
- bwd_inp_eager = torch.randn([6])
- bwd_inp_compiled = torch.clone(bwd_inp_eager)
- eager_a.backward(bwd_inp_eager)
- compiled_a.backward(bwd_inp_compiled)
- self.assertEqual(eager_a.grad, compiled_a.grad)
-
- # Prove guarding works - we run the compiled_fn 5 times
- # frame_count should stay at 1.
- self.assertEqual(cnt.frame_count, 1)
+ funcs = {"func1": func1, "func2": func2, "func3": func3}
+ func = funcs[func_name]
+
+ if backend != "eager" and func is func1:
+ # add_ not working w/ aot_autograd?
+ return
+
+ torch._dynamo.reset()
+ cnt = torch._dynamo.testing.CompileCounterWithBackend(backend)
+
+ compiled_fn = torch.compile(func, backend=cnt, fullgraph=True)
+ requires_grad = func is not func1
+ for i in range(0, 5):
+ # Inputs
+ eager_a = torch.ones([6], requires_grad=requires_grad)
+ compiled_a = torch.ones([6], requires_grad=requires_grad)
+
+ eager_b = torch.ones([6], requires_grad=requires_grad)
+ compiled_b = torch.ones([6], requires_grad=requires_grad)
+
+ # Eager
+ out_eager = func(eager_a, eager_b)
+ # Compiled
+ out_compiled = compiled_fn(compiled_a, compiled_b)
+ self.assertEqual(eager_a, compiled_a)
+ self.assertEqual(eager_b, compiled_b)
+ self.assertTrue(torch.equal(out_eager, out_compiled))
+
+ # func1 hits a leaf Variable that requires grad is being used in an in-place operation
+ if requires_grad:
+ bwd_inp_eager = torch.randn([6])
+ bwd_inp_compiled = torch.clone(bwd_inp_eager)
+ eager_a.backward(bwd_inp_eager)
+ compiled_a.backward(bwd_inp_compiled)
+ self.assertEqual(eager_a.grad, compiled_a.grad)
+
+ # Prove guarding works - we run the compiled_fn 5 times
+ # frame_count should stay at 1.
+ self.assertEqual(cnt.frame_count, 1)
@unittest.skipIf(
TEST_WITH_ROCM or not PLATFORM_SUPPORTS_FLASH_ATTENTION,
@@ -4756,6 +4768,9 @@ def forward(self, s0 : torch.SymInt, s1 : torch.SymInt, L_x_ : torch.Tensor):
self.assertEqual(opt(x, y), foo(x, y))
+instantiate_parametrized_tests(ReproTests)
+
+
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
|
2.41.0
|
f5829d0babaefc6e271897d6fffd40073d8b723
|
Mon, 15 Apr 2024 17:04:16 -0700
|
[PATCH 0191/1000] [inductor] let rand_strided support fp8 (#124120)
|
I'm working on https://fb.workplace.com/groups/1075192433118967/posts/1411161629522044/ (this is a Meta-internal link about an inefficient inner/persistent reduction kernel generated by inductor). I found the generated benchmark code for a kernel ( https://gist.github.com/shunting314/13a0105f72a1c54d9c220370c7fd3845 ) cannot be run since rand_strided fails to generate tensors for fp8. Errors are like ``` RuntimeError: "normal_kernel_cpu" not implemented for 'Float8_e4m3fn' ``` for CPU or ``` RuntimeError: "normal_kernel_cuda" not implemented for 'Float8_e4m3fn' ``` for GPU. This PR works around that problem. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124120 Approved by: https://github.com/Chillee, https://github.com/jansel
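For context, the fix boils down to sampling in a dtype that does have a normal kernel and casting afterwards. A minimal standalone sketch of the same idea (the `rand_normal_fp8` helper name is made up for illustration and assumes a build where `torch.float8_e4m3fn` is available):
```
import torch


def rand_normal_fp8(shape, dtype=torch.float8_e4m3fn, device="cpu"):
    # normal_ has no fp8 kernel, so sample in fp16 and cast to fp8 afterwards.
    return torch.randn(shape, dtype=torch.float16, device=device).to(dtype=dtype)


t = rand_normal_fp8((2, 3))
assert t.dtype is torch.float8_e4m3fn
```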
|
diff --git a/test/inductor/test_torchinductor.py b/test/inductor/test_torchinductor.py
index c477e36b84..786428e690 100644
--- a/test/inductor/test_torchinductor.py
+++ b/test/inductor/test_torchinductor.py
@@ -9558,6 +9558,17 @@ class CommonTemplate:
t = rand_strided((8, 1500, 1), (1504, 1, 1), device=self.device)
self.assertFalse(complex_memory_overlap(t))
+ def test_generate_rand_fp8(self):
+ """
+ PyTorch cannot generate fp8 tensors with a normal distribution because the
+ needed kernels are missing.
+
+ We work around that in rand_strided by generating an fp16 tensor first and
+ then casting.
+ """
+ t = rand_strided((2, 3), (3, 1), device=self.device, dtype=torch.float8_e4m3fn)
+ self.assertTrue(t.dtype is torch.float8_e4m3fn)
+
@dataclasses.dataclass
class TestFailure:
diff --git a/torch/_dynamo/testing.py b/torch/_dynamo/testing.py
index c46304369b..e621b9abaa 100644
--- a/torch/_dynamo/testing.py
+++ b/torch/_dynamo/testing.py
@@ -284,7 +284,16 @@ def rand_strided(
+ extra_size
)
if dtype.is_floating_point:
- buffer = torch.randn(needed_size, dtype=dtype, device=device)
+ if dtype.itemsize == 1:
+ """
+ The normal distribution kernel is not implemented for fp8.
+ Work around that by creating an fp16 tensor and then casting.
+ """
+ buffer = torch.randn(needed_size, dtype=torch.float16, device=device).to(
+ dtype=dtype
+ )
+ else:
+ buffer = torch.randn(needed_size, dtype=dtype, device=device)
else:
buffer = torch.zeros(size=[needed_size], dtype=dtype, device=device)
return torch.as_strided(buffer, size, stride)
|
2.41.0
|
3ef3bb12831f7e68f65276d5ba240db508db6e9
|
Tue, 16 Apr 2024 04:35:25 +0000
|
[PATCH 0192/1000] Fix AVX512 int4pack_mm_kernel crash if weighs are unaligned (#124128)
|
By replacing `_mm256_load_si256` with `_mm256_loadu_si256`, as there is no guarantee that the tensor is aligned. Fixes the crash reported in https://github.com/pytorch/pytorch/issues/124034, though I'm unsure about the perf implications when tensors are properly aligned. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124128 Approved by: https://github.com/mikekgfb
|
diff --git a/aten/src/ATen/native/cpu/int4mm_kernel.cpp b/aten/src/ATen/native/cpu/int4mm_kernel.cpp
index 436cd16c99..1e7a7c0bcb 100644
--- a/aten/src/ATen/native/cpu/int4mm_kernel.cpp
+++ b/aten/src/ATen/native/cpu/int4mm_kernel.cpp
@@ -139,7 +139,7 @@ inline void tinygemm_kernel(
// when BLOCK_N = 64, handle each row at a time
// to reduce de-quantize overhead.
if constexpr (col == 0) {
- __m256i b4 = _mm256_load_si256((__m256i*)(B + k * ldb));
+ __m256i b4 = _mm256_loadu_si256((__m256i*)(B + k * ldb));
if (k + PREFETCH_SIZE_K < K) {
_mm_prefetch(B + (k + PREFETCH_SIZE_K) * ldb, _MM_HINT_T0);
}
|
2.41.0
|
bef127c2ea49280e7fda4f9fa7cad6fa4078e7d
|
Tue, 16 Apr 2024 04:39:20 +0000
|
[PATCH 0193/1000] [Environment Variable][1/N] Use thread-safe env variable API in c10 (#119449)
|
This PR is the beginning of attempts to wrap the thread-unsafe getenv and set_env functions inside an RW mutex. Pull Request resolved: https://github.com/pytorch/pytorch/pull/119449 Approved by: https://github.com/albanD
|
diff --git a/c10/core/impl/alloc_cpu.cpp b/c10/core/impl/alloc_cpu.cpp
index 9b7ae22f9f..def4c3a3a9 100644
--- a/c10/core/impl/alloc_cpu.cpp
+++ b/c10/core/impl/alloc_cpu.cpp
@@ -3,6 +3,7 @@
#include <c10/core/alignment.h>
#include <c10/util/Flags.h>
#include <c10/util/Logging.h>
+#include <c10/util/env.h>
#include <c10/util/irange.h>
#include <c10/util/numa.h>
@@ -53,8 +54,8 @@ void memset_junk(void* data, size_t num) {
#if defined(__linux__) && !defined(__ANDROID__)
static inline bool is_thp_alloc_enabled() {
static bool value = [&] {
- const char* ptr = std::getenv("THP_MEM_ALLOC_ENABLE");
- return ptr != nullptr ? std::atoi(ptr) : 0;
+ auto env = c10::utils::check_env("THP_MEM_ALLOC_ENABLE");
+ return env.has_value() ? env.value() : 0;
}();
return value;
}
diff --git a/c10/cuda/CUDAAllocatorConfig.cpp b/c10/cuda/CUDAAllocatorConfig.cpp
index 1f81ed47b6..ca38dfd6a4 100644
--- a/c10/cuda/CUDAAllocatorConfig.cpp
+++ b/c10/cuda/CUDAAllocatorConfig.cpp
@@ -234,7 +234,7 @@ size_t CUDAAllocatorConfig::parseAllocatorConfig(
return i;
}
-void CUDAAllocatorConfig::parseArgs(const char* env) {
+void CUDAAllocatorConfig::parseArgs(const std::optional<std::string>& env) {
// If empty, set the default values
m_max_split_size = std::numeric_limits<size_t>::max();
m_roundup_power2_divisions.assign(kRoundUpPowerOfTwoIntervals, 0);
@@ -242,16 +242,16 @@ void CUDAAllocatorConfig::parseArgs(const char* env) {
bool used_cudaMallocAsync = false;
bool used_native_specific_option = false;
- if (env == nullptr) {
+ if (!env.has_value()) {
return;
}
{
std::lock_guard<std::mutex> lock(m_last_allocator_settings_mutex);
- m_last_allocator_settings = env;
+ m_last_allocator_settings = env.value();
}
std::vector<std::string> config;
- lexArgs(env, config);
+ lexArgs(env.value().c_str(), config);
for (size_t i = 0; i < config.size(); i++) {
std::string_view config_item_view(config[i]);
diff --git a/c10/cuda/CUDAAllocatorConfig.h b/c10/cuda/CUDAAllocatorConfig.h
index 3106fc1b46..db5c9e1c8f 100644
--- a/c10/cuda/CUDAAllocatorConfig.h
+++ b/c10/cuda/CUDAAllocatorConfig.h
@@ -2,6 +2,7 @@
#include <c10/cuda/CUDAMacros.h>
#include <c10/util/Exception.h>
+#include <c10/util/env.h>
#include <atomic>
#include <cstddef>
@@ -72,14 +73,13 @@ class C10_CUDA_API CUDAAllocatorConfig {
static CUDAAllocatorConfig& instance() {
static CUDAAllocatorConfig* s_instance = ([]() {
auto inst = new CUDAAllocatorConfig();
- const char* env = getenv("PYTORCH_CUDA_ALLOC_CONF");
- inst->parseArgs(env);
+ inst->parseArgs(c10::utils::get_env("PYTORCH_CUDA_ALLOC_CONF"));
return inst;
})();
return *s_instance;
}
- void parseArgs(const char* env);
+ void parseArgs(const std::optional<std::string>& env);
private:
CUDAAllocatorConfig();
diff --git a/c10/cuda/CUDACachingAllocator.cpp b/c10/cuda/CUDACachingAllocator.cpp
index c472e82ce2..afac5272b6 100644
--- a/c10/cuda/CUDACachingAllocator.cpp
+++ b/c10/cuda/CUDACachingAllocator.cpp
@@ -8,6 +8,7 @@
#include <c10/util/CallOnce.h>
#include <c10/util/ScopeExit.h>
#include <c10/util/UniqueVoidPtr.h>
+#include <c10/util/env.h>
#include <c10/util/flat_hash_map.h>
#include <c10/util/hash.h>
#include <c10/util/irange.h>
@@ -2831,7 +2832,7 @@ class DeviceCachingAllocator {
// errors, since the caching allocator foils cuda-memcheck.
bool forceUncachedAllocator() {
static bool force_uncached =
- getenv("PYTORCH_NO_CUDA_MEMORY_CACHING") != nullptr;
+ c10::utils::has_env("PYTORCH_NO_CUDA_MEMORY_CACHING");
return force_uncached;
}
@@ -3363,9 +3364,9 @@ struct BackendStaticInitializer {
// version checks, to CUDAAllocatorConfig's runtime doublecheck. If this
// works, maybe we should move all of CUDAAllocatorConfig here?
CUDAAllocator* parseEnvForBackend() {
- const char* val = getenv("PYTORCH_CUDA_ALLOC_CONF");
- if (val != nullptr) {
- const std::string config(val);
+ const auto val = c10::utils::get_env("PYTORCH_CUDA_ALLOC_CONF");
+ if (val.has_value()) {
+ const std::string& config = val.value();
std::regex exp("[\\s,]+");
std::sregex_token_iterator it(config.begin(), config.end(), exp, -1);
diff --git a/c10/cuda/CUDADeviceAssertionHost.cpp b/c10/cuda/CUDADeviceAssertionHost.cpp
index 1d52af7812..ec41e6230f 100644
--- a/c10/cuda/CUDADeviceAssertionHost.cpp
+++ b/c10/cuda/CUDADeviceAssertionHost.cpp
@@ -3,6 +3,7 @@
#include <c10/cuda/CUDAFunctions.h>
#include <c10/util/Backtrace.h>
#include <c10/util/Exception.h>
+#include <c10/util/env.h>
#include <c10/util/irange.h>
#include <cuda_runtime.h>
@@ -80,8 +81,8 @@ bool dsa_check_if_all_devices_support_managed_memory() {
}
bool env_flag_set(const char* env_var_name) {
- const char* const env_string = std::getenv(env_var_name);
- return (env_string == nullptr) ? false : std::strcmp(env_string, "0");
+ const auto env_flag = c10::utils::check_env(env_var_name);
+ return env_flag.has_value() && env_flag.value();
}
/// Deleter for UVM/managed memory pointers
diff --git a/c10/cuda/CUDAMiscFunctions.cpp b/c10/cuda/CUDAMiscFunctions.cpp
index 11ea775366..9ef724813e 100644
--- a/c10/cuda/CUDAMiscFunctions.cpp
+++ b/c10/cuda/CUDAMiscFunctions.cpp
@@ -1,12 +1,14 @@
#include <c10/cuda/CUDAMiscFunctions.h>
-#include <cstdlib>
+#include <c10/util/env.h>
namespace c10::cuda {
+// NOLINTNEXTLINE(bugprone-exception-escape,-warnings-as-errors)
const char* get_cuda_check_suffix() noexcept {
- static char* device_blocking_flag = getenv("CUDA_LAUNCH_BLOCKING");
+ static auto device_blocking_flag =
+ c10::utils::check_env("CUDA_LAUNCH_BLOCKING");
static bool blocking_enabled =
- (device_blocking_flag && atoi(device_blocking_flag));
+ (device_blocking_flag.has_value() && device_blocking_flag.value());
if (blocking_enabled) {
return "";
} else {
diff --git a/c10/test/util/DeadlockDetection_test.cpp b/c10/test/util/DeadlockDetection_test.cpp
index 35c4953f6d..05ae154e22 100644
--- a/c10/test/util/DeadlockDetection_test.cpp
+++ b/c10/test/util/DeadlockDetection_test.cpp
@@ -1,9 +1,8 @@
#include <c10/util/DeadlockDetection.h>
+#include <c10/util/env.h>
#include <gtest/gtest.h>
-#include <cstdlib>
-
using namespace ::testing;
using namespace c10::impl;
@@ -23,7 +22,7 @@ TEST(DeadlockDetection, basic) {
#ifndef _WIN32
TEST(DeadlockDetection, disable) {
- setenv("TORCH_DISABLE_DEADLOCK_DETECTION", "1", 1);
+ c10::utils::set_env("TORCH_DISABLE_DEADLOCK_DETECTION", "1");
DummyPythonGILHooks hooks;
SetPythonGILHooks(&hooks);
SetPythonGILHooks(&hooks);
diff --git a/c10/util/DeadlockDetection.cpp b/c10/util/DeadlockDetection.cpp
index 320fa7873c..4b00d24534 100644
--- a/c10/util/DeadlockDetection.cpp
+++ b/c10/util/DeadlockDetection.cpp
@@ -1,6 +1,5 @@
#include <c10/util/DeadlockDetection.h>
-
-#include <cstdlib>
+#include <c10/util/env.h>
namespace c10::impl {
@@ -8,7 +7,7 @@ namespace {
PythonGILHooks* python_gil_hooks = nullptr;
bool disable_detection() {
- return std::getenv("TORCH_DISABLE_DEADLOCK_DETECTION") != nullptr;
+ return c10::utils::has_env("TORCH_DISABLE_DEADLOCK_DETECTION");
}
} // namespace
diff --git a/c10/util/Logging.cpp b/c10/util/Logging.cpp
index e9c9e9c2f3..17459f69fa 100644
--- a/c10/util/Logging.cpp
+++ b/c10/util/Logging.cpp
@@ -1,6 +1,7 @@
#include <c10/util/Backtrace.h>
#include <c10/util/Flags.h>
#include <c10/util/Logging.h>
+#include <c10/util/env.h>
#ifdef FBCODE_CAFFE2
#include <folly/synchronization/SanitizeThread.h>
#endif
@@ -10,7 +11,6 @@
#endif
#include <algorithm>
-#include <cstdlib>
#include <iostream>
// Common code that we use regardless of whether we use glog or not.
@@ -94,8 +94,8 @@ using DDPUsageLoggerType = std::function<void(const DDPLoggingData&)>;
namespace {
bool IsAPIUsageDebugMode() {
- const char* val = getenv("PYTORCH_API_USAGE_STDERR");
- return val && *val; // any non-empty value
+ auto val = c10::utils::get_env("PYTORCH_API_USAGE_STDERR");
+ return val.has_value() && !val.value().empty(); // any non-empty value
}
void APIUsageDebug(const string& event) {
@@ -438,10 +438,10 @@ namespace c10::detail {
namespace {
void setLogLevelFlagFromEnv() {
- const char* level_str = std::getenv("TORCH_CPP_LOG_LEVEL");
+ auto level_env = c10::utils::get_env("TORCH_CPP_LOG_LEVEL");
// Not set, fallback to the default level (i.e. WARNING).
- std::string level{level_str != nullptr ? level_str : ""};
+ std::string level{level_env.has_value() ? level_env.value() : ""};
if (level.empty()) {
return;
}
diff --git a/c10/util/env.cpp b/c10/util/env.cpp
new file mode 100644
index 0000000000..9d9193a552
--- /dev/null
+++ b/c10/util/env.cpp
@@ -0,0 +1,104 @@
+#include <c10/util/Exception.h>
+#include <c10/util/env.h>
+#include <fmt/format.h>
+#include <cstdlib>
+#include <shared_mutex>
+
+namespace c10::utils {
+
+static std::shared_mutex env_mutex;
+
+// Set an environment variable.
+void set_env(const char* name, const char* value, bool overwrite) {
+ std::lock_guard lk(env_mutex);
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable : 4996)
+#endif
+#ifdef _MSC_VER
+ if (!overwrite) {
+ // NOLINTNEXTLINE(concurrency-mt-unsafe)
+ if (std::getenv(name) != nullptr) {
+ return;
+ }
+ }
+ auto full_env_variable = fmt::format("{}={}", name, value);
+ // NOLINTNEXTLINE(concurrency-mt-unsafe)
+ auto err = putenv(full_env_variable.c_str());
+ if (err != 0) {
+ TORCH_INTERNAL_ASSERT(
+ "putenv failed for environment \"", name, "\", the error is: ", err);
+ }
+#else
+ // NOLINTNEXTLINE(concurrency-mt-unsafe)
+ auto err = setenv(name, value, static_cast<int>(overwrite));
+ if (err != 0) {
+ TORCH_INTERNAL_ASSERT(
+ "setenv failed for environment \"", name, "\", the error is: ", err);
+ }
+#endif
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+ return;
+}
+
+// Checks an environment variable is set.
+bool has_env(const char* name) noexcept {
+ std::shared_lock lk(env_mutex);
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable : 4996)
+#endif
+ // NOLINTNEXTLINE(concurrency-mt-unsafe)
+ auto envar = std::getenv(name);
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+ return envar != nullptr;
+}
+
+// Reads an environment variable and returns the content if it is set
+std::optional<std::string> get_env(const char* name) noexcept {
+ std::shared_lock lk(env_mutex);
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable : 4996)
+#endif
+ // NOLINTNEXTLINE(concurrency-mt-unsafe)
+ auto envar = std::getenv(name);
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+ if (envar != nullptr) {
+ return std::string(envar);
+ }
+ return std::nullopt;
+}
+
+// Reads an environment variable and returns
+// - optional<true>, if set equal to "1"
+// - optional<false>, if set equal to "0"
+// - nullopt, otherwise
+//
+// NB:
+// Issues a warning if the value of the environment variable is not 0 or 1.
+std::optional<bool> check_env(const char* name) {
+ auto env_opt = get_env(name);
+ if (env_opt.has_value()) {
+ if (*env_opt == "0") {
+ return false;
+ }
+ if (*env_opt == "1") {
+ return true;
+ }
+ TORCH_WARN(
+ "Ignoring invalid value for boolean flag ",
+ name,
+ ": ",
+ *env_opt,
+ "valid values are 0 or 1.");
+ }
+ return std::nullopt;
+}
+} // namespace c10::utils
diff --git a/c10/util/env.h b/c10/util/env.h
index 3db116c7db..04b7585861 100644
--- a/c10/util/env.h
+++ b/c10/util/env.h
@@ -1,11 +1,20 @@
#pragma once
-#include <c10/util/Exception.h>
-#include <cstdlib>
-#include <cstring>
+#include <c10/macros/Export.h>
#include <optional>
+#include <string>
namespace c10::utils {
+
+// Set an environment variable.
+C10_API void set_env(
+ const char* name,
+ const char* value,
+ bool overwrite = true);
+
+// Checks an environment variable is set.
+C10_API bool has_env(const char* name) noexcept;
+
// Reads an environment variable and returns
// - optional<true>, if set equal to "1"
// - optional<false>, if set equal to "0"
@@ -13,29 +22,10 @@ namespace c10::utils {
//
// NB:
// Issues a warning if the value of the environment variable is not 0 or 1.
-inline std::optional<bool> check_env(const char* name) {
-#ifdef _MSC_VER
-#pragma warning(push)
-#pragma warning(disable : 4996)
-#endif
- auto envar = std::getenv(name);
-#ifdef _MSC_VER
-#pragma warning(pop)
-#endif
- if (envar) {
- if (strcmp(envar, "0") == 0) {
- return false;
- }
- if (strcmp(envar, "1") == 0) {
- return true;
- }
- TORCH_WARN(
- "Ignoring invalid value for boolean flag ",
- name,
- ": ",
- envar,
- "valid values are 0 or 1.");
- }
- return std::nullopt;
-}
+C10_API std::optional<bool> check_env(const char* name);
+
+// Reads the value of an environment variable if it is set.
+// However, check_env should be used if the value is assumed to be a flag.
+C10_API std::optional<std::string> get_env(const char* name) noexcept;
+
} // namespace c10::utils
diff --git a/c10/util/tempfile.cpp b/c10/util/tempfile.cpp
index 28c3c7f14f..f106885a88 100644
--- a/c10/util/tempfile.cpp
+++ b/c10/util/tempfile.cpp
@@ -1,4 +1,5 @@
#include <c10/util/Exception.h>
+#include <c10/util/env.h>
#include <c10/util/tempfile.h>
#include <fmt/format.h>
@@ -22,10 +23,11 @@ static std::string make_filename(std::string_view name_prefix) {
// We see if any of these environment variables is set and use their value, or
// else default the temporary directory to `/tmp`.
- const char* tmp_directory = "/tmp";
+ std::string tmp_directory = "/tmp";
for (const char* variable : {"TMPDIR", "TMP", "TEMP", "TEMPDIR"}) {
- if (const char* path = getenv(variable)) {
- tmp_directory = path;
+ auto path_opt = c10::utils::get_env(variable);
+ if (path_opt.has_value()) {
+ tmp_directory = path_opt.value();
break;
}
}
|
2.41.0
|
530c5a85d1b4621d61e49336894cf667d698c6a
|
Tue, 16 Apr 2024 05:38:20 +0000
|
[PATCH 0194/1000] [DOC] Fix example and typo (#123959)
|
Fixes #123554 and fixes #123053 Pull Request resolved: https://github.com/pytorch/pytorch/pull/123959 Approved by: https://github.com/mikaylagawarecki
|
diff --git a/torch/nn/modules/activation.py b/torch/nn/modules/activation.py
index 2302ec5ea5..bf15c3342d 100644
--- a/torch/nn/modules/activation.py
+++ b/torch/nn/modules/activation.py
@@ -1333,7 +1333,7 @@ class PReLU(Module):
.. math::
\text{PReLU}(x) =
\begin{cases}
- x, & \text{ if } x \geq 0 \\
+ x, & \text{ if } x \ge 0 \\
ax, & \text{ otherwise }
\end{cases}
diff --git a/torch/nn/modules/channelshuffle.py b/torch/nn/modules/channelshuffle.py
index d098fdc68c..ff4c8c28d1 100644
--- a/torch/nn/modules/channelshuffle.py
+++ b/torch/nn/modules/channelshuffle.py
@@ -9,38 +9,35 @@ class ChannelShuffle(Module):
r"""Divides and rearranges the channels in a tensor.
This operation divides the channels in a tensor of shape :math:`(*, C , H, W)`
- into g groups and rearranges them as :math:`(*, \frac{C}{g}, g, H, W)`,
- while keeping the original tensor shape.
+ into g groups as :math:`(*, \frac{C}{g}, g, H, W)` and shuffles them,
+ while retaining the original tensor shape in the final output.
Args:
groups (int): number of groups to divide channels in.
Examples::
- >>> # xdoctest: +IGNORE_WANT("FIXME: incorrect want")
>>> channel_shuffle = nn.ChannelShuffle(2)
- >>> input = torch.randn(1, 4, 2, 2)
- >>> print(input)
- [[[[1, 2],
- [3, 4]],
- [[5, 6],
- [7, 8]],
- [[9, 10],
- [11, 12]],
- [[13, 14],
- [15, 16]],
- ]]
+ >>> input = torch.arange(1, 17, dtype=torch.float32).view(1, 4, 2, 2)
+ >>> input
+ tensor([[[[ 1., 2.],
+ [ 3., 4.]],
+ [[ 5., 6.],
+ [ 7., 8.]],
+ [[ 9., 10.],
+ [11., 12.]],
+ [[13., 14.],
+ [15., 16.]]]])
>>> output = channel_shuffle(input)
- >>> print(output)
- [[[[1, 2],
- [3, 4]],
- [[9, 10],
- [11, 12]],
- [[5, 6],
- [7, 8]],
- [[13, 14],
- [15, 16]],
- ]]
+ >>> output
+ tensor([[[[ 1., 2.],
+ [ 3., 4.]],
+ [[ 9., 10.],
+ [11., 12.]],
+ [[ 5., 6.],
+ [ 7., 8.]],
+ [[13., 14.],
+ [15., 16.]]]])
"""
__constants__ = ['groups']
|
2.41.0
|
eab740db3e158eb7c8529684d588c9fb6b1aacb
|
Tue, 16 Apr 2024 05:51:53 +0000
|
[PATCH 0195/1000] [Docs][Distributed] Add migration notes for `--local-rank` option style change for `torchrun` in PyTorch 2.0 (#109480)
|
Fixes https://github.com/pytorch/pytorch/pull/94505#issuecomment-1722777767 Pull Request resolved: https://github.com/pytorch/pytorch/pull/109480 Approved by: https://github.com/ezyang
|
diff --git a/torch/distributed/launch.py b/torch/distributed/launch.py
index 5dbeeb998c..c95804b8e8 100644
--- a/torch/distributed/launch.py
+++ b/torch/distributed/launch.py
@@ -83,7 +83,7 @@ Parsing the local_rank argument
>>> # xdoctest: +SKIP
>>> import argparse
>>> parser = argparse.ArgumentParser()
- >>> parser.add_argument("--local-rank", type=int)
+ >>> parser.add_argument("--local-rank", "--local_rank", type=int)
>>> args = parser.parse_args()
Set your device to local rank using either
@@ -100,6 +100,19 @@ or
>>> # your code to run
>>> ...
+.. versionchanged:: 2.0.0
+
+ The launcher will pass the ``--local-rank=<rank>`` argument to your script.
+ From PyTorch 2.0.0 onwards, the dashed ``--local-rank`` is preferred over the
+ previously used underscored ``--local_rank``.
+
+ For backward compatibility, it may be necessary for users to handle both
+ cases in their argument parsing code. This means including both ``"--local-rank"``
+ and ``"--local_rank"`` in the argument parser. If only ``"--local_rank"`` is
+ provided, the launcher will trigger an error: "error: unrecognized arguments:
+ --local-rank=<rank>". For training code that only supports PyTorch 2.0.0+,
+ including ``"--local-rank"`` should be sufficient.
+
3. In your training program, you are supposed to call the following function
at the beginning to start the distributed backend. It is strongly recommended
that ``init_method=env://``. Other init methods (e.g. ``tcp://``) may work,
diff --git a/torch/distributed/run.py b/torch/distributed/run.py
index 87b9bd5d92..f2e7f8d942 100644
--- a/torch/distributed/run.py
+++ b/torch/distributed/run.py
@@ -68,6 +68,27 @@ to ``torchrun`` follow these steps:
| | |
+-------------------------------------------------------+----------------------------------------------------+
+.. versionchanged:: 2.0.0
+
+ The launcher will pass the ``--local-rank=<rank>`` argument to your script.
+ From PyTorch 2.0.0 onwards, the dashed ``--local-rank`` is preferred over the
+ previously used underscored ``--local_rank``.
+
+ For backward compatibility, it may be necessary for users to handle both
+ cases in their argument parsing code. This means including both ``"--local-rank"``
+ and ``"--local_rank"`` in the argument parser. If only ``"--local_rank"`` is
+ provided, the launcher will trigger an error: "error: unrecognized arguments:
+ --local-rank=<rank>". For training code that only supports PyTorch 2.0.0+,
+ including ``"--local-rank"`` should be sufficient.
+
+ ::
+
+ >>> # xdoctest: +SKIP
+ >>> import argparse
+ >>> parser = argparse.ArgumentParser()
+ >>> parser.add_argument("--local-rank", "--local_rank", type=int)
+ >>> args = parser.parse_args()
+
 The aforementioned changes suffice to migrate from ``torch.distributed.launch`` to ``torchrun``.
To take advantage of new features such as elasticity, fault-tolerance, and error reporting of ``torchrun``
please refer to:
|
2.41.0
|
e48f7b0443451b291cc527e1473699deec3bc54
|
Sat, 13 Apr 2024 13:22:48 +0000
|
[PATCH 0196/1000] [pytree] add `tree_iter` function (#123913)
|
- Add a new `tree_iter` function. - Bump `optree` version to `0.11.0` for C++ version of `tree_iter`. This PR is split from #120300. - #120300 Pull Request resolved: https://github.com/pytorch/pytorch/pull/123913 Approved by: https://github.com/zou3519
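For reference, a minimal usage sketch (not part of the patch) of the pure-Python ``tree_iter`` added below; it assumes this patch is applied, so that ``tree_leaves`` is implemented as ``list(tree_iter(...))``:

    import torch.utils._pytree as pytree

    tree = {"b": (2, [3, 4]), "a": 1, "d": 5}

    # tree_iter yields leaves lazily, so a consumer can stop early without
    # materializing the full leaf list the way tree_leaves does.
    running = 0
    for leaf in pytree.tree_iter(tree):
        running += leaf
        if running > 5:
            break

    # After this patch, tree_leaves(tree) is just list(tree_iter(tree)).
    assert pytree.tree_leaves(tree) == list(pytree.tree_iter(tree))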
|
diff --git a/.ci/docker/requirements-ci.txt b/.ci/docker/requirements-ci.txt
index 0b5a7d4ff1..a636d70e32 100644
--- a/.ci/docker/requirements-ci.txt
+++ b/.ci/docker/requirements-ci.txt
@@ -134,9 +134,9 @@ opt-einsum==3.3
#Pinned versions: 3.3
#test that import: test_linalg.py
-optree==0.9.1
+optree==0.11.0
#Description: A library for tree manipulation
-#Pinned versions: 0.9.1
+#Pinned versions: 0.11.0
#test that import: test_vmap.py, test_aotdispatch.py, test_dynamic_shapes.py,
#test_pytree.py, test_ops.py, test_control_flow.py, test_modules.py,
#common_utils.py, test_eager_transforms.py, test_python_dispatch.py,
diff --git a/.github/requirements/pip-requirements-iOS.txt b/.github/requirements/pip-requirements-iOS.txt
index 30e67abc5c..01290e4c71 100644
--- a/.github/requirements/pip-requirements-iOS.txt
+++ b/.github/requirements/pip-requirements-iOS.txt
@@ -1,4 +1,4 @@
# iOS simulator requirements
coremltools==5.0b5
protobuf==3.20.2
-optree==0.9.1
+optree==0.11.0
diff --git a/.github/requirements/pip-requirements-macOS.txt b/.github/requirements/pip-requirements-macOS.txt
index 35d24ae34f..f0e4890328 100644
--- a/.github/requirements/pip-requirements-macOS.txt
+++ b/.github/requirements/pip-requirements-macOS.txt
@@ -26,7 +26,7 @@ pytest-cpp==2.3.0
rockset==1.0.3
z3-solver==4.12.2.0
tensorboard==2.13.0
-optree==0.9.1
+optree==0.11.0
# NB: test_hparams_* from test_tensorboard is failing with protobuf 5.26.0 in
# which the stringify metadata is wrong when escaping double quote
protobuf==3.20.2
diff --git a/.lintrunner.toml b/.lintrunner.toml
index f30812313c..9e83a8b96e 100644
--- a/.lintrunner.toml
+++ b/.lintrunner.toml
@@ -148,7 +148,7 @@ init_command = [
'junitparser==2.1.1',
'rich==10.9.0',
'pyyaml==6.0.1',
- 'optree==0.10.0',
+ 'optree==0.11.0',
]
[[linter]]
diff --git a/requirements.txt b/requirements.txt
index 51fd003805..a32fe66cb4 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -17,4 +17,4 @@ fsspec
# setuptools was removed from default python install
setuptools ; python_version >= "3.12"
packaging
-optree>=0.9.1
+optree>=0.11.0
diff --git a/setup.py b/setup.py
index ce8f16df77..d774446780 100644
--- a/setup.py
+++ b/setup.py
@@ -1169,7 +1169,7 @@ def main():
install_requires += extra_install_requires
extras_require = {
- "optree": ["optree>=0.9.1"],
+ "optree": ["optree>=0.11.0"],
"opt-einsum": ["opt-einsum>=3.3"],
}
diff --git a/torch/utils/_cxx_pytree.py b/torch/utils/_cxx_pytree.py
index 93605d3b0b..aba15f1482 100644
--- a/torch/utils/_cxx_pytree.py
+++ b/torch/utils/_cxx_pytree.py
@@ -56,6 +56,7 @@ __all__ = [
"tree_flatten",
"tree_flatten_with_path",
"tree_unflatten",
+ "tree_iter",
"tree_leaves",
"tree_leaves_with_path",
"tree_structure",
@@ -321,6 +322,41 @@ def tree_unflatten(leaves: Iterable[Any], treespec: TreeSpec) -> PyTree:
return optree.tree_unflatten(treespec, leaves) # type: ignore[arg-type]
+def tree_iter(
+ tree: PyTree,
+ is_leaf: Optional[Callable[[PyTree], bool]] = None,
+) -> Iterable[Any]:
+ """Get an iterator over the leaves of a pytree.
+
+ See also :func:`tree_flatten`.
+
+ >>> tree = {'b': (2, [3, 4]), 'a': 1, 'c': None, 'd': 5}
+ >>> list(tree_iter(tree))
+ [1, 2, 3, 4, None, 5]
+ >>> list(tree_iter(1))
+ [1]
+ >>> list(tree_iter(None))
+ [None]
+
+ Args:
+ tree (pytree): A pytree to flatten.
+ is_leaf (callable, optional): An extra leaf predicate function that will be called at each
+ flattening step. The function should have a single argument with signature
+            ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree will be treated
+            as a leaf. Otherwise, the default pytree registry will be used to determine whether a
+            node is a leaf. If the function is not specified, the default pytree registry will be used.
+
+ Returns:
+ An iterator over the leaf values.
+ """
+ return optree.tree_iter(
+ tree,
+ is_leaf=is_leaf,
+ none_is_leaf=True,
+ namespace="torch",
+ )
+
+
def tree_leaves(
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
@@ -670,7 +706,7 @@ def tree_all(
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> bool:
- flat_args = tree_leaves(tree, is_leaf=is_leaf)
+ flat_args = tree_iter(tree, is_leaf=is_leaf)
return all(map(pred, flat_args))
@@ -679,7 +715,7 @@ def tree_any(
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> bool:
- flat_args = tree_leaves(tree, is_leaf=is_leaf)
+ flat_args = tree_iter(tree, is_leaf=is_leaf)
return any(map(pred, flat_args))
@@ -719,7 +755,7 @@ def tree_all_only(
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> bool:
- flat_args = tree_leaves(tree, is_leaf=is_leaf)
+ flat_args = tree_iter(tree, is_leaf=is_leaf)
return all(pred(x) for x in flat_args if isinstance(x, __type_or_types))
@@ -759,7 +795,7 @@ def tree_any_only(
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> bool:
- flat_args = tree_leaves(tree, is_leaf=is_leaf)
+ flat_args = tree_iter(tree, is_leaf=is_leaf)
return any(pred(x) for x in flat_args if isinstance(x, __type_or_types))
diff --git a/torch/utils/_pytree.py b/torch/utils/_pytree.py
index 77f93819b5..52f0d65ded 100644
--- a/torch/utils/_pytree.py
+++ b/torch/utils/_pytree.py
@@ -66,6 +66,7 @@ __all__ = [
"tree_flatten",
"tree_flatten_with_path",
"tree_unflatten",
+ "tree_iter",
"tree_leaves",
"tree_leaves_with_path",
"tree_structure",
@@ -865,22 +866,21 @@ def tree_unflatten(leaves: Iterable[Any], treespec: TreeSpec) -> PyTree:
return treespec.unflatten(leaves)
-def _tree_leaves_helper(
+def tree_iter(
tree: PyTree,
- leaves: List[Any],
is_leaf: Optional[Callable[[PyTree], bool]] = None,
-) -> None:
+) -> Iterable[Any]:
+ """Get an iterator over the leaves of a pytree."""
if _is_leaf(tree, is_leaf=is_leaf):
- leaves.append(tree)
- return
-
- node_type = _get_node_type(tree)
- flatten_fn = SUPPORTED_NODES[node_type].flatten_fn
- child_pytrees, _ = flatten_fn(tree)
+ yield tree
+ else:
+ node_type = _get_node_type(tree)
+ flatten_fn = SUPPORTED_NODES[node_type].flatten_fn
+ child_pytrees, _ = flatten_fn(tree)
- # Recursively flatten the children
- for child in child_pytrees:
- _tree_leaves_helper(child, leaves, is_leaf=is_leaf)
+ # Recursively flatten the children
+ for child in child_pytrees:
+ yield from tree_iter(child, is_leaf=is_leaf)
def tree_leaves(
@@ -888,9 +888,7 @@ def tree_leaves(
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> List[Any]:
"""Get a list of leaves of a pytree."""
- leaves: List[Any] = []
- _tree_leaves_helper(tree, leaves, is_leaf=is_leaf)
- return leaves
+ return list(tree_iter(tree, is_leaf=is_leaf))
def tree_structure(
@@ -1171,7 +1169,7 @@ def tree_all(
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> bool:
- flat_args = tree_leaves(tree, is_leaf=is_leaf)
+ flat_args = tree_iter(tree, is_leaf=is_leaf)
return all(map(pred, flat_args))
@@ -1180,7 +1178,7 @@ def tree_any(
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> bool:
- flat_args = tree_leaves(tree, is_leaf=is_leaf)
+ flat_args = tree_iter(tree, is_leaf=is_leaf)
return any(map(pred, flat_args))
@@ -1220,7 +1218,7 @@ def tree_all_only(
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> bool:
- flat_args = tree_leaves(tree, is_leaf=is_leaf)
+ flat_args = tree_iter(tree, is_leaf=is_leaf)
return all(pred(x) for x in flat_args if isinstance(x, __type_or_types))
@@ -1260,7 +1258,7 @@ def tree_any_only(
tree: PyTree,
is_leaf: Optional[Callable[[PyTree], bool]] = None,
) -> bool:
- flat_args = tree_leaves(tree, is_leaf=is_leaf)
+ flat_args = tree_iter(tree, is_leaf=is_leaf)
return any(pred(x) for x in flat_args if isinstance(x, __type_or_types))
@@ -1468,9 +1466,9 @@ def arg_tree_leaves(*args: PyTree, **kwargs: PyTree) -> List[Any]:
"""
leaves: List[Any] = []
for a in args:
- _tree_leaves_helper(a, leaves)
+ leaves.extend(tree_iter(a))
for a in kwargs.values():
- _tree_leaves_helper(a, leaves)
+ leaves.extend(tree_iter(a))
return leaves
|
2.41.0
|
2be63eb2cb3947c00a8c4506eb961603da7d564
|
Tue, 16 Apr 2024 06:33:21 +0000
|
[PATCH 0197/1000] Revert "Enable UFMT on all of `test/distributed` (#123539)"
|
This reverts commit 89ac37fe919997e844f0baa6e28965d0d52b0682. Reverted https://github.com/pytorch/pytorch/pull/123539 on behalf of https://github.com/DanilBaibak due to Broken trunk ([comment](https://github.com/pytorch/pytorch/pull/123539#issuecomment-2058329471))
|
diff --git a/.lintrunner.toml b/.lintrunner.toml
index 9e83a8b96e..817d35f34f 100644
--- a/.lintrunner.toml
+++ b/.lintrunner.toml
@@ -1015,6 +1015,134 @@ exclude_patterns = [
'test/_nvfuser/test_python_frontend.py',
'test/_nvfuser/test_torchscript.py',
'test/delete.py',
+ 'test/distributed/_shard/sharded_optim/test_sharded_optim.py',
+ 'test/distributed/_shard/sharded_tensor/ops/test_binary_cmp.py',
+ 'test/distributed/_shard/sharded_tensor/ops/test_embedding.py',
+ 'test/distributed/_shard/sharded_tensor/ops/test_embedding_bag.py',
+ 'test/distributed/_shard/sharded_tensor/ops/test_init.py',
+ 'test/distributed/_shard/sharded_tensor/ops/test_tensor_ops.py',
+ 'test/distributed/_shard/sharded_tensor/test_logger.py',
+ 'test/distributed/_shard/sharded_tensor/test_sharded_tensor.py',
+ 'test/distributed/_shard/sharded_tensor/test_sharded_tensor_reshard.py',
+ 'test/distributed/_shard/sharding_plan/test_sharding_plan.py',
+ 'test/distributed/_shard/sharding_spec/test_sharding_spec.py',
+ 'test/distributed/_shard/test_sharder.py',
+ 'test/distributed/_tools/test_memory_tracker.py',
+ 'test/distributed/algorithms/ddp_comm_hooks/test_ddp_hooks.py',
+ 'test/distributed/algorithms/quantization/test_quantization.py',
+ 'test/distributed/algorithms/test_join.py',
+ 'test/distributed/argparse_util_test.py',
+ 'test/distributed/bin/test_script.py',
+ 'test/distributed/elastic/agent/server/test/__init__.py',
+ 'test/distributed/elastic/agent/server/test/api_test.py',
+ 'test/distributed/elastic/agent/server/test/local_elastic_agent_test.py',
+ 'test/distributed/elastic/events/lib_test.py',
+ 'test/distributed/elastic/metrics/__init__.py',
+ 'test/distributed/elastic/metrics/api_test.py',
+ 'test/distributed/elastic/multiprocessing/api_test.py',
+ 'test/distributed/elastic/multiprocessing/bin/echo1.py',
+ 'test/distributed/elastic/multiprocessing/bin/echo2.py',
+ 'test/distributed/elastic/multiprocessing/bin/echo3.py',
+ 'test/distributed/elastic/multiprocessing/bin/test_script.py',
+ 'test/distributed/elastic/multiprocessing/bin/zombie_test.py',
+ 'test/distributed/elastic/multiprocessing/errors/api_test.py',
+ 'test/distributed/elastic/multiprocessing/errors/error_handler_test.py',
+ 'test/distributed/elastic/multiprocessing/redirects_test.py',
+ 'test/distributed/elastic/multiprocessing/tail_log_test.py',
+ 'test/distributed/elastic/rendezvous/__init__.py',
+ 'test/distributed/elastic/rendezvous/api_test.py',
+ 'test/distributed/elastic/rendezvous/c10d_rendezvous_backend_test.py',
+ 'test/distributed/elastic/rendezvous/dynamic_rendezvous_test.py',
+ 'test/distributed/elastic/rendezvous/etcd_rendezvous_backend_test.py',
+ 'test/distributed/elastic/rendezvous/etcd_rendezvous_test.py',
+ 'test/distributed/elastic/rendezvous/etcd_server_test.py',
+ 'test/distributed/elastic/rendezvous/rendezvous_backend_test.py',
+ 'test/distributed/elastic/rendezvous/static_rendezvous_test.py',
+ 'test/distributed/elastic/rendezvous/utils_test.py',
+ 'test/distributed/elastic/timer/__init__.py',
+ 'test/distributed/elastic/timer/api_test.py',
+ 'test/distributed/elastic/timer/file_based_local_timer_test.py',
+ 'test/distributed/elastic/timer/local_timer_example.py',
+ 'test/distributed/elastic/timer/local_timer_test.py',
+ 'test/distributed/elastic/utils/__init__.py',
+ 'test/distributed/elastic/utils/data/__init__.py',
+ 'test/distributed/elastic/utils/data/cycling_iterator_test.py',
+ 'test/distributed/elastic/utils/distributed_test.py',
+ 'test/distributed/elastic/utils/logging_test.py',
+ 'test/distributed/elastic/utils/util_test.py',
+ 'test/distributed/launcher/__init__.py',
+ 'test/distributed/launcher/api_test.py',
+ 'test/distributed/launcher/bin/test_script.py',
+ 'test/distributed/launcher/bin/test_script_init_method.py',
+ 'test/distributed/launcher/bin/test_script_is_torchelastic_launched.py',
+ 'test/distributed/launcher/bin/test_script_local_rank.py',
+ 'test/distributed/launcher/launch_test.py',
+ 'test/distributed/launcher/run_test.py',
+ 'test/distributed/nn/jit/__init__.py',
+ 'test/distributed/nn/jit/test_instantiator.py',
+ 'test/distributed/optim/test_apply_optimizer_in_backward.py',
+ 'test/distributed/optim/test_named_optimizer.py',
+ 'test/distributed/optim/test_zero_redundancy_optimizer.py',
+ 'test/distributed/pipeline/sync/__init__.py',
+ 'test/distributed/pipeline/sync/conftest.py',
+ 'test/distributed/pipeline/sync/skip/__init__.py',
+ 'test/distributed/pipeline/sync/skip/test_api.py',
+ 'test/distributed/pipeline/sync/skip/test_gpipe.py',
+ 'test/distributed/pipeline/sync/skip/test_inspect_skip_layout.py',
+ 'test/distributed/pipeline/sync/skip/test_leak.py',
+ 'test/distributed/pipeline/sync/skip/test_portal.py',
+ 'test/distributed/pipeline/sync/skip/test_stash_pop.py',
+ 'test/distributed/pipeline/sync/skip/test_tracker.py',
+ 'test/distributed/pipeline/sync/skip/test_verify_skippables.py',
+ 'test/distributed/pipeline/sync/test_balance.py',
+ 'test/distributed/pipeline/sync/test_bugs.py',
+ 'test/distributed/pipeline/sync/test_checkpoint.py',
+ 'test/distributed/pipeline/sync/test_copy.py',
+ 'test/distributed/pipeline/sync/test_deferred_batch_norm.py',
+ 'test/distributed/pipeline/sync/test_dependency.py',
+ 'test/distributed/pipeline/sync/test_inplace.py',
+ 'test/distributed/pipeline/sync/test_microbatch.py',
+ 'test/distributed/pipeline/sync/test_phony.py',
+ 'test/distributed/pipeline/sync/test_pipe.py',
+ 'test/distributed/pipeline/sync/test_pipeline.py',
+ 'test/distributed/pipeline/sync/test_stream.py',
+ 'test/distributed/pipeline/sync/test_transparency.py',
+ 'test/distributed/pipeline/sync/test_worker.py',
+ 'test/distributed/rpc/cuda/test_tensorpipe_agent.py',
+ 'test/distributed/rpc/test_faulty_agent.py',
+ 'test/distributed/rpc/test_share_memory.py',
+ 'test/distributed/rpc/test_tensorpipe_agent.py',
+ 'test/distributed/tensor/parallel/__init__.py',
+ 'test/distributed/tensor/parallel/test_ddp_2d_parallel.py',
+ 'test/distributed/tensor/parallel/test_fsdp_2d_parallel.py',
+ 'test/distributed/tensor/parallel/test_parallelize_api.py',
+ 'test/distributed/tensor/parallel/test_tp_examples.py',
+ 'test/distributed/tensor/parallel/test_tp_random_state.py',
+ 'test/distributed/tensor/parallel/test_tp_style.py',
+ 'test/distributed/tensor/parallel/test_view_sharding_dim_change.py',
+ 'test/distributed/test_c10d_common.py',
+ 'test/distributed/test_c10d_gloo.py',
+ 'test/distributed/test_c10d_logger.py',
+ 'test/distributed/test_c10d_nccl.py',
+ 'test/distributed/test_c10d_object_collectives.py',
+ 'test/distributed/test_c10d_pypg.py',
+ 'test/distributed/test_c10d_spawn.py',
+ 'test/distributed/test_c10d_spawn_gloo.py',
+ 'test/distributed/test_c10d_spawn_nccl.py',
+ 'test/distributed/test_c10d_spawn_ucc.py',
+ 'test/distributed/test_c10d_ucc.py',
+ 'test/distributed/test_collective_utils.py',
+ 'test/distributed/test_data_parallel.py',
+ 'test/distributed/test_distributed_spawn.py',
+ 'test/distributed/test_dynamo_distributed.py',
+ 'test/distributed/test_fake_pg.py',
+ 'test/distributed/test_functional_api.py',
+ 'test/distributed/test_inductor_collectives.py',
+ 'test/distributed/test_launcher.py',
+ 'test/distributed/test_multi_threaded_pg.py',
+ 'test/distributed/test_nccl.py',
+ 'test/distributed/test_pg_wrapper.py',
+ 'test/distributed/test_store.py',
'test/expect/__init__.py',
'test/jit/__init__.py',
'test/jit/_imported_class_test/__init__.py',
diff --git a/test/distributed/_shard/sharded_optim/test_sharded_optim.py b/test/distributed/_shard/sharded_optim/test_sharded_optim.py
index 6b08479fbb..30202ee062 100644
--- a/test/distributed/_shard/sharded_optim/test_sharded_optim.py
+++ b/test/distributed/_shard/sharded_optim/test_sharded_optim.py
@@ -1,21 +1,32 @@
# Owner(s): ["oncall: distributed"]
-from copy import deepcopy
-
import torch
import torch.optim as optim
-from torch.distributed._shard import shard_parameter, sharded_tensor
-from torch.distributed._shard.sharded_optim import ShardedOptimizer
-from torch.distributed._shard.sharding_spec import ChunkShardingSpec
-from torch.testing._internal.common_distributed import requires_nccl, skip_if_lt_x_gpu
-from torch.testing._internal.common_utils import run_tests
+from torch.distributed._shard import (
+ sharded_tensor,
+ shard_parameter
+)
+
+from copy import deepcopy
+from torch.distributed._shard.sharding_spec import (
+ ChunkShardingSpec,
+)
+from torch.distributed._shard.sharded_optim import (
+ ShardedOptimizer,
+)
+from torch.testing._internal.common_distributed import (
+ requires_nccl,
+ skip_if_lt_x_gpu,
+)
+from torch.testing._internal.common_utils import (
+ run_tests,
+)
from torch.testing._internal.distributed._shard.sharded_tensor import (
ShardedTensorTestBase,
with_comms,
)
-
class MyShardedModel(torch.nn.Module):
def __init__(self, spec=None, group=None):
super().__init__()
@@ -23,11 +34,7 @@ class MyShardedModel(torch.nn.Module):
torch.manual_seed(0)
self.param = torch.nn.Parameter(torch.rand(5, 10))
if spec is not None:
- self.sharded_param = torch.nn.Parameter(
- sharded_tensor.rand(
- spec, 20, 10, requires_grad=True, process_group=group
- )
- )
+ self.sharded_param = torch.nn.Parameter(sharded_tensor.rand(spec, 20, 10, requires_grad=True, process_group=group))
else:
self.sharded_param = torch.nn.Parameter(torch.rand(5, 10))
@@ -80,6 +87,7 @@ class MyShardedLinear(torch.nn.Module):
class TestShardedOptimizer(ShardedTensorTestBase):
+
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(4)
@requires_nccl()
@@ -97,9 +105,8 @@ class TestShardedOptimizer(ShardedTensorTestBase):
sharded_model = MyShardedModel(spec=rowwise_spec).cuda()
# copy the parameters from local model
- sharded_model.sharded_param.local_shards()[0].tensor = (
+ sharded_model.sharded_param.local_shards()[0].tensor = \
local_model.sharded_param.detach().clone().requires_grad_()
- )
local_optim = optim.SGD(local_model.parameters(), lr=0.1)
sharded_model_params = dict(sharded_model.named_parameters())
@@ -130,10 +137,12 @@ class TestShardedOptimizer(ShardedTensorTestBase):
new_val = sharded_optim.named_params[key]
if isinstance(val, sharded_tensor.ShardedTensor):
self.assertNotEqual(
- val.local_shards()[0].tensor, new_val.local_shards()[0].tensor
+ val.local_shards()[0].tensor,
+ new_val.local_shards()[0].tensor
)
self.assertEqual(
- new_val.local_shards()[0].tensor, local_model.sharded_param
+ new_val.local_shards()[0].tensor,
+ local_model.sharded_param
)
else:
self.assertNotEqual(val, new_val)
@@ -170,6 +179,5 @@ class TestShardedOptimizer(ShardedTensorTestBase):
self.assertTrue("linear2.weight" in param_keys)
self.assertFalse("bias" in param_keys)
-
-if __name__ == "__main__":
+if __name__ == '__main__':
run_tests()
diff --git a/test/distributed/_shard/sharded_tensor/ops/test_binary_cmp.py b/test/distributed/_shard/sharded_tensor/ops/test_binary_cmp.py
index 1cfed1945a..33fc49f81c 100644
--- a/test/distributed/_shard/sharded_tensor/ops/test_binary_cmp.py
+++ b/test/distributed/_shard/sharded_tensor/ops/test_binary_cmp.py
@@ -1,37 +1,37 @@
# Owner(s): ["oncall: distributed"]
import sys
-
import torch
import torch.distributed as dist
from torch.distributed._shard import sharded_tensor
-from torch.distributed._shard.sharding_spec import ChunkShardingSpec
from torch.distributed.distributed_c10d import _get_default_group
-from torch.testing._internal.common_distributed import requires_nccl, skip_if_lt_x_gpu
-from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
+from torch.testing._internal.common_distributed import (
+ requires_nccl,
+ skip_if_lt_x_gpu,
+)
+from torch.distributed._shard.sharding_spec import (
+ ChunkShardingSpec,
+)
from torch.testing._internal.distributed._shard.sharded_tensor import (
ShardedTensorTestBase,
with_comms,
)
+from torch.testing._internal.common_utils import (
+ TEST_WITH_DEV_DBG_ASAN,
+ run_tests,
+)
if TEST_WITH_DEV_DBG_ASAN:
- print(
- "Skip dev-asan as torch + multiprocessing spawn have known issues",
- file=sys.stderr,
- )
+ print("Skip dev-asan as torch + multiprocessing spawn have known issues", file=sys.stderr)
sys.exit(0)
-
class TestShardedTensorBinaryOps(ShardedTensorTestBase):
- """Test base for binary comparison functions such as torch.equal, torch.allclose etc. for ShardedTensor"""
-
+ """ Test base for binary comparison functions such as torch.equal, torch.allclose etc. for ShardedTensor """
seed = 42
- def get_random_tensors(
- self, spec1, spec2, *sizes, pg1=None, pg2=None, seed_offset=0
- ):
+ def get_random_tensors(self, spec1, spec2, *sizes, pg1=None, pg2=None, seed_offset=0):
pg1 = _get_default_group() if pg1 is None else pg1
pg2 = _get_default_group() if pg2 is None else pg2
torch.manual_seed(TestShardedTensorBinaryOps.seed)
@@ -128,7 +128,7 @@ class TestShardedTensorBinaryOps(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_torch_equal(self):
- """Test torch.equal(ShardedTensor, ShardedTensor)"""
+ """ Test torch.equal(ShardedTensor, ShardedTensor) """
spec, alt_spec = self.get_gpu_specs()
st1, st2 = self.get_random_tensors(spec, spec, 10, 10)
@@ -144,7 +144,7 @@ class TestShardedTensorBinaryOps(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_torch_allclose(self):
- """Test torch.allclose(ShardedTensor, ShardedTensor)"""
+ """ Test torch.allclose(ShardedTensor, ShardedTensor) """
spec, alt_spec = self.get_gpu_specs()
@@ -158,6 +158,5 @@ class TestShardedTensorBinaryOps(ShardedTensorTestBase):
# sharded_tensor.rand produces uniform values in the [0,1] range.
self.assertTrue(torch.allclose(st1, st2, atol=1))
-
-if __name__ == "__main__":
+if __name__ == '__main__':
run_tests()
diff --git a/test/distributed/_shard/sharded_tensor/ops/test_embedding.py b/test/distributed/_shard/sharded_tensor/ops/test_embedding.py
index 98e1efee92..9291e06e31 100644
--- a/test/distributed/_shard/sharded_tensor/ops/test_embedding.py
+++ b/test/distributed/_shard/sharded_tensor/ops/test_embedding.py
@@ -4,12 +4,20 @@ import sys
import torch
import torch.distributed as dist
-from torch.distributed._shard import shard_parameter
-from torch.testing._internal.common_distributed import requires_nccl, skip_if_lt_x_gpu
-from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
+from torch.distributed._shard import (
+ shard_parameter,
+)
+from torch.testing._internal.common_distributed import (
+ requires_nccl,
+ skip_if_lt_x_gpu,
+)
+from torch.testing._internal.common_utils import (
+ TEST_WITH_DEV_DBG_ASAN,
+ run_tests,
+)
from torch.testing._internal.distributed._shard.sharded_tensor import (
- ShardedTensorTestBase,
TEST_GPU_NUM,
+ ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_ops_common import (
@@ -56,7 +64,9 @@ class TestShardedEmbedding(ShardedTensorTestBase):
)
# Copy the weights from local embedding
- sharded_embedding.weight = clone_module_parameter(local_embedding, "weight")
+ sharded_embedding.weight = clone_module_parameter(
+ local_embedding, "weight"
+ )
# Shard the parameter.
shard_parameter(sharded_embedding, "weight", spec)
@@ -124,26 +134,13 @@ class TestShardedEmbedding(ShardedTensorTestBase):
self._run_sharded_embedding(spec, [34], 15, 14, padding_idx=10)
self._run_sharded_embedding(spec, [8, 6, 5, 4], 23, 13, padding_idx=12)
self._run_sharded_embedding(
- spec,
- [4, 5, 6],
- 23,
- 13,
- max_norm=2.5,
+ spec, [4, 5, 6], 23, 13, max_norm=2.5,
)
self._run_sharded_embedding(
- spec,
- [12, 7, 16],
- 23,
- 13,
- max_norm=2.5,
+ spec, [12, 7, 16], 23, 13, max_norm=2.5,
)
self._run_sharded_embedding(
- spec,
- [8, 16, 20],
- 12,
- 12,
- max_norm=1.25,
- norm_type=1.0,
+ spec, [8, 16, 20], 12, 12, max_norm=1.25, norm_type=1.0,
)
self._run_sharded_embedding(spec, [30], 15, 14, max_norm=2.0)
@@ -157,19 +154,11 @@ class TestShardedEmbedding(ShardedTensorTestBase):
self._run_sharded_embedding(spec, [5, 4], 32, 12)
self._run_sharded_embedding(spec, [6, 7, 6], 64, 11)
self._run_sharded_embedding(
- spec,
- [5, 12],
- 16,
- 22,
- max_norm=2.5,
+ spec, [5, 12], 16, 22, max_norm=2.5,
)
self._run_sharded_embedding(spec, [6, 7, 6], 64, 11, padding_idx=30)
self._run_sharded_embedding(
- spec,
- [6, 5, 3],
- 26,
- 11,
- max_norm=2.0,
+ spec, [6, 5, 3], 26, 11, max_norm=2.0,
)
# Test uneven split.
@@ -178,11 +167,7 @@ class TestShardedEmbedding(ShardedTensorTestBase):
self._run_sharded_embedding(spec, [4], 21, 11)
self._run_sharded_embedding(spec, [8, 6, 5, 4], 21, 11, padding_idx=10)
self._run_sharded_embedding(
- spec,
- [6, 5, 8],
- 28,
- 5,
- max_norm=2.0,
+ spec, [6, 5, 8], 28, 5, max_norm=2.0,
)
self._run_sharded_embedding(spec, [4], 14, 11, max_norm=2.5)
diff --git a/test/distributed/_shard/sharded_tensor/ops/test_embedding_bag.py b/test/distributed/_shard/sharded_tensor/ops/test_embedding_bag.py
index 98feeba767..4843534f68 100644
--- a/test/distributed/_shard/sharded_tensor/ops/test_embedding_bag.py
+++ b/test/distributed/_shard/sharded_tensor/ops/test_embedding_bag.py
@@ -4,12 +4,20 @@ import sys
import torch
import torch.distributed as dist
-from torch.distributed._shard import shard_parameter
-from torch.testing._internal.common_distributed import requires_nccl, skip_if_lt_x_gpu
-from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
+from torch.distributed._shard import (
+ shard_parameter,
+)
+from torch.testing._internal.common_distributed import (
+ requires_nccl,
+ skip_if_lt_x_gpu,
+)
+from torch.testing._internal.common_utils import (
+ TEST_WITH_DEV_DBG_ASAN,
+ run_tests,
+)
from torch.testing._internal.distributed._shard.sharded_tensor import (
- ShardedTensorTestBase,
TEST_GPU_NUM,
+ ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_ops_common import (
@@ -176,12 +184,7 @@ class TestShardedEmbeddingBag(ShardedTensorTestBase):
self._run_sharded_embedding_bag(spec, [5, 4], 17, 12, "mean")
self._run_sharded_embedding_bag(spec, [6, 7], 21, 11, "max")
self._run_sharded_embedding_bag(
- spec,
- [5, 5],
- 17,
- 14,
- "sum",
- max_norm=2.5,
+ spec, [5, 5], 17, 14, "sum", max_norm=2.5,
)
self._run_sharded_embedding_bag(
spec,
diff --git a/test/distributed/_shard/sharded_tensor/ops/test_init.py b/test/distributed/_shard/sharded_tensor/ops/test_init.py
index 9d67233376..6cbfd04b21 100644
--- a/test/distributed/_shard/sharded_tensor/ops/test_init.py
+++ b/test/distributed/_shard/sharded_tensor/ops/test_init.py
@@ -1,34 +1,37 @@
# Owner(s): ["oncall: distributed"]
import sys
-
import torch
from torch.distributed._shard import sharded_tensor
-from torch.distributed._shard.sharding_spec import ChunkShardingSpec
-from torch.testing._internal.common_distributed import requires_nccl, skip_if_lt_x_gpu
-from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
+from torch.distributed._shard.sharding_spec import (
+ ChunkShardingSpec,
+)
+from torch.testing._internal.common_distributed import (
+ requires_nccl,
+ skip_if_lt_x_gpu,
+)
from torch.testing._internal.distributed._shard.sharded_tensor import (
ShardedTensorTestBase,
with_comms,
)
+from torch.testing._internal.common_utils import (
+ TEST_WITH_DEV_DBG_ASAN,
+ run_tests,
+)
if TEST_WITH_DEV_DBG_ASAN:
- print(
- "Skip dev-asan as torch + multiprocessing spawn have known issues",
- file=sys.stderr,
- )
+ print("Skip dev-asan as torch + multiprocessing spawn have known issues", file=sys.stderr)
sys.exit(0)
-
class TestShardedTensorNNInit(ShardedTensorTestBase):
- """Testing torch.nn.init functions for ShardedTensor"""
+ """ Testing torch.nn.init functions for ShardedTensor """
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_init_sharded_tensor_with_uniform(self):
- """Test torch.nn.init.uniform_(ShardedTensor, a, b)"""
+ """ Test torch.nn.init.uniform_(ShardedTensor, a, b) """
spec = ChunkShardingSpec(
dim=0,
@@ -63,7 +66,7 @@ class TestShardedTensorNNInit(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_init_sharded_tensor_with_normal(self):
- """Test torch.nn.init.normal_(ShardedTensor, mean, std)"""
+ """ Test torch.nn.init.normal_(ShardedTensor, mean, std) """
spec = ChunkShardingSpec(
dim=0,
@@ -98,7 +101,7 @@ class TestShardedTensorNNInit(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_init_sharded_tensor_with_kaiming_uniform(self):
- """Test torch.nn.init.kaiming_uniform_(ShardedTensor, a, mode, nonlinearit)"""
+ """ Test torch.nn.init.kaiming_uniform_(ShardedTensor, a, mode, nonlinearit) """
spec = ChunkShardingSpec(
dim=0,
@@ -112,7 +115,7 @@ class TestShardedTensorNNInit(ShardedTensorTestBase):
h, w = 8, 2
expected_h = 2
expected_device = torch.device(f"cuda:{self.rank}")
- a, mode, nonlinearity = 0, "fan_in", "leaky_relu"
+ a, mode, nonlinearity = 0, 'fan_in', 'leaky_relu'
seed = 1234
dtype = torch.double
@@ -126,11 +129,8 @@ class TestShardedTensorNNInit(ShardedTensorTestBase):
torch.nn.init.kaiming_uniform_(st, a=a, mode=mode, nonlinearity=nonlinearity)
torch.manual_seed(seed)
- torch.nn.init.kaiming_uniform_(
- local_tensor_clone, a=a, mode=mode, nonlinearity=nonlinearity
- )
+ torch.nn.init.kaiming_uniform_(local_tensor_clone, a=a, mode=mode, nonlinearity=nonlinearity)
self.assertEqual(local_tensor_clone, st.local_shards()[0].tensor)
-
-if __name__ == "__main__":
+if __name__ == '__main__':
run_tests()
diff --git a/test/distributed/_shard/sharded_tensor/ops/test_tensor_ops.py b/test/distributed/_shard/sharded_tensor/ops/test_tensor_ops.py
index ca49f52d08..977fa701b4 100644
--- a/test/distributed/_shard/sharded_tensor/ops/test_tensor_ops.py
+++ b/test/distributed/_shard/sharded_tensor/ops/test_tensor_ops.py
@@ -5,15 +5,22 @@ import copy
import torch
import torch.distributed._shard.sharded_tensor as sharded_tensor
-from torch.distributed._shard.sharding_spec import ChunkShardingSpec
-from torch.testing._internal.common_distributed import requires_nccl, skip_if_lt_x_gpu
-from torch.testing._internal.common_utils import run_tests
+from torch.distributed._shard.sharding_spec import (
+ ChunkShardingSpec,
+)
+from torch.testing._internal.common_distributed import (
+ requires_nccl,
+ skip_if_lt_x_gpu,
+)
from torch.testing._internal.distributed._shard.sharded_tensor import (
- ShardedTensorTestBase,
TEST_GPU_NUM,
+ ShardedTensorTestBase,
with_comms,
)
+from torch.testing._internal.common_utils import (
+ run_tests,
+)
class TestTensorOps(ShardedTensorTestBase):
diff --git a/test/distributed/_shard/sharded_tensor/test_logger.py b/test/distributed/_shard/sharded_tensor/test_logger.py
index fa946819f9..d1560261ad 100644
--- a/test/distributed/_shard/sharded_tensor/test_logger.py
+++ b/test/distributed/_shard/sharded_tensor/test_logger.py
@@ -3,7 +3,10 @@
import logging
from torch.distributed._shard.sharded_tensor.logger import _get_or_create_logger
-from torch.testing._internal.common_utils import run_tests, TestCase
+from torch.testing._internal.common_utils import (
+ TestCase,
+ run_tests,
+)
class ShardingSpecLoggerTest(TestCase):
diff --git a/test/distributed/_shard/sharded_tensor/test_sharded_tensor.py b/test/distributed/_shard/sharded_tensor/test_sharded_tensor.py
index d65958a20f..e0a71a06d6 100644
--- a/test/distributed/_shard/sharded_tensor/test_sharded_tensor.py
+++ b/test/distributed/_shard/sharded_tensor/test_sharded_tensor.py
@@ -70,7 +70,6 @@ if TEST_WITH_DEV_DBG_ASAN:
)
sys.exit(0)
-
class TestShardedTensorMetadata(TestCase):
def test_serialize_and_deserialize(self):
shard_metadatas = [
@@ -93,59 +92,34 @@ class TestShardedTensorMetadata(TestCase):
shard_offsets=[5, 5],
shard_sizes=[5, 5],
placement="rank:3/cuda:3",
- ),
+ )
]
dtypes = [
- torch.float,
- torch.double,
- torch.cfloat,
- torch.cdouble,
- torch.half,
- torch.bfloat16,
- torch.uint8,
- torch.int8,
- torch.short,
- torch.int,
- torch.long,
- torch.bool,
- ]
+ torch.float, torch.double, torch.cfloat, torch.cdouble, torch.half,
+ torch.bfloat16, torch.uint8, torch.int8, torch.short, torch.int,
+ torch.long, torch.bool]
layouts = [torch.strided, torch.sparse_coo]
requires_grads = [True, False]
- memory_formats = [
- torch.contiguous_format,
- torch.channels_last,
- torch.preserve_format,
- ]
+ memory_formats = [torch.contiguous_format, torch.channels_last, torch.preserve_format]
pin_memories = [True, False]
- for tensor_properties_input in itertools.product(
- dtypes, layouts, requires_grads, memory_formats, pin_memories
- ):
- (
- dtype,
- layout,
- requires_grad,
- memory_format,
- pin_memory,
- ) = tensor_properties_input
+ for tensor_properties_input in itertools.product(dtypes, layouts, requires_grads, memory_formats, pin_memories):
+ dtype, layout, requires_grad, memory_format, pin_memory = tensor_properties_input
expected_st_metadata = sharded_tensor.ShardedTensorMetadata(
shard_metadatas,
(10, 10),
- TensorProperties(
- dtype, layout, requires_grad, memory_format, pin_memory
- ),
+ TensorProperties(dtype, layout, requires_grad, memory_format, pin_memory)
)
pickled_obj = pickle.dumps(expected_st_metadata)
st_metadata = pickle.loads(pickled_obj)
self.assertEqual(expected_st_metadata, st_metadata)
-
class TestCreateTensorFromParams(TestCase):
- @skip_but_pass_in_sandcastle_if(not TEST_CUDA, "CUDA GPU is needed")
+ @skip_but_pass_in_sandcastle_if(not TEST_CUDA, 'CUDA GPU is needed')
def test_empty(self):
expected_dtype = torch.double
tensor_properties = TensorProperties(
@@ -153,12 +127,10 @@ class TestCreateTensorFromParams(TestCase):
layout=torch.strided,
requires_grad=False,
pin_memory=False,
- memory_format=torch.contiguous_format,
- )
- local_device = torch.device("cuda:0")
+ memory_format=torch.contiguous_format)
+ local_device = torch.device('cuda:0')
local_tensor = _create_tensor_from_params(
- 5, 10, local_device=local_device, tensor_properties=tensor_properties
- )
+ 5, 10, local_device=local_device, tensor_properties=tensor_properties)
self.assertEqual(local_device, local_tensor.device)
self.assertEqual(expected_dtype, local_tensor.dtype)
self.assertEqual(torch.strided, local_tensor.layout)
@@ -182,7 +154,7 @@ class TestShardParameter(ShardedTensorTestBase):
fc = torch.nn.Linear(12, 12).cuda(self.rank)
weight_og = fc.weight.clone()
- shard_parameter(fc, "weight", spec)
+ shard_parameter(fc, 'weight', spec)
# Verify.
self.assertTrue(isinstance(fc.weight, ShardedTensor))
@@ -191,9 +163,7 @@ class TestShardParameter(ShardedTensorTestBase):
self.assertEqual(torch.Size([3, 12]), local_shards[0].tensor.size())
self.assertEqual(3, local_shards[0].tensor.size(0))
self.assertEqual(12, local_shards[0].tensor.size(1))
- self.assertEqual(
- torch.narrow(weight_og, 0, 3 * self.rank, 3), local_shards[0].tensor
- )
+ self.assertEqual(torch.narrow(weight_og, 0, 3 * self.rank, 3), local_shards[0].tensor)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(4)
@@ -210,22 +180,20 @@ class TestShardParameter(ShardedTensorTestBase):
)
fc = torch.nn.Linear(12, 12).cuda(self.rank)
- with self.assertRaisesRegex(ValueError, "does not match with src_rank"):
- shard_parameter(fc, "weight", spec, src_rank=self.rank)
+ with self.assertRaisesRegex(ValueError, 'does not match with src_rank'):
+ shard_parameter(fc, 'weight', spec, src_rank=self.rank)
- with self.assertRaisesRegex(AttributeError, "has no attribute"):
- shard_parameter(fc, "foo", spec)
+ with self.assertRaisesRegex(AttributeError, 'has no attribute'):
+ shard_parameter(fc, 'foo', spec)
- with self.assertRaisesRegex(
- ValueError, "Expected Linear.bias to be a Tensor, but found str"
- ):
+ with self.assertRaisesRegex(ValueError, 'Expected Linear.bias to be a Tensor, but found str'):
del fc.bias
fc.bias = "foo"
- shard_parameter(fc, "bias", spec)
+ shard_parameter(fc, 'bias', spec)
- with self.assertRaisesRegex(ValueError, "not a contiguous Tensor"):
+ with self.assertRaisesRegex(ValueError, 'not a contiguous Tensor'):
fc.bias = torch.rand(10, 10).cuda(self.rank).t()
- shard_parameter(fc, "bias", spec)
+ shard_parameter(fc, 'bias', spec)
spec = ChunkShardingSpec(
dim=0,
@@ -236,25 +204,23 @@ class TestShardParameter(ShardedTensorTestBase):
"rank:3/cuda:3",
],
)
- with self.assertRaisesRegex(ValueError, "does not match with sharding_spec"):
- shard_parameter(fc, "weight", spec)
+ with self.assertRaisesRegex(ValueError, 'does not match with sharding_spec'):
+ shard_parameter(fc, 'weight', spec)
- spec = EnumerableShardingSpec(
- [
- ShardMetadata(
- shard_offsets=[0, 0],
- shard_sizes=[5, 5],
- placement="rank:0/cuda:0",
- ),
- ShardMetadata(
- shard_offsets=[5, 0],
- shard_sizes=[5, 5],
- placement="rank:1/cuda:1",
- ),
- ]
- )
- with self.assertRaisesRegex(NotImplementedError, "not implemented yet!"):
- shard_parameter(fc, "weight", spec)
+ spec = EnumerableShardingSpec([
+ ShardMetadata(
+ shard_offsets=[0, 0],
+ shard_sizes=[5, 5],
+ placement="rank:0/cuda:0",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 0],
+ shard_sizes=[5, 5],
+ placement="rank:1/cuda:1",
+ ),
+ ])
+ with self.assertRaisesRegex(NotImplementedError, 'not implemented yet!'):
+ shard_parameter(fc, 'weight', spec)
class TestShardTensor(ShardedTensorTestBase):
@@ -327,10 +293,10 @@ class TestShardTensor(ShardedTensorTestBase):
)
tensor = torch.rand(12, 12).cuda(self.rank)
- with self.assertRaisesRegex(ValueError, "does not match with src_rank"):
+ with self.assertRaisesRegex(ValueError, 'does not match with src_rank'):
_shard_tensor(tensor, spec, src_rank=self.rank)
- with self.assertRaisesRegex(ValueError, "not a contiguous Tensor"):
+ with self.assertRaisesRegex(ValueError, 'not a contiguous Tensor'):
tensor_t = torch.rand(12, 12).cuda(self.rank).t()
_shard_tensor(tensor_t, spec)
@@ -343,24 +309,24 @@ class TestShardTensor(ShardedTensorTestBase):
"rank:3/cuda:3",
],
)
- with self.assertRaisesRegex(ValueError, "does not match with sharding_spec"):
+ with self.assertRaisesRegex(ValueError, 'does not match with sharding_spec'):
_shard_tensor(tensor, spec)
- spec = EnumerableShardingSpec(
- [
- ShardMetadata(
- shard_offsets=[0, 0],
- shard_sizes=[5, 5],
- placement="rank:0/cuda:0",
- ),
- ShardMetadata(
- shard_offsets=[5, 0],
- shard_sizes=[5, 5],
- placement="rank:1/cuda:1",
- ),
- ]
- )
- with self.assertRaisesRegex(NotImplementedError, "not implemented yet!"):
+ spec = EnumerableShardingSpec([
+ ShardMetadata(
+ shard_offsets=[0, 0],
+ shard_sizes=[5, 5],
+ placement="rank:0/cuda:0",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 0],
+ shard_sizes=[5, 5],
+ placement="rank:1/cuda:1",
+ ),
+ ])
+ with self.assertRaisesRegex(
+ NotImplementedError, 'not implemented yet!'
+ ):
_shard_tensor(tensor, spec)
@@ -459,6 +425,7 @@ class TestLocalTensor(ShardedTensorTestBase):
class TestShardedTensorChunked(ShardedTensorTestBase):
+
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
@@ -512,6 +479,7 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_complete_world_size(self):
+
for dim in [0, -2]:
spec = ChunkShardingSpec(
dim=dim,
@@ -545,9 +513,7 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
self.assertEqual([1, 20], shard_metadata.shard_sizes)
else:
self.assertEqual([3, 20], shard_metadata.shard_sizes)
- self.assertEqual(
- f"rank:{rank}/cuda:{rank}", str(shard_metadata.placement)
- )
+ self.assertEqual(f'rank:{rank}/cuda:{rank}', str(shard_metadata.placement))
# Validate remote shards.
remote_shards = st.remote_shards()
@@ -558,20 +524,18 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
for remote_shard in shards:
self.assertEqual(rpc_rank, remote_shard.owner().id)
shard = remote_shard.to_here()
- self.assertEqual(
- f"rank:{rpc_rank}/cuda:{rpc_rank}",
- str(shard.metadata.placement),
- )
+ self.assertEqual(f'rank:{rpc_rank}/cuda:{rpc_rank}', str(shard.metadata.placement))
if rpc_rank == 3:
self.assertEqual((1, 20), shard.tensor.size())
else:
self.assertEqual((3, 20), shard.tensor.size())
+
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_create_sharded_tensor_with_ones(self):
- """Test sharded_tensor.ones(...)"""
+ """ Test sharded_tensor.ones(...) """
spec = ChunkShardingSpec(
dim=0,
@@ -599,7 +563,7 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_gather_even(self) -> None:
- """Test _sharded_tensor.gather(...) with evenly distributed._shards"""
+ """ Test _sharded_tensor.gather(...) with evenly distributed._shards"""
spec = ChunkShardingSpec(
dim=0,
@@ -632,7 +596,7 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_gather_uneven(self) -> None:
- """Test _sharded_tensor.gather(...) with unevenly distributed._shards"""
+ """ Test _sharded_tensor.gather(...) with unevenly distributed._shards"""
spec = ChunkShardingSpec(
dim=0,
@@ -666,7 +630,7 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_create_sharded_tensor_with_zeros(self):
- """Test sharded_tensor.zeros(...)"""
+ """ Test sharded_tensor.zeros(...) """
spec = ChunkShardingSpec(
dim=0,
@@ -690,11 +654,12 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
self.assertEqual((expected_h, w), local_shard.size())
self.assertEqual(local_shard, torch.zeros(expected_h, w))
+
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_create_sharded_tensor_with_rand(self):
- """Test sharded_tensor.rand(...)/randn(...)"""
+ """ Test sharded_tensor.rand(...)/randn(...) """
spec = ChunkShardingSpec(
dim=0,
@@ -745,7 +710,7 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_create_sharded_tensor_with_full(self):
- """Test sharded_tensor.full(...)"""
+ """ Test sharded_tensor.full(...) """
spec = ChunkShardingSpec(
dim=0,
@@ -758,9 +723,7 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
)
h, w = 10, 20
fill_value = 1234
- st = sharded_tensor.full(
- spec, size=(h, w), fill_value=fill_value, dtype=torch.int32
- )
+ st = sharded_tensor.full(spec, size=(h, w), fill_value=fill_value, dtype=torch.int32)
# Validate local shard is initialized with torch.full
local_shards = st.local_shards()
@@ -770,16 +733,14 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
# The split: for rank!=3 ceil(h/4)=3 for rank=3 1
expected_h = 1 if self.rank == 3 else math.ceil(h / 4)
self.assertEqual((expected_h, w), local_shard.size())
- self.assertEqual(
- local_shard,
- torch.full(size=(expected_h, w), fill_value=fill_value, dtype=torch.int32),
- )
+ self.assertEqual(local_shard,
+ torch.full(size=(expected_h, w), fill_value=fill_value, dtype=torch.int32))
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_create_sharded_tensor_like(self):
- """Test tensor like methods, i.e. torch.zeros_like(...), torch.full_like, etc."""
+ """ Test tensor like methods, i.e. torch.zeros_like(...), torch.full_like, etc. """
spec = ChunkShardingSpec(
dim=0,
@@ -802,28 +763,22 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
torch.rand_like: torch.rand,
torch.randn_like: torch.randn,
torch.empty_like: torch.empty,
- torch.full_like: torch.full,
+ torch.full_like: torch.full
}
for op, expect_local_op in tensor_like_ops.items():
if op == torch.full_like:
# special handle full/full_like as it needs to have additional fill_value arg
- expect_tensor = expect_local_op(
- (expected_h, w), 8.8, device=expected_device, dtype=dtype
- )
+ expect_tensor = expect_local_op((expected_h, w), 8.8, device=expected_device, dtype=dtype)
new_op_st = op(st, 8.8, dtype=dtype)
self.assertEqual(new_op_st.local_tensor(), expect_tensor)
elif op == torch.empty_like:
# empty/empty_like we only compare the shape
- expect_tensor = expect_local_op(
- expected_h, w, device=expected_device, dtype=dtype
- )
+ expect_tensor = expect_local_op(expected_h, w, device=expected_device, dtype=dtype)
new_op_st = op(st, dtype=dtype)
self.assertEqual(new_op_st.local_tensor().shape, expect_tensor.shape)
else:
torch.manual_seed(seed)
- expect_tensor = expect_local_op(
- expected_h, w, device=expected_device, dtype=dtype
- )
+ expect_tensor = expect_local_op(expected_h, w, device=expected_device, dtype=dtype)
torch.manual_seed(seed)
new_op_st = op(st, dtype=dtype)
self.assertEqual(new_op_st.local_tensor(), expect_tensor)
@@ -832,6 +787,7 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_partial_world_size(self):
+
spec = ChunkShardingSpec(
dim=0,
placements=[
@@ -859,10 +815,7 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
for shard_rank, shard_metadata in enumerate(shards_metadata):
self.assertEqual([shard_rank * 5, 0], shard_metadata.shard_offsets)
self.assertEqual([5, 20], shard_metadata.shard_sizes)
- self.assertEqual(
- f"rank:{shard_rank + 2}/cuda:{shard_rank + 2}",
- str(shard_metadata.placement),
- )
+ self.assertEqual(f'rank:{shard_rank + 2}/cuda:{shard_rank + 2}', str(shard_metadata.placement))
# Validate remote shards.
remote_shards = st.remote_shards()
@@ -876,15 +829,14 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
for remote_shard in shards:
self.assertEqual(rpc_rank, remote_shard.owner().id)
shard = remote_shard.to_here()
- self.assertEqual(
- f"rank:{rpc_rank}/cuda:{rpc_rank}", str(shard.metadata.placement)
- )
+ self.assertEqual(f'rank:{rpc_rank}/cuda:{rpc_rank}', str(shard.metadata.placement))
self.assertEqual((5, 20), shard.tensor.size())
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_new_group(self):
+
spec = ChunkShardingSpec(
dim=0,
placements=[
@@ -914,10 +866,7 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
for shard_rank, shard_metadata in enumerate(shards_metadata):
self.assertEqual([shard_rank * 5, 0], shard_metadata.shard_offsets)
self.assertEqual([5, 20], shard_metadata.shard_sizes)
- self.assertEqual(
- f"rank:{shard_rank + 1}/cuda:{shard_rank + 2}",
- str(shard_metadata.placement),
- )
+ self.assertEqual(f'rank:{shard_rank + 1}/cuda:{shard_rank + 2}', str(shard_metadata.placement))
# Validate remote shards.
remote_shards = st.remote_shards()
@@ -931,10 +880,7 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
for remote_shard in shards:
shard = remote_shard.to_here()
self.assertEqual(rpc_rank, remote_shard.owner().id)
- self.assertEqual(
- f"rank:{rpc_rank - 1}/cuda:{rpc_rank}",
- str(shard.metadata.placement),
- )
+ self.assertEqual(f'rank:{rpc_rank - 1}/cuda:{rpc_rank}', str(shard.metadata.placement))
self.assertEqual((5, 20), shard.tensor.size())
@with_comms
@@ -960,9 +906,7 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
local_shards = st.local_shards()
self.assertEqual(2, len(local_shards))
for local_shard in local_shards:
- self.assertEqual(
- torch.device(f"cuda:{self.rank}"), local_shard.tensor.device
- )
+ self.assertEqual(torch.device(f"cuda:{self.rank}"), local_shard.tensor.device)
self.assertEqual((2, 20), local_shard.tensor.size())
# Validate global metadata.
@@ -973,10 +917,7 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
for shard_idx, shard_metadata in enumerate(shards_metadata):
self.assertEqual([shard_idx * 2, 0], shard_metadata.shard_offsets)
self.assertEqual([2, 20], shard_metadata.shard_sizes)
- self.assertEqual(
- f"rank:{shard_idx % 4}/cuda:{shard_idx % 4}",
- str(shard_metadata.placement),
- )
+ self.assertEqual(f'rank:{shard_idx % 4}/cuda:{shard_idx % 4}', str(shard_metadata.placement))
# Validate remote shards.
remote_shards = st.remote_shards()
@@ -989,6 +930,7 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
self.assertEqual((2, 20), shard.tensor.size())
self.assertEqual(rpc_rank, remote_shard.owner().id)
+
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_sharding_columns(self):
@@ -1022,72 +964,55 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
for rank, shard_metadata in enumerate(shards_metadata):
self.assertEqual([0, rank * 8], shard_metadata.shard_offsets)
self.assertEqual([10, 8], shard_metadata.shard_sizes)
- self.assertEqual(
- f"rank:{rank}/cuda:{rank}", str(shard_metadata.placement)
- )
+ self.assertEqual(f'rank:{rank}/cuda:{rank}', str(shard_metadata.placement))
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_invalid_sharding(self):
self.init_pg()
- with self.assertRaisesRegex(
- NotImplementedError, "does not support named dimension"
- ):
- spec = ChunkShardingSpec(dim="H", placements=["rank:1/cuda:1"])
+ with self.assertRaisesRegex(NotImplementedError, 'does not support named dimension'):
+ spec = ChunkShardingSpec(dim='H', placements=["rank:1/cuda:1"])
sharded_tensor.empty(spec, 10, 20)
for dim in [2, 3, 4, -3, -4, -5]:
spec = ChunkShardingSpec(dim=dim, placements=["rank:1/cuda:1"])
- with self.assertRaisesRegex(ValueError, "Invalid sharding dim"):
+ with self.assertRaisesRegex(ValueError, 'Invalid sharding dim'):
sharded_tensor.empty(spec, 10, 20)
spec = ChunkShardingSpec(dim=0, placements=["rank:5/cuda:1"])
- with self.assertRaisesRegex(ValueError, "Invalid rank"):
+ with self.assertRaisesRegex(ValueError, 'Invalid rank'):
sharded_tensor.empty(spec, 10, 20)
spec = ChunkShardingSpec(dim=0, placements=["rank:0/cuda:1"])
st = sharded_tensor.empty(spec, 10, 20)
tensor = torch.empty(10, 20)
- with self.assertRaisesRegex(
- RuntimeError, r".*not supported for ShardedTensor!$"
- ):
+ with self.assertRaisesRegex(RuntimeError, r".*not supported for ShardedTensor!$"):
torch.add(st, tensor)
spec = ChunkShardingSpec(dim=0, placements=["rank:0/cuda:1"])
- with self.assertRaisesRegex(
- ValueError, "Only torch.strided layout is currently supported"
- ):
+ with self.assertRaisesRegex(ValueError, 'Only torch.strided layout is currently supported'):
sharded_tensor.empty(spec, 10, 20, layout=torch.sparse_coo)
spec = ChunkShardingSpec(dim=0, placements=["rank:0/cuda:1"])
- with self.assertRaisesRegex(
- ValueError,
- "Only torch.contiguous_format memory_format is currently supported",
- ):
+ with self.assertRaisesRegex(ValueError, 'Only torch.contiguous_format memory_format is currently supported'):
sharded_tensor.empty(spec, 10, 20, memory_format=torch.channels_last)
spec = ChunkShardingSpec(dim=0, placements=["worker0/cuda:1"])
- with self.assertRaisesRegex(
- RuntimeError, "RPC framework needs to be initialized"
- ):
+ with self.assertRaisesRegex(RuntimeError, 'RPC framework needs to be initialized'):
sharded_tensor.empty(spec, 10, 20)
spec = ChunkShardingSpec(dim=0, placements=["rank:0/cuda:1"])
- with self.assertRaisesRegex(
- RuntimeError, "RPC Framework needs to be initialized"
- ):
+ with self.assertRaisesRegex(RuntimeError, 'RPC Framework needs to be initialized'):
st = sharded_tensor.empty(spec, 10, 20, init_rrefs=True)
- with self.assertRaisesRegex(
- RuntimeError, "ShardedTensor created with init_rrefs=False"
- ):
+ with self.assertRaisesRegex(RuntimeError, 'ShardedTensor created with init_rrefs=False'):
st = sharded_tensor.empty(spec, 10, 20)
st.remote_shards()
self.init_rpc()
spec = ChunkShardingSpec(dim=0, placements=["workerfoo/cuda:1"])
- with self.assertRaisesRegex(ValueError, "Invalid worker name"):
+ with self.assertRaisesRegex(ValueError, 'Invalid worker name'):
sharded_tensor.empty(spec, 10, 20, init_rrefs=True)
@skip_if_lt_x_gpu(4)
@@ -1096,22 +1021,18 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
self.init_pg()
# Init RPC with different ranks.
- rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
- _transports=tp_transports()
- )
+ rpc_backend_options = rpc.TensorPipeRpcBackendOptions(_transports=tp_transports())
rpc_backend_options.init_method = f"file://{self.file_name}"
rank = (self.rank + 1) % self.world_size
rpc.init_rpc(
- name=f"worker{rank}",
+ name=f'worker{rank}',
rank=rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
spec = ChunkShardingSpec(dim=0, placements=["rank:1/cuda:1"])
- with self.assertRaisesRegex(
- ValueError, "Default ProcessGroup and RPC ranks must be the same"
- ):
+ with self.assertRaisesRegex(ValueError, 'Default ProcessGroup and RPC ranks must be the same'):
sharded_tensor.empty(spec, 10, 20, init_rrefs=True)
@skip_if_lt_x_gpu(4)
@@ -1150,9 +1071,7 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
for shard_rank, shard_metadata in enumerate(shards_metadata):
self.assertEqual([shard_rank, 0], shard_metadata.shard_offsets)
- self.assertEqual(
- f"rank:{shard_rank}/cuda:{shard_rank}", str(shard_metadata.placement)
- )
+ self.assertEqual(f'rank:{shard_rank}/cuda:{shard_rank}', str(shard_metadata.placement))
if shard_rank <= 1:
self.assertEqual([1, 20], shard_metadata.shard_sizes)
else:
@@ -1205,13 +1124,13 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
self.assertEqual(st.ndim, 2)
# Test with invalid input
st = sharded_tensor.empty(spec, (10, 20), init_rrefs=True)
- with self.assertRaisesRegex(IndexError, "Dimension out of range"):
+ with self.assertRaisesRegex(IndexError, 'Dimension out of range'):
st.size(-3)
- with self.assertRaisesRegex(IndexError, "Dimension out of range"):
+ with self.assertRaisesRegex(IndexError, 'Dimension out of range'):
st.size(2)
with self.assertRaises(TypeError):
- st = sharded_tensor.empty(spec, "foo")
+ st = sharded_tensor.empty(spec, 'foo')
@with_comms
@skip_if_lt_x_gpu(4)
@@ -1252,11 +1171,7 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
self.assertTrue("submodule.sharded_tensor2" in loaded_dict_keys)
# Verify after load.
self.assertTrue(torch.equal(m.sharded_tensor1, module_load.sharded_tensor1))
- self.assertTrue(
- torch.equal(
- m.submodule.sharded_tensor2, module_load.submodule.sharded_tensor2
- )
- )
+ self.assertTrue(torch.equal(m.submodule.sharded_tensor2, module_load.submodule.sharded_tensor2))
@with_comms
@skip_if_lt_x_gpu(4)
@@ -1292,11 +1207,7 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
# Verify after load.
self.assertTrue(torch.equal(m.sharded_tensor1, module_load.sharded_tensor1))
- self.assertTrue(
- torch.equal(
- m.submodule.sharded_tensor2, module_load.submodule.sharded_tensor2
- )
- )
+ self.assertTrue(torch.equal(m.submodule.sharded_tensor2, module_load.submodule.sharded_tensor2))
@with_comms
@skip_if_lt_x_gpu(4)
@@ -1357,21 +1268,17 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
buffer.seek(0)
if self.rank != 0:
- with self.assertRaisesRegex(RuntimeError, "Local rank at save time was"):
+ with self.assertRaisesRegex(RuntimeError, 'Local rank at save time was'):
with load_with_process_group(pg):
state_dict_deser = torch.load(buffer)
else:
- with self.assertRaisesRegex(
- RuntimeError, "Local world size at save time was"
- ):
+ with self.assertRaisesRegex(RuntimeError, 'Local world size at save time was'):
with load_with_process_group(pg):
state_dict_deser = torch.load(buffer)
dist.destroy_process_group()
buffer.seek(0)
- with self.assertRaisesRegex(
- RuntimeError, "Need to initialize default process group"
- ):
+ with self.assertRaisesRegex(RuntimeError, 'Need to initialize default process group'):
state_dict_deser = torch.load(buffer)
rpc.shutdown()
@@ -1379,6 +1286,7 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_cleanup(self):
+
def create_tensors():
spec = ChunkShardingSpec(
dim=0,
@@ -1397,34 +1305,33 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
class TestShardedTensorEnumerable(ShardedTensorTestBase):
+
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_sharded_tensor_metadata(self):
- spec = EnumerableShardingSpec(
- [
- ShardMetadata(
- shard_offsets=[0, 0],
- shard_sizes=[5, 5],
- placement="rank:0/cuda:0",
- ),
- ShardMetadata(
- shard_offsets=[0, 5],
- shard_sizes=[5, 5],
- placement="rank:1/cuda:1",
- ),
- ShardMetadata(
- shard_offsets=[5, 0],
- shard_sizes=[5, 5],
- placement="rank:2/cuda:2",
- ),
- ShardMetadata(
- shard_offsets=[5, 5],
- shard_sizes=[5, 5],
- placement="rank:3/cuda:3",
- ),
- ]
- )
+ spec = EnumerableShardingSpec([
+ ShardMetadata(
+ shard_offsets=[0, 0],
+ shard_sizes=[5, 5],
+ placement="rank:0/cuda:0",
+ ),
+ ShardMetadata(
+ shard_offsets=[0, 5],
+ shard_sizes=[5, 5],
+ placement="rank:1/cuda:1",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 0],
+ shard_sizes=[5, 5],
+ placement="rank:2/cuda:2",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 5],
+ shard_sizes=[5, 5],
+ placement="rank:3/cuda:3",
+ )
+ ])
st = sharded_tensor.empty(spec, 10, 10, init_rrefs=True)
st_metadata = st.metadata()
@@ -1442,30 +1349,28 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
self.assertEqual(torch.double, st.dtype)
# Need CPU for pin_memory
- spec = EnumerableShardingSpec(
- [
- ShardMetadata(
- shard_offsets=[0, 0],
- shard_sizes=[5, 5],
- placement="rank:0/cpu",
- ),
- ShardMetadata(
- shard_offsets=[0, 5],
- shard_sizes=[5, 5],
- placement="rank:1/cpu",
- ),
- ShardMetadata(
- shard_offsets=[5, 0],
- shard_sizes=[5, 5],
- placement="rank:2/cpu",
- ),
- ShardMetadata(
- shard_offsets=[5, 5],
- shard_sizes=[5, 5],
- placement="rank:3/cpu",
- ),
- ]
- )
+ spec = EnumerableShardingSpec([
+ ShardMetadata(
+ shard_offsets=[0, 0],
+ shard_sizes=[5, 5],
+ placement="rank:0/cpu",
+ ),
+ ShardMetadata(
+ shard_offsets=[0, 5],
+ shard_sizes=[5, 5],
+ placement="rank:1/cpu",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 0],
+ shard_sizes=[5, 5],
+ placement="rank:2/cpu",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 5],
+ shard_sizes=[5, 5],
+ placement="rank:3/cpu",
+ )
+ ])
st = sharded_tensor.empty(spec, 10, 10, pin_memory=True, init_rrefs=True)
self.assertTrue(st.is_pinned())
@@ -1474,30 +1379,29 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_grid_sharding(self):
- spec = EnumerableShardingSpec(
- [
- ShardMetadata(
- shard_offsets=[0, 0],
- shard_sizes=[5, 5],
- placement="rank:0/cuda:0",
- ),
- ShardMetadata(
- shard_offsets=[0, 5],
- shard_sizes=[5, 5],
- placement="rank:1/cuda:1",
- ),
- ShardMetadata(
- shard_offsets=[5, 0],
- shard_sizes=[5, 5],
- placement="rank:2/cuda:2",
- ),
- ShardMetadata(
- shard_offsets=[5, 5],
- shard_sizes=[5, 5],
- placement="rank:3/cuda:3",
- ),
- ]
- )
+
+ spec = EnumerableShardingSpec([
+ ShardMetadata(
+ shard_offsets=[0, 0],
+ shard_sizes=[5, 5],
+ placement="rank:0/cuda:0",
+ ),
+ ShardMetadata(
+ shard_offsets=[0, 5],
+ shard_sizes=[5, 5],
+ placement="rank:1/cuda:1",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 0],
+ shard_sizes=[5, 5],
+ placement="rank:2/cuda:2",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 5],
+ shard_sizes=[5, 5],
+ placement="rank:3/cuda:3",
+ )
+ ])
st = sharded_tensor.empty(spec, 10, 10, init_rrefs=True)
self.assertEqual((10, 10), st.size())
@@ -1505,29 +1409,22 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
# Verify local shard.
local_shard = st.local_shards()[0]
- self.assertEqual(torch.device(f"cuda:{self.rank}"), local_shard.tensor.device)
+ self.assertEqual(torch.device(f'cuda:{self.rank}'), local_shard.tensor.device)
self.assertEqual((5, 5), local_shard.tensor.size())
# Verify local shard metadata.
- self.assertEqual(
- (self.rank // 2 * 5, (self.rank % 2) * 5),
- local_shard.metadata.shard_offsets,
- )
+ self.assertEqual((self.rank // 2 * 5, (self.rank % 2) * 5), local_shard.metadata.shard_offsets)
self.assertEqual((5, 5), local_shard.metadata.shard_sizes)
- self.assertEqual(
- f"rank:{self.rank}/cuda:{self.rank}", str(local_shard.metadata.placement)
- )
+ self.assertEqual(f'rank:{self.rank}/cuda:{self.rank}', str(local_shard.metadata.placement))
# Verify global metadata.
st_metadata = st.metadata()
shards_metadata = st_metadata.shards_metadata
self.assertEqual(4, len(shards_metadata))
for rank, shard_metadata in enumerate(shards_metadata):
- self.assertEqual(
- (rank // 2 * 5, (rank % 2) * 5), shard_metadata.shard_offsets
- )
+ self.assertEqual((rank // 2 * 5, (rank % 2) * 5), shard_metadata.shard_offsets)
self.assertEqual((5, 5), shard_metadata.shard_sizes)
- self.assertEqual(f"rank:{rank}/cuda:{rank}", str(shard_metadata.placement))
+ self.assertEqual(f'rank:{rank}/cuda:{rank}', str(shard_metadata.placement))
# Validate remote shards.
remote_shards = st.remote_shards()
@@ -1544,32 +1441,30 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_create_sharded_tensor_with_ones(self):
- """Test sharded_tensor.ones(...)"""
+ """ Test sharded_tensor.ones(...) """
- spec = EnumerableShardingSpec(
- [
- ShardMetadata(
- shard_offsets=[0, 0],
- shard_sizes=[5, 5],
- placement="rank:0/cuda:0",
- ),
- ShardMetadata(
- shard_offsets=[0, 5],
- shard_sizes=[5, 5],
- placement="rank:1/cuda:1",
- ),
- ShardMetadata(
- shard_offsets=[5, 0],
- shard_sizes=[5, 5],
- placement="rank:2/cuda:2",
- ),
- ShardMetadata(
- shard_offsets=[5, 5],
- shard_sizes=[5, 5],
- placement="rank:3/cuda:3",
- ),
- ]
- )
+ spec = EnumerableShardingSpec([
+ ShardMetadata(
+ shard_offsets=[0, 0],
+ shard_sizes=[5, 5],
+ placement="rank:0/cuda:0",
+ ),
+ ShardMetadata(
+ shard_offsets=[0, 5],
+ shard_sizes=[5, 5],
+ placement="rank:1/cuda:1",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 0],
+ shard_sizes=[5, 5],
+ placement="rank:2/cuda:2",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 5],
+ shard_sizes=[5, 5],
+ placement="rank:3/cuda:3",
+ )
+ ])
st = sharded_tensor.ones(spec, 10, 10, init_rrefs=True)
self.assertEqual((10, 10), st.size())
@@ -1577,7 +1472,7 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
# Verify local shard is initialized with torch.ones
local_shard = st.local_shards()[0]
- self.assertEqual(torch.device(f"cuda:{self.rank}"), local_shard.tensor.device)
+ self.assertEqual(torch.device(f'cuda:{self.rank}'), local_shard.tensor.device)
self.assertEqual((5, 5), local_shard.tensor.size())
self.assertEqual(local_shard.tensor, torch.ones(5, 5))
@@ -1585,32 +1480,30 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_gather_even(self) -> None:
- """Test _sharded_tensor.gather(...) with evenly distributed._shards"""
+ """ Test _sharded_tensor.gather(...) with evenly distributed._shards"""
- spec = EnumerableShardingSpec(
- [
- ShardMetadata(
- shard_offsets=[0, 0],
- shard_sizes=[5, 5],
- placement="rank:0/cuda:0",
- ),
- ShardMetadata(
- shard_offsets=[0, 5],
- shard_sizes=[5, 5],
- placement="rank:1/cuda:1",
- ),
- ShardMetadata(
- shard_offsets=[5, 0],
- shard_sizes=[5, 5],
- placement="rank:2/cuda:2",
- ),
- ShardMetadata(
- shard_offsets=[5, 5],
- shard_sizes=[5, 5],
- placement="rank:3/cuda:3",
- ),
- ]
- )
+ spec = EnumerableShardingSpec([
+ ShardMetadata(
+ shard_offsets=[0, 0],
+ shard_sizes=[5, 5],
+ placement="rank:0/cuda:0",
+ ),
+ ShardMetadata(
+ shard_offsets=[0, 5],
+ shard_sizes=[5, 5],
+ placement="rank:1/cuda:1",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 0],
+ shard_sizes=[5, 5],
+ placement="rank:2/cuda:2",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 5],
+ shard_sizes=[5, 5],
+ placement="rank:3/cuda:3",
+ )
+ ])
h, w = 10, 10
st = sharded_tensor.ones(spec, h, w, init_rrefs=True)
@@ -1618,7 +1511,11 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
full_tensor = None
dst = 0
if self.rank == dst:
- full_tensor = torch.zeros(h, w, device=torch.device(f"cuda:{dst}"))
+ full_tensor = torch.zeros(
+ h,
+ w,
+ device=torch.device(f"cuda:{dst}")
+ )
st.gather(dst, full_tensor)
if self.rank == dst:
@@ -1630,32 +1527,30 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_gather_uneven(self) -> None:
- """Test _sharded_tensor.gather(...) with unevenly distributed._shards"""
+ """ Test _sharded_tensor.gather(...) with unevenly distributed._shards"""
- spec = EnumerableShardingSpec(
- [
- ShardMetadata(
- shard_offsets=[0, 0],
- shard_sizes=[5, 5],
- placement="rank:0/cuda:0",
- ),
- ShardMetadata(
- shard_offsets=[0, 5],
- shard_sizes=[5, 5],
- placement="rank:1/cuda:1",
- ),
- ShardMetadata(
- shard_offsets=[5, 0],
- shard_sizes=[5, 5],
- placement="rank:0/cuda:0",
- ),
- ShardMetadata(
- shard_offsets=[5, 5],
- shard_sizes=[5, 5],
- placement="rank:3/cuda:3",
- ),
- ]
- )
+ spec = EnumerableShardingSpec([
+ ShardMetadata(
+ shard_offsets=[0, 0],
+ shard_sizes=[5, 5],
+ placement="rank:0/cuda:0",
+ ),
+ ShardMetadata(
+ shard_offsets=[0, 5],
+ shard_sizes=[5, 5],
+ placement="rank:1/cuda:1",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 0],
+ shard_sizes=[5, 5],
+ placement="rank:0/cuda:0",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 5],
+ shard_sizes=[5, 5],
+ placement="rank:3/cuda:3",
+ )
+ ])
h, w = 10, 10
st = sharded_tensor.ones(spec, h, w, init_rrefs=True)
@@ -1663,7 +1558,11 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
full_tensor = None
dst = 0
if self.rank == dst:
- full_tensor = torch.zeros(h, w, device=torch.device(f"cuda:{dst}"))
+ full_tensor = torch.zeros(
+ h,
+ w,
+ device=torch.device(f"cuda:{dst}")
+ )
st.gather(dst, full_tensor)
if self.rank == dst:
@@ -1714,9 +1613,7 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
self.assertIsInstance(new_st._process_group, distributed_c10d.ProcessGroup)
# test specs before and after the move almost the same except placement device
self.assertEqual(spec_before_move.dim, spec_after_move.dim)
- self.assertEqual(
- len(spec_before_move.placements), len(spec_after_move.placements)
- )
+ self.assertEqual(len(spec_before_move.placements), len(spec_after_move.placements))
for i, remote_device_after in enumerate(spec_after_move.placements):
remote_device_before = spec_before_move.placements[i]
self.assertEqual(remote_device_before.rank(), remote_device_after.rank())
@@ -1800,9 +1697,7 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
self.assertIsInstance(spec_after_move, ChunkShardingSpec)
# test specs before and after the move almost the same except placement device
self.assertEqual(spec_before_move.dim, spec_after_move.dim)
- self.assertEqual(
- len(spec_before_move.placements), len(spec_after_move.placements)
- )
+ self.assertEqual(len(spec_before_move.placements), len(spec_after_move.placements))
for i, remote_device_after in enumerate(spec_after_move.placements):
remote_device_before = spec_before_move.placements[i]
self.assertEqual(remote_device_before.rank(), remote_device_after.rank())
@@ -1917,30 +1812,28 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
def test_uneven_shards(self):
self.init_pg()
- spec = EnumerableShardingSpec(
- [
- ShardMetadata(
- shard_offsets=[0, 0],
- shard_sizes=[2, 4],
- placement="rank:0/cuda:0",
- ),
- ShardMetadata(
- shard_offsets=[0, 4],
- shard_sizes=[4, 2],
- placement="rank:1/cuda:1",
- ),
- ShardMetadata(
- shard_offsets=[2, 0],
- shard_sizes=[4, 4],
- placement="rank:2/cuda:2",
- ),
- ShardMetadata(
- shard_offsets=[4, 4],
- shard_sizes=[2, 2],
- placement="rank:3/cuda:3",
- ),
- ]
- )
+ spec = EnumerableShardingSpec([
+ ShardMetadata(
+ shard_offsets=[0, 0],
+ shard_sizes=[2, 4],
+ placement="rank:0/cuda:0",
+ ),
+ ShardMetadata(
+ shard_offsets=[0, 4],
+ shard_sizes=[4, 2],
+ placement="rank:1/cuda:1",
+ ),
+ ShardMetadata(
+ shard_offsets=[2, 0],
+ shard_sizes=[4, 4],
+ placement="rank:2/cuda:2",
+ ),
+ ShardMetadata(
+ shard_offsets=[4, 4],
+ shard_sizes=[2, 2],
+ placement="rank:3/cuda:3",
+ ),
+ ])
st = sharded_tensor.empty(spec, 6, 6)
self.assertEqual((6, 6), st.size())
@@ -1968,15 +1861,13 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
# Verify local shard.
local_shard = st.local_shards()[0]
- self.assertEqual(torch.device(f"cuda:{self.rank}"), local_shard.tensor.device)
+ self.assertEqual(torch.device(f'cuda:{self.rank}'), local_shard.tensor.device)
verify_size(self.rank, local_shard.tensor.size())
# Verify local shard metadata.
verify_offsets(self.rank, local_shard.metadata.shard_offsets)
verify_size(self.rank, local_shard.metadata.shard_sizes)
- self.assertEqual(
- f"rank:{self.rank}/cuda:{self.rank}", str(local_shard.metadata.placement)
- )
+ self.assertEqual(f'rank:{self.rank}/cuda:{self.rank}', str(local_shard.metadata.placement))
# Verify global metadata.
st_metadata = st.metadata()
@@ -1985,26 +1876,24 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
for rank, shard_metadata in enumerate(shards_metadata):
verify_offsets(rank, shard_metadata.shard_offsets)
verify_size(rank, shard_metadata.shard_sizes)
- self.assertEqual(f"rank:{rank}/cuda:{rank}", str(shard_metadata.placement))
+ self.assertEqual(f'rank:{rank}/cuda:{rank}', str(shard_metadata.placement))
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_partial_world_size(self):
- spec = EnumerableShardingSpec(
- [
- ShardMetadata(
- shard_offsets=[0, 0],
- shard_sizes=[5, 5],
- placement="rank:0/cuda:0",
- ),
- ShardMetadata(
- shard_offsets=[5, 0],
- shard_sizes=[5, 5],
- placement="rank:1/cuda:1",
- ),
- ]
- )
+ spec = EnumerableShardingSpec([
+ ShardMetadata(
+ shard_offsets=[0, 0],
+ shard_sizes=[5, 5],
+ placement="rank:0/cuda:0",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 0],
+ shard_sizes=[5, 5],
+ placement="rank:1/cuda:1",
+ ),
+ ])
st = sharded_tensor.empty(spec, 10, 5, init_rrefs=True)
self.assertEqual((10, 5), st.size())
@@ -2016,18 +1905,13 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
if self.rank <= 1:
# Verify local shard.
local_shard = st.local_shards()[0]
- self.assertEqual(
- torch.device(f"cuda:{self.rank}"), local_shard.tensor.device
- )
+ self.assertEqual(torch.device(f'cuda:{self.rank}'), local_shard.tensor.device)
self.assertEqual((5, 5), local_shard.tensor.size())
# Verify local shard metadata.
self.assertEqual((self.rank * 5, 0), local_shard.metadata.shard_offsets)
self.assertEqual((5, 5), local_shard.metadata.shard_sizes)
- self.assertEqual(
- f"rank:{self.rank}/cuda:{self.rank}",
- str(local_shard.metadata.placement),
- )
+ self.assertEqual(f'rank:{self.rank}/cuda:{self.rank}', str(local_shard.metadata.placement))
# Verify global metadata.
st_metadata = st.metadata()
@@ -2036,7 +1920,7 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
for rank, shard_metadata in enumerate(shards_metadata):
self.assertEqual((rank * 5, 0), shard_metadata.shard_offsets)
self.assertEqual((5, 5), shard_metadata.shard_sizes)
- self.assertEqual(f"rank:{rank}/cuda:{rank}", str(shard_metadata.placement))
+ self.assertEqual(f'rank:{rank}/cuda:{rank}', str(shard_metadata.placement))
# Validate remote shards.
remote_shards = st.remote_shards()
@@ -2057,20 +1941,18 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_new_group(self):
- spec = EnumerableShardingSpec(
- [
- ShardMetadata(
- shard_offsets=[0, 0],
- shard_sizes=[5, 5],
- placement="rank:0/cuda:1",
- ),
- ShardMetadata(
- shard_offsets=[5, 0],
- shard_sizes=[5, 5],
- placement="rank:2/cuda:3",
- ),
- ]
- )
+ spec = EnumerableShardingSpec([
+ ShardMetadata(
+ shard_offsets=[0, 0],
+ shard_sizes=[5, 5],
+ placement="rank:0/cuda:1",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 0],
+ shard_sizes=[5, 5],
+ placement="rank:2/cuda:3",
+ ),
+ ])
pg = dist.new_group(ranks=[1, 2, 3])
@@ -2079,20 +1961,13 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
if self.rank == 1 or self.rank == 3:
# Verify local shard.
local_shard = st.local_shards()[0]
- self.assertEqual(
- torch.device(f"cuda:{self.rank}"), local_shard.tensor.device
- )
+ self.assertEqual(torch.device(f'cuda:{self.rank}'), local_shard.tensor.device)
self.assertEqual((5, 5), local_shard.tensor.size())
# Verify local shard metadata.
- self.assertEqual(
- (self.rank // 2 * 5, 0), local_shard.metadata.shard_offsets
- )
+ self.assertEqual((self.rank // 2 * 5, 0), local_shard.metadata.shard_offsets)
self.assertEqual((5, 5), local_shard.metadata.shard_sizes)
- self.assertEqual(
- f"rank:{self.rank - 1}/cuda:{self.rank}",
- str(local_shard.metadata.placement),
- )
+ self.assertEqual(f'rank:{self.rank - 1}/cuda:{self.rank}', str(local_shard.metadata.placement))
# Verify global metadata.
st_metadata = st.metadata()
@@ -2101,9 +1976,7 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
for rank, shard_metadata in enumerate(shards_metadata):
self.assertEqual((rank * 5, 0), shard_metadata.shard_offsets)
self.assertEqual((5, 5), shard_metadata.shard_sizes)
- self.assertEqual(
- f"rank:{rank * 2}/cuda:{rank * 2 + 1}", str(shard_metadata.placement)
- )
+ self.assertEqual(f'rank:{rank * 2}/cuda:{rank * 2 + 1}', str(shard_metadata.placement))
# Validate remote shards.
remote_shards = st.remote_shards()
@@ -2125,30 +1998,28 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_multiple_local_shards(self):
- spec = EnumerableShardingSpec(
- [
- ShardMetadata(
- shard_offsets=[0, 0],
- shard_sizes=[5, 5],
- placement="rank:0/cuda:0",
- ),
- ShardMetadata(
- shard_offsets=[0, 5],
- shard_sizes=[5, 5],
- placement="rank:1/cuda:1",
- ),
- ShardMetadata(
- shard_offsets=[5, 0],
- shard_sizes=[5, 5],
- placement="rank:0/cuda:0",
- ),
- ShardMetadata(
- shard_offsets=[5, 5],
- shard_sizes=[5, 5],
- placement="rank:1/cuda:1",
- ),
- ]
- )
+ spec = EnumerableShardingSpec([
+ ShardMetadata(
+ shard_offsets=[0, 0],
+ shard_sizes=[5, 5],
+ placement="rank:0/cuda:0",
+ ),
+ ShardMetadata(
+ shard_offsets=[0, 5],
+ shard_sizes=[5, 5],
+ placement="rank:1/cuda:1",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 0],
+ shard_sizes=[5, 5],
+ placement="rank:0/cuda:0",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 5],
+ shard_sizes=[5, 5],
+ placement="rank:1/cuda:1",
+ )
+ ])
st = sharded_tensor.empty(spec, 10, 10, init_rrefs=True)
self.assertEqual((10, 10), st.size())
@@ -2158,20 +2029,13 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
# Verify local shards.
for idx, local_shard in enumerate(st.local_shards()):
- self.assertEqual(
- torch.device(f"cuda:{self.rank}"), local_shard.tensor.device
- )
+ self.assertEqual(torch.device(f'cuda:{self.rank}'), local_shard.tensor.device)
self.assertEqual((5, 5), local_shard.tensor.size())
# Verify local shard metadata.
- self.assertEqual(
- (idx * 5, self.rank * 5), local_shard.metadata.shard_offsets
- )
+ self.assertEqual((idx * 5, self.rank * 5), local_shard.metadata.shard_offsets)
self.assertEqual((5, 5), local_shard.metadata.shard_sizes)
- self.assertEqual(
- f"rank:{self.rank}/cuda:{self.rank}",
- str(local_shard.metadata.placement),
- )
+ self.assertEqual(f'rank:{self.rank}/cuda:{self.rank}', str(local_shard.metadata.placement))
else:
self.assertEqual(0, len(st.local_shards()))
@@ -2180,15 +2044,9 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
shards_metadata = st_metadata.shards_metadata
self.assertEqual(4, len(shards_metadata))
for shard_rank, shard_metadata in enumerate(shards_metadata):
- self.assertEqual(
- (shard_rank // 2 * 5, (shard_rank % 2) * 5),
- shard_metadata.shard_offsets,
- )
+ self.assertEqual((shard_rank // 2 * 5, (shard_rank % 2) * 5), shard_metadata.shard_offsets)
self.assertEqual((5, 5), shard_metadata.shard_sizes)
- self.assertEqual(
- f"rank:{shard_rank % 2}/cuda:{shard_rank % 2}",
- str(shard_metadata.placement),
- )
+ self.assertEqual(f'rank:{shard_rank % 2}/cuda:{shard_rank % 2}', str(shard_metadata.placement))
# Validate remote shards.
remote_shards = st.remote_shards()
@@ -2209,30 +2067,28 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_with_rpc_names(self):
- spec = EnumerableShardingSpec(
- [
- ShardMetadata(
- shard_offsets=[0, 0],
- shard_sizes=[5, 5],
- placement="worker0/cuda:0",
- ),
- ShardMetadata(
- shard_offsets=[0, 5],
- shard_sizes=[5, 5],
- placement="worker1/cuda:1",
- ),
- ShardMetadata(
- shard_offsets=[5, 0],
- shard_sizes=[5, 5],
- placement="worker2/cuda:2",
- ),
- ShardMetadata(
- shard_offsets=[5, 5],
- shard_sizes=[5, 5],
- placement="worker3/cuda:3",
- ),
- ]
- )
+ spec = EnumerableShardingSpec([
+ ShardMetadata(
+ shard_offsets=[0, 0],
+ shard_sizes=[5, 5],
+ placement="worker0/cuda:0",
+ ),
+ ShardMetadata(
+ shard_offsets=[0, 5],
+ shard_sizes=[5, 5],
+ placement="worker1/cuda:1",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 0],
+ shard_sizes=[5, 5],
+ placement="worker2/cuda:2",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 5],
+ shard_sizes=[5, 5],
+ placement="worker3/cuda:3",
+ )
+ ])
st = sharded_tensor.empty(spec, 10, 10, init_rrefs=True)
self.assertEqual((10, 10), st.size())
@@ -2240,29 +2096,22 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
# Verify local shard.
local_shard = st.local_shards()[0]
- self.assertEqual(torch.device(f"cuda:{self.rank}"), local_shard.tensor.device)
+ self.assertEqual(torch.device(f'cuda:{self.rank}'), local_shard.tensor.device)
self.assertEqual((5, 5), local_shard.tensor.size())
# Verify local shard metadata.
- self.assertEqual(
- (self.rank // 2 * 5, (self.rank % 2) * 5),
- local_shard.metadata.shard_offsets,
- )
+ self.assertEqual((self.rank // 2 * 5, (self.rank % 2) * 5), local_shard.metadata.shard_offsets)
self.assertEqual((5, 5), local_shard.metadata.shard_sizes)
- self.assertEqual(
- f"worker{self.rank}/cuda:{self.rank}", str(local_shard.metadata.placement)
- )
+ self.assertEqual(f'worker{self.rank}/cuda:{self.rank}', str(local_shard.metadata.placement))
# Verify global metadata.
st_metadata = st.metadata()
shards_metadata = st_metadata.shards_metadata
self.assertEqual(4, len(shards_metadata))
for rank, shard_metadata in enumerate(shards_metadata):
- self.assertEqual(
- (rank // 2 * 5, (rank % 2) * 5), shard_metadata.shard_offsets
- )
+ self.assertEqual((rank // 2 * 5, (rank % 2) * 5), shard_metadata.shard_offsets)
self.assertEqual((5, 5), shard_metadata.shard_sizes)
- self.assertEqual(f"worker{rank}/cuda:{rank}", str(shard_metadata.placement))
+ self.assertEqual(f'worker{rank}/cuda:{rank}', str(shard_metadata.placement))
# Validate remote shards.
remote_shards = st.remote_shards()
@@ -2285,9 +2134,7 @@ class TestShardedTensorFromLocalTensor(ShardedTensorTestBase):
local_shard_metadata = None
rank_to_metadata = {}
for shard_metadata in tensor_meta.shards_metadata:
- rank, device = _parse_and_validate_remote_device(
- pg, shard_metadata.placement
- )
+ rank, device = _parse_and_validate_remote_device(pg, shard_metadata.placement)
rank_to_metadata[rank] = shard_metadata
if rank == self.rank:
local_tensor = torch.rand(shard_metadata.shard_sizes).cuda(device)
@@ -2369,7 +2216,9 @@ class TestShardedTensorFromLocalTensor(ShardedTensorTestBase):
)
st_size = [24, 12]
local_tensor = torch.rand(*st_size).cuda(self.rank)
- with self.assertRaisesRegex(ValueError, "do not cover the entire tensor"):
+ with self.assertRaisesRegex(
+ ValueError, "do not cover the entire tensor"
+ ):
ShardedTensor._init_from_local_tensor(
local_tensor,
enumerable_sharding_spec,
@@ -2387,6 +2236,7 @@ class TestShardedTensorFromLocalTensor(ShardedTensorTestBase):
class TestShardedTensorFromLocalShards(ShardedTensorTestBase):
+
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(4)
@requires_nccl()
@@ -2395,22 +2245,24 @@ class TestShardedTensorFromLocalShards(ShardedTensorTestBase):
local_shard_metadata = ShardMetadata(
shard_offsets=shard_offsets,
shard_sizes=[5, 5],
- placement=f"rank:{self.rank}/cuda:{self.rank}",
+ placement=f"rank:{self.rank}/cuda:{self.rank}"
)
local_tensor = torch.randn(5, 5, device=f"cuda:{self.rank}")
local_shard = sharded_tensor.Shard(local_tensor, local_shard_metadata)
local_shard_from_offsets = sharded_tensor.Shard.from_tensor_and_offsets(
- local_tensor, shard_offsets=shard_offsets, rank=self.rank
+ local_tensor,
+ shard_offsets=shard_offsets,
+ rank=self.rank
)
self.assertEqual(local_shard.metadata, local_shard_from_offsets.metadata)
wrong_local_shard_metadata = ShardMetadata(
shard_offsets=shard_offsets,
shard_sizes=[6, 5],
- placement=f"rank:{self.rank}/cuda:{self.rank}",
+ placement=f"rank:{self.rank}/cuda:{self.rank}"
)
- with self.assertRaisesRegex(ValueError, "Shard tensor size does not match"):
+ with self.assertRaisesRegex(ValueError, 'Shard tensor size does not match'):
local_shard_from_wrong_meta = sharded_tensor.Shard(
local_tensor,
metadata=wrong_local_shard_metadata,
@@ -2423,45 +2275,32 @@ class TestShardedTensorFromLocalShards(ShardedTensorTestBase):
local_shard_metadata = ShardMetadata(
shard_offsets=[(self.rank // 2) * 5, (self.rank % 2) * 5],
shard_sizes=[5, 5],
- placement=f"rank:{self.rank}/cuda:{self.rank}",
+ placement=f"rank:{self.rank}/cuda:{self.rank}"
)
- local_shards = [
- sharded_tensor.Shard(
- torch.randn(5, 5, device=f"cuda:{self.rank}"), local_shard_metadata
- )
- ]
+ local_shards = [sharded_tensor.Shard(torch.randn(5, 5, device=f"cuda:{self.rank}"), local_shard_metadata)]
- st = sharded_tensor.init_from_local_shards(
- local_shards, [10, 10], init_rrefs=True
- )
+ st = sharded_tensor.init_from_local_shards(local_shards, [10, 10], init_rrefs=True)
self.assertEqual((10, 10), st.size())
self.assertEqual(1, len(st.local_shards()))
# Verify local shard.
local_shard = st.local_shards()[0]
- self.assertEqual(torch.device(f"cuda:{self.rank}"), local_shard.tensor.device)
+ self.assertEqual(torch.device(f'cuda:{self.rank}'), local_shard.tensor.device)
self.assertEqual((5, 5), local_shard.tensor.size())
# Verify local shard metadata.
- self.assertEqual(
- (self.rank // 2 * 5, (self.rank % 2) * 5),
- local_shard.metadata.shard_offsets,
- )
+ self.assertEqual((self.rank // 2 * 5, (self.rank % 2) * 5), local_shard.metadata.shard_offsets)
self.assertEqual((5, 5), local_shard.metadata.shard_sizes)
- self.assertEqual(
- f"rank:{self.rank}/cuda:{self.rank}", str(local_shard.metadata.placement)
- )
+ self.assertEqual(f'rank:{self.rank}/cuda:{self.rank}', str(local_shard.metadata.placement))
# Verify global metadata.
shards_metadata = st.metadata().shards_metadata
self.assertEqual(4, len(shards_metadata))
for rank, shard_metadata in enumerate(shards_metadata):
- self.assertEqual(
- (rank // 2 * 5, (rank % 2) * 5), shard_metadata.shard_offsets
- )
+ self.assertEqual((rank // 2 * 5, (rank % 2) * 5), shard_metadata.shard_offsets)
self.assertEqual((5, 5), shard_metadata.shard_sizes)
- self.assertEqual(f"rank:{rank}/cuda:{rank}", str(shard_metadata.placement))
+ self.assertEqual(f'rank:{rank}/cuda:{rank}', str(shard_metadata.placement))
# Validate remote shards.
remote_shards = st.remote_shards()
@@ -2541,7 +2380,7 @@ class TestShardedTensorFromLocalShards(ShardedTensorTestBase):
local_shard_metadata = ShardMetadata(
shard_offsets=[(self.rank // 2) * 5, (self.rank % 2) * 5],
shard_sizes=[5, 5],
- placement=f"rank:{self.rank}/cuda:{self.rank}",
+ placement=f"rank:{self.rank}/cuda:{self.rank}"
)
shards_metadata = []
@@ -2549,19 +2388,13 @@ class TestShardedTensorFromLocalShards(ShardedTensorTestBase):
if r == self.rank:
shards_metadata.append(local_shard_metadata)
else:
- shards_metadata.append(
- ShardMetadata(
- shard_offsets=[(r // 2) * 5, (r % 2) * 5],
- shard_sizes=[5, 5],
- placement=f"rank:{r}/cuda:{r}",
- )
- )
+ shards_metadata.append(ShardMetadata(
+ shard_offsets=[(r // 2) * 5, (r % 2) * 5],
+ shard_sizes=[5, 5],
+ placement=f"rank:{r}/cuda:{r}"
+ ))
- local_shards = [
- sharded_tensor.Shard(
- torch.randn(5, 5, device=f"cuda:{self.rank}"), local_shard_metadata
- )
- ]
+ local_shards = [sharded_tensor.Shard(torch.randn(5, 5, device=f"cuda:{self.rank}"), local_shard_metadata)]
tensor_properties = TensorProperties(
dtype=torch.get_default_dtype(),
@@ -2587,28 +2420,21 @@ class TestShardedTensorFromLocalShards(ShardedTensorTestBase):
# Verify local shard.
local_shard = st.local_shards()[0]
- self.assertEqual(torch.device(f"cuda:{self.rank}"), local_shard.tensor.device)
+ self.assertEqual(torch.device(f'cuda:{self.rank}'), local_shard.tensor.device)
self.assertEqual((5, 5), local_shard.tensor.size())
# Verify local shard metadata.
- self.assertEqual(
- (self.rank // 2 * 5, (self.rank % 2) * 5),
- local_shard.metadata.shard_offsets,
- )
+ self.assertEqual((self.rank // 2 * 5, (self.rank % 2) * 5), local_shard.metadata.shard_offsets)
self.assertEqual((5, 5), local_shard.metadata.shard_sizes)
- self.assertEqual(
- f"rank:{self.rank}/cuda:{self.rank}", str(local_shard.metadata.placement)
- )
+ self.assertEqual(f'rank:{self.rank}/cuda:{self.rank}', str(local_shard.metadata.placement))
# Verify global metadata.
shards_metadata = st.metadata().shards_metadata
self.assertEqual(4, len(shards_metadata))
for rank, shard_metadata in enumerate(shards_metadata):
- self.assertEqual(
- (rank // 2 * 5, (rank % 2) * 5), shard_metadata.shard_offsets
- )
+ self.assertEqual((rank // 2 * 5, (rank % 2) * 5), shard_metadata.shard_offsets)
self.assertEqual((5, 5), shard_metadata.shard_sizes)
- self.assertEqual(f"rank:{rank}/cuda:{rank}", str(shard_metadata.placement))
+ self.assertEqual(f'rank:{rank}/cuda:{rank}', str(shard_metadata.placement))
# Validate remote shards.
remote_shards = st.remote_shards()
@@ -2631,34 +2457,21 @@ class TestShardedTensorFromLocalShards(ShardedTensorTestBase):
local_shard_metadata = ShardMetadata(
shard_offsets=[5 * (self.rank - 1), 0],
shard_sizes=[5, 5],
- placement=f"rank:{self.rank - 1}/cuda:{self.rank}",
+ placement=f"rank:{self.rank - 1}/cuda:{self.rank}"
)
- local_shards = [
- sharded_tensor.Shard(
- torch.randn(5, 5, device=f"cuda:{self.rank}"), local_shard_metadata
- )
- ]
+ local_shards = [sharded_tensor.Shard(torch.randn(5, 5, device=f"cuda:{self.rank}"), local_shard_metadata)]
- st = sharded_tensor.init_from_local_shards(
- local_shards, [15, 5], process_group=new_pg
- )
+ st = sharded_tensor.init_from_local_shards(local_shards, [15, 5], process_group=new_pg)
# Verify local shard.
local_shard = st.local_shards()[0]
- self.assertEqual(
- torch.device(f"cuda:{self.rank}"), local_shard.tensor.device
- )
+ self.assertEqual(torch.device(f'cuda:{self.rank}'), local_shard.tensor.device)
self.assertEqual((5, 5), local_shard.tensor.size())
# Verify local shard metadata.
- self.assertEqual(
- ((self.rank - 1) * 5, 0), local_shard.metadata.shard_offsets
- )
+ self.assertEqual(((self.rank - 1) * 5, 0), local_shard.metadata.shard_offsets)
self.assertEqual((5, 5), local_shard.metadata.shard_sizes)
- self.assertEqual(
- f"rank:{self.rank - 1}/cuda:{self.rank}",
- str(local_shard.metadata.placement),
- )
+ self.assertEqual(f'rank:{self.rank - 1}/cuda:{self.rank}', str(local_shard.metadata.placement))
# Verify global metadata.
st_metadata = st.metadata()
@@ -2667,9 +2480,8 @@ class TestShardedTensorFromLocalShards(ShardedTensorTestBase):
for rank, shard_metadata in enumerate(shards_metadata):
self.assertEqual((rank * 5, 0), shard_metadata.shard_offsets)
self.assertEqual((5, 5), shard_metadata.shard_sizes)
- self.assertEqual(
- f"rank:{rank}/cuda:{rank + 1}", str(shard_metadata.placement)
- )
+ self.assertEqual(f'rank:{rank}/cuda:{rank + 1}', str(shard_metadata.placement))
+
@with_comms
@skip_if_lt_x_gpu(4)
@@ -2678,57 +2490,36 @@ class TestShardedTensorFromLocalShards(ShardedTensorTestBase):
local_shard_metadata = ShardMetadata(
shard_offsets=[(self.rank // 2) * 5, (self.rank % 2) * 5],
shard_sizes=[5, 5],
- placement=f"rank:{self.rank}/cuda:{self.rank}",
+ placement=f"rank:{self.rank}/cuda:{self.rank}"
)
indices = [[0, 1, 1], [2, 0, 2]]
values = [3.2, 4.5, 5.8]
- sparse_tensor = torch.sparse_coo_tensor(
- indices, values, (5, 5), device=f"cuda:{self.rank}"
- )
+ sparse_tensor = torch.sparse_coo_tensor(indices, values, (5, 5), device=f"cuda:{self.rank}")
empty_local_shards = []
- with self.assertRaisesRegex(ValueError, "have no local shards on all ranks"):
- st = sharded_tensor.init_from_local_shards(
- empty_local_shards, [10, 10], init_rrefs=True
- )
+ with self.assertRaisesRegex(ValueError, 'have no local shards on all ranks'):
+ st = sharded_tensor.init_from_local_shards(empty_local_shards, [10, 10], init_rrefs=True)
wrong_layout_shards = [
sharded_tensor.Shard(sparse_tensor, local_shard_metadata)
]
- with self.assertRaisesRegex(
- ValueError, "Only torch.strided layout is currently supported"
- ):
+ with self.assertRaisesRegex(ValueError, 'Only torch.strided layout is currently supported'):
st = sharded_tensor.init_from_local_shards(
- wrong_layout_shards, [10, 10], init_rrefs=True
- )
+ wrong_layout_shards, [10, 10], init_rrefs=True)
wrong_memory_format_shards = [
- sharded_tensor.Shard(
- torch.randn(5, 5, device=f"cuda:{self.rank}").t(), local_shard_metadata
- )
+ sharded_tensor.Shard(torch.randn(5, 5, device=f"cuda:{self.rank}").t(), local_shard_metadata)
]
- with self.assertRaisesRegex(
- ValueError,
- "Only torch.contiguous_format memory_format is currently supported",
- ):
+ with self.assertRaisesRegex(ValueError, 'Only torch.contiguous_format memory_format is currently supported'):
st = sharded_tensor.init_from_local_shards(
- wrong_memory_format_shards, [10, 10], init_rrefs=True
- )
+ wrong_memory_format_shards, [10, 10], init_rrefs=True)
- with self.assertRaisesRegex(ValueError, "Shard tensor size does not match"):
- wrong_size_shards = [
- sharded_tensor.Shard(
- torch.randn(2, 3, device=f"cuda:{self.rank}"), local_shard_metadata
- )
- ]
+ with self.assertRaisesRegex(ValueError, 'Shard tensor size does not match'):
+ wrong_size_shards = [sharded_tensor.Shard(torch.randn(2, 3, device=f"cuda:{self.rank}"), local_shard_metadata)]
- with self.assertRaisesRegex(
- ValueError, "Local shard tensor device does not match"
- ):
- wrong_device_shards = [
- sharded_tensor.Shard(torch.randn(5, 5), local_shard_metadata)
- ]
+ with self.assertRaisesRegex(ValueError, "Local shard tensor device does not match"):
+ wrong_device_shards = [sharded_tensor.Shard(torch.randn(5, 5), local_shard_metadata)]
@with_comms
@skip_if_lt_x_gpu(4)
@@ -2737,58 +2528,37 @@ class TestShardedTensorFromLocalShards(ShardedTensorTestBase):
local_shard_metadata = ShardMetadata(
shard_offsets=[(self.rank // 2) * 5, (self.rank % 2) * 5],
shard_sizes=[5, 5],
- placement=f"rank:{self.rank}/cuda:{self.rank}",
+ placement=f"rank:{self.rank}/cuda:{self.rank}"
)
tensor_overall_size = [10, 10] if self.rank == 0 else [10, 5]
wrong_dtype_shards = [
- sharded_tensor.Shard(
- torch.ones(5, 5, device=f"cuda:{self.rank}"), local_shard_metadata
- )
+ sharded_tensor.Shard(torch.ones(5, 5, device=f"cuda:{self.rank}"), local_shard_metadata)
]
- with self.assertRaisesRegex(
- ValueError,
- "ShardedTensor global_size property does not match from different ranks!",
- ):
- st = sharded_tensor.init_from_local_shards(
- wrong_dtype_shards, tensor_overall_size, init_rrefs=True
- )
+ with self.assertRaisesRegex(ValueError, "ShardedTensor global_size property does not match from different ranks!"):
+ st = sharded_tensor.init_from_local_shards(wrong_dtype_shards, tensor_overall_size, init_rrefs=True)
tensor_dtype = torch.int if self.rank == 0 else torch.float32
wrong_dtype_shards = [
- sharded_tensor.Shard(
- torch.ones(5, 5, device=f"cuda:{self.rank}", dtype=tensor_dtype),
- local_shard_metadata,
- )
+ sharded_tensor.Shard(torch.ones(5, 5, device=f"cuda:{self.rank}", dtype=tensor_dtype), local_shard_metadata)
]
- with self.assertRaisesRegex(
- ValueError,
- "ShardedTensor dtype property does not match from different ranks!",
- ):
- st = sharded_tensor.init_from_local_shards(
- wrong_dtype_shards, [10, 10], init_rrefs=True
- )
+ with self.assertRaisesRegex(ValueError, "ShardedTensor dtype property does not match from different ranks!"):
+ st = sharded_tensor.init_from_local_shards(wrong_dtype_shards, [10, 10], init_rrefs=True)
tensor_requires_grad = True if self.rank == 0 else False
wrong_requires_grad_shards = [
sharded_tensor.Shard(
- torch.randn(
- 5, 5, device=f"cuda:{self.rank}", requires_grad=tensor_requires_grad
- ),
- local_shard_metadata,
+ torch.randn(5, 5, device=f"cuda:{self.rank}", requires_grad=tensor_requires_grad),
+ local_shard_metadata
)
]
- with self.assertRaisesRegex(
- ValueError,
- "ShardedTensor requires_grad property does not match from different ranks!",
- ):
+ with self.assertRaisesRegex(ValueError, 'ShardedTensor requires_grad property does not match from different ranks!'):
st = sharded_tensor.init_from_local_shards(
- wrong_requires_grad_shards, [10, 10], init_rrefs=True
- )
+ wrong_requires_grad_shards, [10, 10], init_rrefs=True)
local_shard_metadata = ShardMetadata(
shard_offsets=[(self.rank // 2) * 5, (self.rank % 2) * 5],
shard_sizes=[5, 5],
- placement=f"rank:{self.rank}/cpu",
+ placement=f"rank:{self.rank}/cpu"
)
@with_comms(init_rpc=False, backend="gloo")
@@ -2798,36 +2568,24 @@ class TestShardedTensorFromLocalShards(ShardedTensorTestBase):
local_shard_metadata = ShardMetadata(
shard_offsets=[(self.rank // 2) * 5, (self.rank % 2) * 5],
shard_sizes=[5, 5],
- placement=f"rank:{self.rank}/cpu",
+ placement=f"rank:{self.rank}/cpu"
)
wrong_pin_memory_local_shards = [
- sharded_tensor.Shard(
- torch.randn(5, 5, pin_memory=True), local_shard_metadata
- ),
- sharded_tensor.Shard(
- torch.randn(5, 5, pin_memory=False), local_shard_metadata
- ),
+ sharded_tensor.Shard(torch.randn(5, 5, pin_memory=True), local_shard_metadata),
+ sharded_tensor.Shard(torch.randn(5, 5, pin_memory=False), local_shard_metadata)
]
- with self.assertRaisesRegex(
- ValueError, "Local shards' tensor pin_memory property need to be the same"
- ):
+ with self.assertRaisesRegex(ValueError, "Local shards' tensor pin_memory property need to be the same"):
st = sharded_tensor.init_from_local_shards(
- wrong_pin_memory_local_shards, [10, 10], init_rrefs=True
- )
+ wrong_pin_memory_local_shards, [10, 10], init_rrefs=True)
tensor_pin_memory = True if self.rank == 0 else False
wrong_pin_memory_shards_cross_ranks = [
- sharded_tensor.Shard(
- torch.randn(5, 5, pin_memory=tensor_pin_memory), local_shard_metadata
- )
+ sharded_tensor.Shard(torch.randn(5, 5, pin_memory=tensor_pin_memory), local_shard_metadata)
]
- with self.assertRaisesRegex(
- ValueError,
- "ShardedTensor pin_memory property does not match from different ranks!",
- ):
+ with self.assertRaisesRegex(ValueError, 'ShardedTensor pin_memory property does not match from different ranks!'):
st = sharded_tensor.init_from_local_shards(
- wrong_pin_memory_shards_cross_ranks, [10, 10], init_rrefs=True
- )
+ wrong_pin_memory_shards_cross_ranks, [10, 10], init_rrefs=True)
+
@with_comms
@skip_if_lt_x_gpu(4)
@@ -2837,20 +2595,14 @@ class TestShardedTensorFromLocalShards(ShardedTensorTestBase):
local_shard_metadata = ShardMetadata(
shard_offsets=[(self.rank // 2) * 5, (self.rank % 2) * 5],
shard_sizes=local_shard_size,
- placement=f"rank:{self.rank}/cuda:{self.rank}",
+ placement=f"rank:{self.rank}/cuda:{self.rank}"
)
- local_shards = [
- sharded_tensor.Shard(
- torch.randn(local_shard_size, device=f"cuda:{self.rank}"),
- local_shard_metadata,
- )
- ]
+ local_shards = [sharded_tensor.Shard(torch.randn(local_shard_size, device=f"cuda:{self.rank}"), local_shard_metadata)]
with self.assertRaisesRegex(ValueError, "overlap"):
- sharded_tensor.init_from_local_shards(
- local_shards, [10, 10], init_rrefs=True
- )
+ sharded_tensor.init_from_local_shards(local_shards, [10, 10], init_rrefs=True)
+
@with_comms
@skip_if_lt_x_gpu(4)
@@ -2860,20 +2612,13 @@ class TestShardedTensorFromLocalShards(ShardedTensorTestBase):
local_shard_metadata = ShardMetadata(
shard_offsets=[(self.rank // 2) * 5, (self.rank % 2) * 5],
shard_sizes=local_shard_size,
- placement=f"rank:{self.rank}/cuda:{self.rank}",
+ placement=f"rank:{self.rank}/cuda:{self.rank}"
)
- local_shards = [
- sharded_tensor.Shard(
- torch.randn(local_shard_size, device=f"cuda:{self.rank}"),
- local_shard_metadata,
- )
- ]
+ local_shards = [sharded_tensor.Shard(torch.randn(local_shard_size, device=f"cuda:{self.rank}"), local_shard_metadata)]
with self.assertRaisesRegex(ValueError, "does not match tensor volume"):
- sharded_tensor.init_from_local_shards(
- local_shards, [10, 10], init_rrefs=True
- )
+ sharded_tensor.init_from_local_shards(local_shards, [10, 10], init_rrefs=True)
@with_comms
@skip_if_lt_x_gpu(4)
@@ -2882,7 +2627,7 @@ class TestShardedTensorFromLocalShards(ShardedTensorTestBase):
local_shard_metadata = ShardMetadata(
shard_offsets=[(self.rank // 2) * 5, (self.rank % 2) * 5],
shard_sizes=[5, 5],
- placement=f"rank:{self.rank}/cuda:{self.rank}",
+ placement=f"rank:{self.rank}/cuda:{self.rank}"
)
shards_metadata = []
@@ -2890,13 +2635,11 @@ class TestShardedTensorFromLocalShards(ShardedTensorTestBase):
if r == self.rank:
shards_metadata.append(local_shard_metadata)
else:
- shards_metadata.append(
- ShardMetadata(
- shard_offsets=[(r // 2) * 5, (r % 2) * 5],
- shard_sizes=[5, 5],
- placement=f"rank:{r}/cuda:{r}",
- )
- )
+ shards_metadata.append(ShardMetadata(
+ shard_offsets=[(r // 2) * 5, (r % 2) * 5],
+ shard_sizes=[5, 5],
+ placement=f"rank:{r}/cuda:{r}"
+ ))
tensor_properties = TensorProperties(
dtype=torch.get_default_dtype(),
@@ -2913,120 +2656,85 @@ class TestShardedTensorFromLocalShards(ShardedTensorTestBase):
)
empty_local_shards = []
- with self.assertRaisesRegex(
- RuntimeError, "does not match number of local shards metadata"
- ):
+ with self.assertRaisesRegex(RuntimeError, 'does not match number of local shards metadata'):
ShardedTensor._init_from_local_shards_and_global_metadata(
- empty_local_shards, sharded_tensor_metadata
+ empty_local_shards,
+ sharded_tensor_metadata
)
wrong_num_shards = [
- sharded_tensor.Shard(
- torch.randn(5, 5, device=f"cuda:{self.rank}"), local_shard_metadata
- ),
- sharded_tensor.Shard(
- torch.randn(5, 5, device=f"cuda:{self.rank}"), local_shard_metadata
- ),
+ sharded_tensor.Shard(torch.randn(5, 5, device=f"cuda:{self.rank}"), local_shard_metadata),
+ sharded_tensor.Shard(torch.randn(5, 5, device=f"cuda:{self.rank}"), local_shard_metadata)
]
- with self.assertRaisesRegex(
- RuntimeError, "does not match number of local shards metadata"
- ):
+ with self.assertRaisesRegex(RuntimeError, 'does not match number of local shards metadata'):
ShardedTensor._init_from_local_shards_and_global_metadata(
- wrong_num_shards, sharded_tensor_metadata
+ wrong_num_shards,
+ sharded_tensor_metadata
)
- with self.assertRaisesRegex(
- ValueError, "Shard tensor size does not match with metadata.shard_lengths"
- ):
- wrong_size_shards = [
- sharded_tensor.Shard(
- torch.randn(2, 3, device=f"cuda:{self.rank}"), local_shard_metadata
- )
- ]
+ with self.assertRaisesRegex(ValueError, 'Shard tensor size does not match with metadata.shard_lengths'):
+ wrong_size_shards = [sharded_tensor.Shard(torch.randn(2, 3, device=f"cuda:{self.rank}"), local_shard_metadata)]
- with self.assertRaisesRegex(
- ValueError,
- "Local shard tensor device does not match with local Shard's placement",
- ):
- wrong_device_shards = [
- sharded_tensor.Shard(torch.randn(5, 5), local_shard_metadata)
- ]
+ with self.assertRaisesRegex(ValueError, "Local shard tensor device does not match with local Shard's placement"):
+ wrong_device_shards = [sharded_tensor.Shard(torch.randn(5, 5), local_shard_metadata)]
wrong_dtype_shards = [
- sharded_tensor.Shard(
- torch.ones(5, 5, device=f"cuda:{self.rank}", dtype=torch.int),
- local_shard_metadata,
- )
+ sharded_tensor.Shard(torch.ones(5, 5, device=f"cuda:{self.rank}", dtype=torch.int), local_shard_metadata)
]
- with self.assertRaisesRegex(
- ValueError, "Local shards' tensor dtype property is incompatible with"
- ):
+ with self.assertRaisesRegex(ValueError, "Local shards' tensor dtype property is incompatible with"):
ShardedTensor._init_from_local_shards_and_global_metadata(
- wrong_dtype_shards, sharded_tensor_metadata
+ wrong_dtype_shards,
+ sharded_tensor_metadata
)
indices = [[0, 1, 1], [2, 0, 2]]
values = [3.2, 4.5, 5.8]
- sparse_tensor = torch.sparse_coo_tensor(
- indices, values, (5, 5), device=f"cuda:{self.rank}"
- )
+ sparse_tensor = torch.sparse_coo_tensor(indices, values, (5, 5), device=f"cuda:{self.rank}")
wrong_layout_shards = [
sharded_tensor.Shard(sparse_tensor, local_shard_metadata)
]
- with self.assertRaisesRegex(
- ValueError, "Local shards' tensor layout property is incompatible with"
- ):
+ with self.assertRaisesRegex(ValueError, "Local shards' tensor layout property is incompatible with"):
ShardedTensor._init_from_local_shards_and_global_metadata(
- wrong_layout_shards, sharded_tensor_metadata
+ wrong_layout_shards,
+ sharded_tensor_metadata
)
wrong_requires_grad_shards = [
- sharded_tensor.Shard(
- torch.randn(5, 5, device=f"cuda:{self.rank}", requires_grad=True),
- local_shard_metadata,
- )
+ sharded_tensor.Shard(torch.randn(5, 5, device=f"cuda:{self.rank}", requires_grad=True), local_shard_metadata)
]
- with self.assertRaisesRegex(
- ValueError,
- "Local shards' tensor requires_grad property is incompatible with",
- ):
+ with self.assertRaisesRegex(ValueError, "Local shards' tensor requires_grad property is incompatible with"):
ShardedTensor._init_from_local_shards_and_global_metadata(
- wrong_requires_grad_shards, sharded_tensor_metadata
+ wrong_requires_grad_shards,
+ sharded_tensor_metadata
)
wrong_memory_format_shards = [
- sharded_tensor.Shard(
- torch.randn(5, 5, device=f"cuda:{self.rank}").t(), local_shard_metadata
- )
+ sharded_tensor.Shard(torch.randn(5, 5, device=f"cuda:{self.rank}").t(), local_shard_metadata)
]
- with self.assertRaisesRegex(
- ValueError,
- "Only torch.contiguous_format memory_format is currently supported",
- ):
+ with self.assertRaisesRegex(ValueError, 'Only torch.contiguous_format memory_format is currently supported'):
ShardedTensor._init_from_local_shards_and_global_metadata(
- wrong_memory_format_shards, sharded_tensor_metadata
+ wrong_memory_format_shards,
+ sharded_tensor_metadata
)
# pin_memory can only be on CPU
local_shard_metadata.placement = _remote_device(f"rank:{self.rank}/cpu")
wrong_pin_memory_shards = [
- sharded_tensor.Shard(
- torch.randn(5, 5, pin_memory=True), local_shard_metadata
- )
+ sharded_tensor.Shard(torch.randn(5, 5, pin_memory=True), local_shard_metadata)
]
- with self.assertRaisesRegex(
- ValueError, "Local shards' tensor pin_memory property is incompatible with"
- ):
+ with self.assertRaisesRegex(ValueError, "Local shards' tensor pin_memory property is incompatible with"):
ShardedTensor._init_from_local_shards_and_global_metadata(
- wrong_pin_memory_shards, sharded_tensor_metadata
+ wrong_pin_memory_shards,
+ sharded_tensor_metadata
)
-
class TestShardedTensorCustomOps(ShardedTensorTestBase):
+
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_custom_op(self):
+
@custom_sharded_op_impl(torch.asin)
def my_sharded_asin(types, args, kwargs, process_group):
return torch.asin(args[0].local_shards()[0].tensor)
@@ -3049,6 +2757,7 @@ class TestShardedTensorCustomOps(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_custom_op_override(self):
+
t = torch.rand(10, 10).cuda(self.rank)
from torch.distributed._shard.sharding_spec.api import custom_sharding_spec_op
@@ -3067,7 +2776,7 @@ class TestShardedTensorCustomOps(ShardedTensorTestBase):
],
)
m = torch.nn.Linear(32, 16).cuda(self.rank)
- shard_parameter(m, "weight", spec)
+ shard_parameter(m, 'weight', spec)
result = m(torch.rand(15, 32).cuda(self.rank))
self.assertEqual(t, result)
@@ -3076,19 +2785,17 @@ class TestShardedTensorCustomOps(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_custom_op_errors(self):
- with self.assertRaisesRegex(TypeError, "expects signature"):
+ with self.assertRaisesRegex(TypeError, 'expects signature'):
@custom_sharded_op_impl(torch.nn.functional.linear)
def my_op1(types, args, kwargs, process_group, random_param):
pass
- with self.assertRaisesRegex(TypeError, "expects signature"):
-
+ with self.assertRaisesRegex(TypeError, 'expects signature'):
@custom_sharded_op_impl(torch.nn.functional.linear)
def my_op2(types):
pass
-
class TestShardMetadata(ShardedTensorTestBase):
@with_comms
@requires_nccl()
@@ -3137,8 +2844,8 @@ class TestShardedTensorSubGroupInit(TestCase):
metadata=ShardMetadata(
shard_offsets=[3 * (rank // sub_group_sz)],
shard_sizes=[3],
- placement=f"rank:{rank}/meta",
- ),
+ placement=f"rank:{rank}/meta"
+ )
)
],
6,
@@ -3162,7 +2869,8 @@ class TestShardedTensorSubGroupInit(TestCase):
for r in sub_pg_ranks:
_parse_and_validate_remote_device(
- sub_pg, _remote_device(f"rank:{r}/cuda:{r % sub_group_sz}")
+ sub_pg,
+ _remote_device(f"rank:{r}/cuda:{r % sub_group_sz}")
)
@@ -3215,10 +2923,7 @@ class TestCreateTensorNoProcessGroupMode(TestCase):
sizes = shard_metadata.shard_sizes
st_local_shards.append(
Shard(
- tensor=src[
- offsets[0] : offsets[0] + sizes[0],
- offsets[1] : offsets[1] + sizes[1],
- ],
+ tensor=src[offsets[0]:offsets[0] + sizes[0], offsets[1]:offsets[1] + sizes[1]],
metadata=shard_metadata,
)
)
@@ -3228,6 +2933,5 @@ class TestCreateTensorNoProcessGroupMode(TestCase):
sharded_tensor_metadata=st_metadata,
)
-
-if __name__ == "__main__":
+if __name__ == '__main__':
run_tests()
diff --git a/test/distributed/_shard/sharded_tensor/test_sharded_tensor_reshard.py b/test/distributed/_shard/sharded_tensor/test_sharded_tensor_reshard.py
index c3fe5ee681..ec053c95b4 100644
--- a/test/distributed/_shard/sharded_tensor/test_sharded_tensor_reshard.py
+++ b/test/distributed/_shard/sharded_tensor/test_sharded_tensor_reshard.py
@@ -4,10 +4,22 @@ import sys
from itertools import product
import torch
-from torch.distributed._shard import _shard_tensor, sharded_tensor
-from torch.distributed._shard.sharding_spec import EnumerableShardingSpec, ShardMetadata
-from torch.testing._internal.common_distributed import requires_nccl, skip_if_lt_x_gpu
-from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
+from torch.distributed._shard import (
+ sharded_tensor,
+ _shard_tensor,
+)
+from torch.distributed._shard.sharding_spec import (
+ EnumerableShardingSpec,
+ ShardMetadata,
+)
+from torch.testing._internal.common_distributed import (
+ requires_nccl,
+ skip_if_lt_x_gpu,
+)
+from torch.testing._internal.common_utils import (
+ TEST_WITH_DEV_DBG_ASAN,
+ run_tests,
+)
from torch.testing._internal.distributed._shard.sharded_tensor import (
ShardedTensorTestBase,
with_comms,
@@ -33,9 +45,7 @@ class TestReshard(ShardedTensorTestBase):
st.reshard(reshard_spec)
self.assertEqual(1, len(st.local_shards()))
self.assertEqual(1, len(st_compare.local_shards()))
- st_compare._metadata.shards_metadata.sort(
- key=lambda metadata: metadata.placement.rank()
- )
+ st_compare._metadata.shards_metadata.sort(key=lambda metadata: metadata.placement.rank())
self.assertEqual(st._metadata, st_compare._metadata)
self.assertEqual(st.local_tensor(), st_compare.local_tensor())
self.assertEqual(
diff --git a/test/distributed/_shard/sharding_plan/test_sharding_plan.py b/test/distributed/_shard/sharding_plan/test_sharding_plan.py
index c1ca7a6c7b..0536163a18 100644
--- a/test/distributed/_shard/sharding_plan/test_sharding_plan.py
+++ b/test/distributed/_shard/sharding_plan/test_sharding_plan.py
@@ -1,19 +1,26 @@
+
# Owner(s): ["oncall: distributed"]
import sys
import torch
-import torch.distributed as dist
import torch.nn as nn
+import torch.distributed as dist
+from torch.testing._internal.common_distributed import (
+ requires_nccl,
+ skip_if_lt_x_gpu,
+)
from torch.distributed._shard import shard_module
-from torch.distributed._shard.sharded_tensor import ShardedTensor
from torch.distributed._shard.sharding_plan import ShardingPlan, ShardingPlanner
from torch.distributed._shard.sharding_spec import ChunkShardingSpec
-from torch.testing._internal.common_distributed import requires_nccl, skip_if_lt_x_gpu
+from torch.distributed._shard.sharded_tensor import ShardedTensor
-from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
+from torch.testing._internal.common_utils import (
+ TEST_WITH_DEV_DBG_ASAN,
+ run_tests,
+)
from torch.testing._internal.distributed._shard.sharded_tensor import (
- ShardedTensorTestBase,
TEST_GPU_NUM,
+ ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_ops_common import (
@@ -58,7 +65,9 @@ class TestShardingPlan(ShardedTensorTestBase):
plan={
"fc1.weight": torch.randn(3, 4),
},
- output_plan={"": rowwise_sharding_spec},
+ output_plan={
+ "": rowwise_sharding_spec
+ },
)
megatron_lm = SimpleMegatronLM([[17, 12], [12, 29]]).cuda(self.rank)
@@ -73,7 +82,9 @@ class TestShardingPlan(ShardedTensorTestBase):
plan={
"fc1.weight": rowwise_sharding_spec,
},
- output_plan={"": torch.randn(3, 4)},
+ output_plan={
+ "": torch.randn(3, 4)
+ },
)
with self.assertRaisesRegex(
@@ -87,7 +98,9 @@ class TestShardingPlan(ShardedTensorTestBase):
"fc3.weight": rowwise_sharding_spec,
},
)
- with self.assertRaisesRegex(AttributeError, "has no attribute"):
+ with self.assertRaisesRegex(
+ AttributeError, "has no attribute"
+ ):
# shard the module with the provided sharding plan
shard_module(megatron_lm, sharding_plan_wrong_module_path)
@@ -96,7 +109,9 @@ class TestShardingPlan(ShardedTensorTestBase):
"fc1.biass": rowwise_sharding_spec,
},
)
- with self.assertRaisesRegex(AttributeError, "has no attribute"):
+ with self.assertRaisesRegex(
+ AttributeError, "has no attribute"
+ ):
# shard the module with the provided sharding plan
shard_module(megatron_lm, sharding_plan_wrong_param_path)
@@ -140,7 +155,7 @@ class TestShardingPlan(ShardedTensorTestBase):
sharding_plan = ShardingPlan(
plan={
"fc1.weight": colwise_sharding_spec,
- "fc2.weight": rowwise_sharding_spec,
+ "fc2.weight": rowwise_sharding_spec
}
)
@@ -149,6 +164,5 @@ class TestShardingPlan(ShardedTensorTestBase):
if self.rank >= 2:
shard_module(megatron_lm, sharding_plan, process_group=pg)
-
if __name__ == "__main__":
run_tests()
diff --git a/test/distributed/_shard/sharding_spec/test_sharding_spec.py b/test/distributed/_shard/sharding_spec/test_sharding_spec.py
index 8502a63f25..7ff27a0f30 100644
--- a/test/distributed/_shard/sharding_spec/test_sharding_spec.py
+++ b/test/distributed/_shard/sharding_spec/test_sharding_spec.py
@@ -1,48 +1,51 @@
# Owner(s): ["oncall: distributed"]
-import copy
-from dataclasses import dataclass
from typing import List, Union
+from dataclasses import dataclass
+import copy
import torch
-from torch.distributed._shard import _shard_tensor, sharded_tensor
-from torch.distributed._shard.sharded_tensor import (
- ShardedTensor,
- ShardedTensorMetadata,
- TensorProperties,
+from torch.testing._internal.common_utils import TestCase
+from torch.testing._internal.common_distributed import (
+ requires_nccl,
+ skip_if_lt_x_gpu,
)
+from torch.distributed._shard import sharded_tensor, _shard_tensor
from torch.distributed._shard.sharding_spec import (
- _infer_sharding_spec_from_shards_metadata,
+ ShardingSpec,
ChunkShardingSpec,
DevicePlacementSpec,
EnumerableShardingSpec,
- ShardingSpec,
ShardMetadata,
+ _infer_sharding_spec_from_shards_metadata,
+)
+from torch.distributed._shard.sharded_tensor import (
+ TensorProperties,
+ ShardedTensor,
+ ShardedTensorMetadata,
)
from torch.distributed._shard.sharding_spec._internals import (
check_tensor,
- get_chunk_sharding_params,
- get_chunked_dim_size,
get_split_size,
+ get_chunked_dim_size,
+ get_chunk_sharding_params,
validate_non_overlapping_shards_metadata,
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU
-from torch.testing._internal.common_distributed import requires_nccl, skip_if_lt_x_gpu
from torch.testing._internal.common_utils import (
run_tests,
skip_but_pass_in_sandcastle_if,
- TestCase,
+)
+from torch.testing._internal.distributed._shard.sharded_tensor._test_st_common import (
+ _chunk_sharding_specs_list_for_test,
)
from torch.testing._internal.distributed._shard.sharded_tensor import (
ShardedTensorTestBase,
with_comms,
)
-from torch.testing._internal.distributed._shard.sharded_tensor._test_st_common import (
- _chunk_sharding_specs_list_for_test,
-)
-
class TestShardingSpec(TestCase):
- @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "2 CUDA GPUs are needed")
+
+ @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, '2 CUDA GPUs are needed')
def test_device_placement(self):
# valid devices
DevicePlacementSpec("cuda:0")
@@ -62,7 +65,7 @@ class TestShardingSpec(TestCase):
with self.assertRaisesRegex(RuntimeError, "Invalid device string"):
DevicePlacementSpec("rank:0/cpu2")
- @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "2 CUDA GPUs are needed")
+ @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, '2 CUDA GPUs are needed')
def test_chunked_sharding_spec(self):
# Test valid specs.
ChunkShardingSpec(0, [torch.device(0), torch.device(1)])
@@ -95,173 +98,165 @@ class TestShardingSpec(TestCase):
with self.assertRaisesRegex(RuntimeError, "Invalid device string"):
ChunkShardingSpec(0, ["rank:0/cuda:foo", "cuda:1"])
- @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "2 CUDA GPUs are needed")
+ @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, '2 CUDA GPUs are needed')
def test_enumerable_sharding_spec(self):
# test valid specs
# test row-wise sharding
- spec = EnumerableShardingSpec(
- [
- ShardMetadata(
- shard_offsets=[0, 0],
- shard_sizes=[5, 5],
- placement="cuda:0",
- ),
- ShardMetadata(
- shard_offsets=[5, 0],
- shard_sizes=[5, 5],
- placement="cuda:1",
- ),
- ]
- )
+ spec = EnumerableShardingSpec([
+ ShardMetadata(
+ shard_offsets=[0, 0],
+ shard_sizes=[5, 5],
+ placement="cuda:0",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 0],
+ shard_sizes=[5, 5],
+ placement="cuda:1",
+ )
+ ])
check_tensor(spec.shards, torch.rand(10, 5).size())
# test row and column sharding
- spec = EnumerableShardingSpec(
- [
- ShardMetadata(
- shard_offsets=[0, 0],
- shard_sizes=[3, 3],
- placement="cuda:0",
- ),
- ShardMetadata(
- shard_offsets=[0, 3],
- shard_sizes=[3, 3],
- placement="cuda:1",
- ),
- ShardMetadata(
- shard_offsets=[3, 0],
- shard_sizes=[3, 3],
- placement="cuda:2",
- ),
- ShardMetadata(
- shard_offsets=[3, 3],
- shard_sizes=[3, 3],
- placement="cuda:3",
- ),
- ]
- )
+ spec = EnumerableShardingSpec([
+ ShardMetadata(
+ shard_offsets=[0, 0],
+ shard_sizes=[3, 3],
+ placement="cuda:0",
+ ),
+ ShardMetadata(
+ shard_offsets=[0, 3],
+ shard_sizes=[3, 3],
+ placement="cuda:1",
+ ),
+ ShardMetadata(
+ shard_offsets=[3, 0],
+ shard_sizes=[3, 3],
+ placement="cuda:2",
+ ),
+ ShardMetadata(
+ shard_offsets=[3, 3],
+ shard_sizes=[3, 3],
+ placement="cuda:3",
+ ),
+ ])
check_tensor(spec.shards, torch.rand(6, 6).size())
# test uneven shard sizes.
- spec = EnumerableShardingSpec(
- [
- ShardMetadata(
- shard_offsets=[0, 0],
- shard_sizes=[2, 4],
- placement="cuda:0",
- ),
- ShardMetadata(
- shard_offsets=[0, 4],
- shard_sizes=[4, 2],
- placement="cuda:1",
- ),
- ShardMetadata(
- shard_offsets=[2, 0],
- shard_sizes=[4, 4],
- placement="cuda:2",
- ),
- ShardMetadata(
- shard_offsets=[4, 4],
- shard_sizes=[2, 2],
- placement="cuda:3",
- ),
- ]
- )
+ spec = EnumerableShardingSpec([
+ ShardMetadata(
+ shard_offsets=[0, 0],
+ shard_sizes=[2, 4],
+ placement="cuda:0",
+ ),
+ ShardMetadata(
+ shard_offsets=[0, 4],
+ shard_sizes=[4, 2],
+ placement="cuda:1",
+ ),
+ ShardMetadata(
+ shard_offsets=[2, 0],
+ shard_sizes=[4, 4],
+ placement="cuda:2",
+ ),
+ ShardMetadata(
+ shard_offsets=[4, 4],
+ shard_sizes=[2, 2],
+ placement="cuda:3",
+ ),
+ ])
check_tensor(spec.shards, torch.rand(6, 6).size())
# test invalid sharding
- with self.assertRaisesRegex(ValueError, "Could not parse remote_device"):
+ with self.assertRaisesRegex(ValueError, 'Could not parse remote_device'):
ShardMetadata(shard_offsets=[0], shard_sizes=[1], placement="cuda:foo")
- with self.assertRaisesRegex(ValueError, "same number of elements"):
+ with self.assertRaisesRegex(ValueError, 'same number of elements'):
ShardMetadata(shard_offsets=[0, 0], shard_sizes=[1], placement="cuda:0")
- with self.assertRaisesRegex(ValueError, "shard_offsets should be >=0"):
+ with self.assertRaisesRegex(ValueError, 'shard_offsets should be >=0'):
ShardMetadata(shard_offsets=[-1, 0], shard_sizes=[1, 1], placement="cuda:0")
- with self.assertRaisesRegex(ValueError, "shard_sizes should be >= 0"):
+ with self.assertRaisesRegex(ValueError, 'shard_sizes should be >= 0'):
ShardMetadata(shard_offsets=[0, 0], shard_sizes=[-1, 1], placement="cuda:0")
- with self.assertRaisesRegex(ValueError, "Empty shard list provided"):
+ with self.assertRaisesRegex(ValueError, 'Empty shard list provided'):
EnumerableShardingSpec([])
- with self.assertRaisesRegex(ValueError, "Found inconsistent ranks for shards"):
- EnumerableShardingSpec(
- [
- ShardMetadata(
- shard_offsets=[0, 0], shard_sizes=[1, 1], placement="cpu"
- ),
- ShardMetadata(
- shard_offsets=[0, 0, 0], shard_sizes=[1, 1, 1], placement="cpu"
- ),
- ]
- )
-
- with self.assertRaisesRegex(ValueError, "Shards.*overlap"):
- EnumerableShardingSpec(
- [
- ShardMetadata(
- shard_offsets=[0, 0], shard_sizes=[3, 3], placement="cpu"
- ),
- ShardMetadata(
- shard_offsets=[2, 0], shard_sizes=[3, 3], placement="cpu"
- ),
- ]
- )
-
- spec = EnumerableShardingSpec(
- [
+ with self.assertRaisesRegex(ValueError, 'Found inconsistent ranks for shards'):
+ EnumerableShardingSpec([
ShardMetadata(
shard_offsets=[0, 0],
- shard_sizes=[5, 5],
- placement="cuda:0",
+ shard_sizes=[1, 1],
+ placement="cpu"
),
ShardMetadata(
- shard_offsets=[5, 0],
- shard_sizes=[5, 5],
- placement="cuda:1",
+ shard_offsets=[0, 0, 0],
+ shard_sizes=[1, 1, 1],
+ placement="cpu"
),
- ]
- )
+ ])
- with self.assertRaisesRegex(ValueError, "Rank of tensor is.*but shards rank"):
- check_tensor(spec.shards, torch.rand(10, 10, 10).size())
-
- spec = EnumerableShardingSpec(
- [
+ with self.assertRaisesRegex(ValueError, 'Shards.*overlap'):
+ EnumerableShardingSpec([
ShardMetadata(
shard_offsets=[0, 0],
- shard_sizes=[5, 5],
- placement="cuda:0",
+ shard_sizes=[3, 3],
+ placement="cpu"
),
ShardMetadata(
- shard_offsets=[5, 0],
- shard_sizes=[5, 5],
- placement="cuda:1",
+ shard_offsets=[2, 0],
+ shard_sizes=[3, 3],
+ placement="cpu"
),
- ]
- )
+ ])
- with self.assertRaisesRegex(ValueError, "exceeds tensor dim"):
+ spec = EnumerableShardingSpec([
+ ShardMetadata(
+ shard_offsets=[0, 0],
+ shard_sizes=[5, 5],
+ placement="cuda:0",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 0],
+ shard_sizes=[5, 5],
+ placement="cuda:1",
+ )
+ ])
+
+ with self.assertRaisesRegex(ValueError, 'Rank of tensor is.*but shards rank'):
+ check_tensor(spec.shards, torch.rand(10, 10, 10).size())
+
+ spec = EnumerableShardingSpec([
+ ShardMetadata(
+ shard_offsets=[0, 0],
+ shard_sizes=[5, 5],
+ placement="cuda:0",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 0],
+ shard_sizes=[5, 5],
+ placement="cuda:1",
+ )
+ ])
+
+ with self.assertRaisesRegex(ValueError, 'exceeds tensor dim'):
check_tensor(spec.shards, torch.rand(10, 3).size())
- spec = EnumerableShardingSpec(
- [
- ShardMetadata(
- shard_offsets=[0, 0],
- shard_sizes=[5, 5],
- placement="cuda:0",
- ),
- ShardMetadata(
- shard_offsets=[5, 5],
- shard_sizes=[5, 5],
- placement="cuda:1",
- ),
- ]
- )
+ spec = EnumerableShardingSpec([
+ ShardMetadata(
+ shard_offsets=[0, 0],
+ shard_sizes=[5, 5],
+ placement="cuda:0",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 5],
+ shard_sizes=[5, 5],
+ placement="cuda:1",
+ )
+ ])
- with self.assertRaisesRegex(ValueError, "does not match tensor volume"):
+ with self.assertRaisesRegex(ValueError, 'does not match tensor volume'):
check_tensor(spec.shards, torch.rand(10, 10).size())
def test_get_split_size(self):
@@ -318,7 +313,7 @@ class TestShardingSpec(TestCase):
shard_offsets=[5, 0],
shard_sizes=[10, 5],
placement="cuda:1",
- ),
+ )
]
spec = _infer_sharding_spec_from_shards_metadata(shards_metadata)
self.assertTrue(isinstance(spec, EnumerableShardingSpec))
@@ -334,7 +329,7 @@ class TestShardingSpec(TestCase):
shard_offsets=[16],
shard_sizes=[9],
placement="cuda:1",
- ),
+ )
]
spec = _infer_sharding_spec_from_shards_metadata(shards_metadata)
self.assertTrue(isinstance(spec, EnumerableShardingSpec))
@@ -374,9 +369,7 @@ class TestShardingSpec(TestCase):
shard_size = copy.deepcopy(st_size)
offsets = [0] * len(st_size)
offsets[sharding_dim] = split_size * idx
- shard_size[sharding_dim] = get_chunked_dim_size(
- st_size[sharding_dim], split_size, idx
- )
+ shard_size[sharding_dim] = get_chunked_dim_size(st_size[sharding_dim], split_size, idx)
shards_metadata[placement.rank()] = ShardMetadata(
shard_offsets=offsets,
shard_sizes=shard_size,
@@ -397,100 +390,73 @@ class TestShardingSpec(TestCase):
self._infer_chunk_sharding_spec_case(spec.placements, 1, [12, 16])
self._infer_chunk_sharding_spec_case(spec.placements, 2, [4, 18, 15])
self._infer_chunk_sharding_spec_case(spec.placements, 3, [7, 12, 16, 37])
- self._infer_chunk_sharding_spec_case(
- spec.placements, 4, [50, 4, 18, 15, 77]
- )
+ self._infer_chunk_sharding_spec_case(spec.placements, 4, [50, 4, 18, 15, 77])
def test_check_overlapping(self):
shards = [
ShardMetadata(
- shard_offsets=[0, 0],
- shard_sizes=[5, 5],
- placement="cuda:0",
+ shard_offsets=[0, 0], shard_sizes=[5, 5], placement="cuda:0",
),
ShardMetadata(
- shard_offsets=[5, 0],
- shard_sizes=[5, 5],
- placement="cuda:1",
- ),
+ shard_offsets=[5, 0], shard_sizes=[5, 5], placement="cuda:1",
+ )
]
validate_non_overlapping_shards_metadata(shards)
shards = [
ShardMetadata(
- shard_offsets=[0, 0],
- shard_sizes=[5, 5],
- placement="cuda:0",
+ shard_offsets=[0, 0], shard_sizes=[5, 5], placement="cuda:0",
),
ShardMetadata(
- shard_offsets=[4, 0],
- shard_sizes=[5, 5],
- placement="cuda:1",
- ),
+ shard_offsets=[4, 0], shard_sizes=[5, 5], placement="cuda:1",
+ )
]
with self.assertRaisesRegex(ValueError, "overlap"):
validate_non_overlapping_shards_metadata(shards)
shards = [
ShardMetadata(
- shard_offsets=[0, 0],
- shard_sizes=[5, 5],
- placement="cuda:0",
+ shard_offsets=[0, 0], shard_sizes=[5, 5], placement="cuda:0",
),
ShardMetadata(
- shard_offsets=[0, 4],
- shard_sizes=[5, 5],
- placement="cuda:1",
- ),
+ shard_offsets=[0, 4], shard_sizes=[5, 5], placement="cuda:1",
+ )
]
with self.assertRaisesRegex(ValueError, "overlap"):
validate_non_overlapping_shards_metadata(shards)
shards = [
ShardMetadata(
- shard_offsets=[5, 0, 5],
- shard_sizes=[5, 5, 5],
- placement="cuda:0",
+ shard_offsets=[5, 0, 5], shard_sizes=[5, 5, 5], placement="cuda:0",
),
ShardMetadata(
- shard_offsets=[5, 5, 5],
- shard_sizes=[5, 5, 5],
- placement="cuda:1",
- ),
+ shard_offsets=[5, 5, 5], shard_sizes=[5, 5, 5], placement="cuda:1",
+ )
]
validate_non_overlapping_shards_metadata(shards)
shards = [
ShardMetadata(
- shard_offsets=[5, 0, 5],
- shard_sizes=[5, 5, 5],
- placement="cuda:0",
+ shard_offsets=[5, 0, 5], shard_sizes=[5, 5, 5], placement="cuda:0",
),
ShardMetadata(
- shard_offsets=[5, 4, 5],
- shard_sizes=[5, 5, 5],
- placement="cuda:1",
- ),
+ shard_offsets=[5, 4, 5], shard_sizes=[5, 5, 5], placement="cuda:1",
+ )
]
with self.assertRaisesRegex(ValueError, "overlap"):
validate_non_overlapping_shards_metadata(shards)
shards = [
ShardMetadata(
- shard_offsets=[5, 0, 5],
- shard_sizes=[5, 5, 5],
- placement="cuda:0",
+ shard_offsets=[5, 0, 5], shard_sizes=[5, 5, 5], placement="cuda:0",
),
ShardMetadata(
- shard_offsets=[5, 4, 9],
- shard_sizes=[5, 5, 5],
- placement="cuda:1",
- ),
+ shard_offsets=[5, 4, 9], shard_sizes=[5, 5, 5], placement="cuda:1",
+ )
]
with self.assertRaisesRegex(ValueError, "overlap"):
validate_non_overlapping_shards_metadata(shards)
-
 # Custom ShardingSpec, a simple example to do grid sharding
@dataclass
class GridShardingSpec(ShardingSpec):
@@ -502,11 +468,10 @@ class GridShardingSpec(ShardingSpec):
if not isinstance(remote_device, torch.distributed._remote_device):
self.placements[i] = torch.distributed._remote_device(remote_device)
- def build_metadata(
- self,
- tensor_sizes: torch.Size,
- tensor_properties: TensorProperties,
- ) -> ShardedTensorMetadata:
+ def build_metadata(self,
+ tensor_sizes: torch.Size,
+ tensor_properties: TensorProperties,
+ ) -> ShardedTensorMetadata:
tensor_num_dim = len(tensor_sizes)
assert tensor_num_dim == 2, "only support 2-dim tensor for grid sharding"
shards_metadata = []
@@ -523,25 +488,24 @@ class GridShardingSpec(ShardingSpec):
for col_idx in range(col_chunks):
shards_metadata.append(
ShardMetadata(
- shard_offsets=[
- row_idx * self.grid_size,
- col_idx * self.grid_size,
- ],
+ shard_offsets=[row_idx * self.grid_size, col_idx * self.grid_size],
shard_sizes=[self.grid_size, self.grid_size],
- placement=self.placements[row_idx * row_chunks + col_idx],
+ placement=self.placements[row_idx * row_chunks + col_idx]
)
)
return ShardedTensorMetadata(
shards_metadata=shards_metadata,
size=tensor_sizes,
- tensor_properties=tensor_properties,
+ tensor_properties=tensor_properties
)
- def shard(
- self, tensor: torch.Tensor, src_rank: int = 0, process_group=None
- ) -> ShardedTensor:
- raise NotImplementedError("GridShardingSpec.shard not implemented yet!")
+ def shard(self,
+ tensor: torch.Tensor,
+ src_rank: int = 0,
+ process_group=None) -> ShardedTensor:
+
+ raise NotImplementedError("GridShardingSpec.shard not implemented yet!")
class TestCustomShardingSpec(ShardedTensorTestBase):
def test_custom_sharding_spec(self):
@@ -552,7 +516,10 @@ class TestCustomShardingSpec(ShardedTensorTestBase):
"rank:3/cuda:3",
]
- grid_spec = GridShardingSpec(grid_size=4, placements=ranks)
+ grid_spec = GridShardingSpec(
+ grid_size=4,
+ placements=ranks
+ )
tensor_properties = TensorProperties(
dtype=torch.get_default_dtype(),
@@ -569,8 +536,8 @@ class TestCustomShardingSpec(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_custom_sharding_spec_tensor_ctor(self):
- """Test sharded_tensor.ones(...) with the custom
- grid sharding spec.
+ """ Test sharded_tensor.ones(...) with the custom
+ grid sharding spec.
"""
ranks = [
@@ -580,7 +547,10 @@ class TestCustomShardingSpec(ShardedTensorTestBase):
"rank:3/cuda:3",
]
- grid_spec = GridShardingSpec(grid_size=2, placements=ranks)
+ grid_spec = GridShardingSpec(
+ grid_size=2,
+ placements=ranks
+ )
st = sharded_tensor.ones(grid_spec, 4, 4)
@@ -596,8 +566,8 @@ class TestCustomShardingSpec(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_custom_sharding_spec_shard_tensor(self):
- """Test custom spec can be invoked from the
- _shard_tensor callsite.
+ """ Test custom spec can be invoked from the
+ _shard_tensor callsite.
"""
ranks = [
@@ -607,11 +577,14 @@ class TestCustomShardingSpec(ShardedTensorTestBase):
"rank:3/cuda:3",
]
- grid_spec = GridShardingSpec(grid_size=2, placements=ranks)
+ grid_spec = GridShardingSpec(
+ grid_size=2,
+ placements=ranks
+ )
- with self.assertRaisesRegex(NotImplementedError, "not implemented"):
+ with self.assertRaisesRegex(NotImplementedError, 'not implemented'):
_shard_tensor(torch.randn(8, 8), grid_spec)
-if __name__ == "__main__":
+if __name__ == '__main__':
run_tests()
diff --git a/test/distributed/_shard/test_sharder.py b/test/distributed/_shard/test_sharder.py
index 9a59f891bc..79bcfe56f3 100644
--- a/test/distributed/_shard/test_sharder.py
+++ b/test/distributed/_shard/test_sharder.py
@@ -1,20 +1,24 @@
+
# Owner(s): ["oncall: distributed"]
-import copy
import sys
+import copy
import torch
import torch.nn as nn
+from torch.testing._internal.common_distributed import (
+ requires_nccl,
+ skip_if_lt_x_gpu,
+)
from torch.distributed._shard import shard_module
-from torch.distributed._shard.sharded_tensor import ShardedTensor
-from torch.distributed._shard.sharder import Sharder
from torch.distributed._shard.sharding_plan import ShardingPlan
+from torch.distributed._shard.sharder import Sharder
from torch.distributed._shard.sharding_spec import ChunkShardingSpec
-from torch.testing._internal.common_distributed import requires_nccl, skip_if_lt_x_gpu
+from torch.distributed._shard.sharded_tensor import ShardedTensor
-from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
+from torch.testing._internal.common_utils import TEST_WITH_DEV_DBG_ASAN, run_tests
from torch.testing._internal.distributed._shard.sharded_tensor import (
- ShardedTensorTestBase,
TEST_GPU_NUM,
+ ShardedTensorTestBase,
with_comms,
)
@@ -25,7 +29,6 @@ if TEST_WITH_DEV_DBG_ASAN:
)
sys.exit(0)
-
# a simple collection of embedding bag implementation
class CustomEmbeddingBagCollection(nn.Module):
def __init__(self, num_bags, num_embeddings_per_bag, num_dims):
@@ -35,8 +38,9 @@ class CustomEmbeddingBagCollection(nn.Module):
for i in range(num_bags):
self.embedding_bags[f"embedding_bag_{i}"] = nn.EmbeddingBag(
- num_embeddings_per_bag, num_dims, mode="sum"
- )
+ num_embeddings_per_bag,
+ num_dims,
+ mode="sum")
def forward(self, inputs):
outputs = []
@@ -44,7 +48,6 @@ class CustomEmbeddingBagCollection(nn.Module):
outputs.append(bag(inputs))
return torch.cat(outputs)
-
# a simple sharded version of EBC
class CustomShardedEBC(nn.Module):
def __init__(self, ebc, split_idx, specs):
@@ -59,19 +62,9 @@ class CustomShardedEBC(nn.Module):
for i in range(ebc.num_bags):
bag_key = f"embedding_bag_{i}"
if i < self.split_idx:
- shard_module(
- ebc,
- plan=ShardingPlan(
- plan={f"embedding_bags.{bag_key}.weight": row_spec}
- ),
- )
+ shard_module(ebc, plan=ShardingPlan(plan={f"embedding_bags.{bag_key}.weight": row_spec}))
else:
- shard_module(
- ebc,
- plan=ShardingPlan(
- plan={f"embedding_bags.{bag_key}.weight": col_spec}
- ),
- )
+ shard_module(ebc, plan=ShardingPlan(plan={f"embedding_bags.{bag_key}.weight": col_spec}))
self.embedding_bags[bag_key] = ebc.embedding_bags[bag_key]
@@ -85,16 +78,13 @@ class CustomSharder(Sharder):
def shard(self, ebc: nn.Module) -> nn.Module:
if not isinstance(ebc, CustomEmbeddingBagCollection):
- raise RuntimeError(
- "The custom sharder only supports CustomEmbeddingBagCollection"
- )
+ raise RuntimeError("The custom sharder only supports CustomEmbeddingBagCollection")
- return CustomShardedEBC(
- ebc, self.split_sharding_idx, (self.rowwise_spec, self.colwise_spec)
- )
+ return CustomShardedEBC(ebc, self.split_sharding_idx, (self.rowwise_spec, self.colwise_spec))
class TestCustomSharder(ShardedTensorTestBase):
+
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
@@ -109,14 +99,13 @@ class TestCustomSharder(ShardedTensorTestBase):
custom_sharder = CustomSharder(
devices=[f"rank:{i}/cuda:{i}" for i in range(TEST_GPU_NUM)],
- split_sharding_idx=TEST_GPU_NUM // 2,
+ split_sharding_idx=TEST_GPU_NUM // 2
)
sharding_plan = ShardingPlan(
plan={
"ebc": custom_sharder,
- }
- )
+ })
local_model = MyModule().cuda(self.rank)
sharded_model = copy.deepcopy(local_model)
@@ -128,14 +117,8 @@ class TestCustomSharder(ShardedTensorTestBase):
emb_bags = sharded_model.ebc.embedding_bags
self.assertTrue(isinstance(emb_bags["embedding_bag_0"].weight, ShardedTensor))
self.assertTrue(isinstance(emb_bags["embedding_bag_9"].weight, ShardedTensor))
- self.assertEqual(
- emb_bags["embedding_bag_0"].weight.sharding_spec(),
- custom_sharder.rowwise_spec,
- )
- self.assertEqual(
- emb_bags["embedding_bag_9"].weight.sharding_spec(),
- custom_sharder.colwise_spec,
- )
+ self.assertEqual(emb_bags["embedding_bag_0"].weight.sharding_spec(), custom_sharder.rowwise_spec)
+ self.assertEqual(emb_bags["embedding_bag_9"].weight.sharding_spec(), custom_sharder.colwise_spec)
# make sure we can run sharded computation and compare outputs
# with the local model version
@@ -151,14 +134,13 @@ class TestCustomSharder(ShardedTensorTestBase):
def test_custom_sharder_errors(self):
custom_sharder = CustomSharder(
devices=[f"rank:{i}/cuda:{i}" for i in range(TEST_GPU_NUM)],
- split_sharding_idx=TEST_GPU_NUM // 2,
+ split_sharding_idx=TEST_GPU_NUM // 2
)
sharding_plan = ShardingPlan(
plan={
"": custom_sharder,
- }
- )
+ })
sharded_model = CustomEmbeddingBagCollection(10, 10, 8).cuda(self.rank)
@@ -174,8 +156,7 @@ class TestCustomSharder(ShardedTensorTestBase):
plan={
"embedding_bags.embedding_bag_0.weight": spec,
"embedding_bags": custom_sharder,
- }
- )
+ })
with self.assertRaisesRegex(
RuntimeError, "should not conflict with the submodule tree"
diff --git a/test/distributed/_tools/test_memory_tracker.py b/test/distributed/_tools/test_memory_tracker.py
index 3523e51e36..90dded6797 100644
--- a/test/distributed/_tools/test_memory_tracker.py
+++ b/test/distributed/_tools/test_memory_tracker.py
@@ -1,14 +1,17 @@
# Owner(s): ["oncall: distributed"]
import os
-
-import unittest
+from torch.testing._internal.common_cuda import TEST_CUDA
+from torch.testing._internal.common_utils import (
+ TestCase,
+ run_tests,
+)
import torch
import torch.nn as nn
from torch.distributed._tools import MemoryTracker
-from torch.testing._internal.common_cuda import TEST_CUDA
-from torch.testing._internal.common_utils import run_tests, TestCase
+
+import unittest
class TestMemoryTracker(TestCase):
diff --git a/test/distributed/algorithms/ddp_comm_hooks/test_ddp_hooks.py b/test/distributed/algorithms/ddp_comm_hooks/test_ddp_hooks.py
index 80cb52a7e4..2d6a17bf8d 100644
--- a/test/distributed/algorithms/ddp_comm_hooks/test_ddp_hooks.py
+++ b/test/distributed/algorithms/ddp_comm_hooks/test_ddp_hooks.py
@@ -4,8 +4,8 @@ import os
import sys
import torch
-import torch.distributed as dist
from torch import nn
+import torch.distributed as dist
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
@@ -21,13 +21,15 @@ from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
-from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
+from torch.testing._internal.common_utils import (
+ run_tests,
+ TEST_WITH_DEV_DBG_ASAN,
+)
if TEST_WITH_DEV_DBG_ASAN:
print("Multiprocessing spawn is not compatible with dev/dbg asan", file=sys.stderr)
sys.exit(0)
-
def gpus_for_rank(world_size):
visible_devices = list(range(torch.cuda.device_count()))
gpus_per_process = torch.cuda.device_count() // world_size
@@ -183,6 +185,7 @@ class DistributedDataParallelCommHookTest(MultiProcessTestCase):
torch.testing.assert_close(hook_grads, reference_grads, rtol=1e-5, atol=1e-4)
+
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_noop_hook(self):
@@ -217,7 +220,7 @@ class DistributedDataParallelCommHookTest(MultiProcessTestCase):
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = nn.Sequential(
nn.Linear(2, 4000, bias=False),
- *[nn.Linear(4000, 4000, bias=False) for _ in range(10)],
+ *[nn.Linear(4000, 4000, bias=False) for _ in range(10)]
)
gpu_model = DistributedDataParallel(
model.to(device_id),
diff --git a/test/distributed/algorithms/quantization/test_quantization.py b/test/distributed/algorithms/quantization/test_quantization.py
index daa8e9ffca..d0b6656ec9 100644
--- a/test/distributed/algorithms/quantization/test_quantization.py
+++ b/test/distributed/algorithms/quantization/test_quantization.py
@@ -1,26 +1,25 @@
# Owner(s): ["oncall: distributed"]
-import os
-import sys
-
import torch
+import os
import torch.cuda
+import sys
import torch.distributed as dist
import torch.distributed.algorithms._quantization.quantization as quant
from torch.distributed.algorithms._quantization.quantization import DQuantType
from torch.testing._internal.common_distributed import (
- init_multigpu_helper,
MultiProcessTestCase,
+ init_multigpu_helper,
requires_gloo,
- requires_nccl,
- skip_if_lt_x_gpu,
skip_if_rocm,
+ skip_if_lt_x_gpu,
+ requires_nccl,
)
from torch.testing._internal.common_utils import (
- NO_MULTIPROCESSING_SPAWN,
- run_tests,
skip_but_pass_in_sandcastle_if,
+ run_tests,
TEST_WITH_DEV_DBG_ASAN,
+ NO_MULTIPROCESSING_SPAWN,
)
torch.backends.cuda.matmul.allow_tf32 = False
diff --git a/test/distributed/algorithms/test_join.py b/test/distributed/algorithms/test_join.py
index 89a8e9e04a..77ac3de4fb 100644
--- a/test/distributed/algorithms/test_join.py
+++ b/test/distributed/algorithms/test_join.py
@@ -20,10 +20,7 @@ from torch.testing._internal.common_distributed import (
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
if TEST_WITH_DEV_DBG_ASAN:
- print(
- "Skip dev-asan as torch + multiprocessing spawn have known issues",
- file=sys.stderr,
- )
+ print("Skip dev-asan as torch + multiprocessing spawn have known issues", file=sys.stderr)
sys.exit(0)
BACKEND = dist.Backend.NCCL if torch.cuda.is_available() else dist.Backend.GLOO
@@ -45,8 +42,12 @@ class AllReducerJoinHook(JoinHook):
iteration.
run_post_hook (bool): a flag enabling the post-hook logic.
"""
-
- def __init__(self, allreducer, num_allreduces, run_post_hook):
+ def __init__(
+ self,
+ allreducer,
+ num_allreduces,
+ run_post_hook
+ ):
self.allreducer = allreducer
self.num_allreduces = num_allreduces
self.run_post_hook = run_post_hook
@@ -72,9 +73,7 @@ class AllReducerJoinHook(JoinHook):
common_rank = self.allreducer.find_common_rank(rank, is_last_joiner)
device = self.allreducer.device
if rank == common_rank:
- self.allreducer.post_hook_tensor = torch.tensor(
- [AFTER_CONSTANT], device=device
- )
+ self.allreducer.post_hook_tensor = torch.tensor([AFTER_CONSTANT], device=device)
dist.broadcast(self.allreducer.post_hook_tensor, src=common_rank)
@@ -83,7 +82,6 @@ class AllReducer(Joinable):
Example :class:`Joinable` that performs some number of all-reduces as its
per-iteration collective communication.
"""
-
def __init__(self, device, process_group):
super().__init__()
self.device = device
@@ -111,7 +109,11 @@ class AllReducer(Joinable):
"""
num_allreduces = kwargs.get("num_allreduces", 1)
run_post_hook = kwargs.get("run_post_hooks", False)
- return AllReducerJoinHook(self, num_allreduces, run_post_hook)
+ return AllReducerJoinHook(
+ self,
+ num_allreduces,
+ run_post_hook
+ )
@property
def join_device(self) -> torch.device:
@@ -125,16 +127,17 @@ class AllReducer(Joinable):
r"""
Returns the max rank of the ones to consider over the process group.
"""
- common_rank = torch.tensor([rank if to_consider else -1], device=self.device)
+ common_rank = torch.tensor(
+ [rank if to_consider else -1],
+ device=self.device
+ )
dist.all_reduce(common_rank, op=dist.ReduceOp.MAX, group=self.process_group)
common_rank = common_rank.item()
assert common_rank >= 0
return common_rank
-
class TestJoin(MultiProcessTestCase):
r"""Test cases for the generic join context."""
-
def setUp(self):
super().setUp()
os.environ["WORLD_SIZE"] = str(self.world_size)
@@ -143,11 +146,8 @@ class TestJoin(MultiProcessTestCase):
@property
def device(self):
- return (
- torch.device(self.rank)
- if BACKEND == dist.Backend.NCCL
+ return torch.device(self.rank) if BACKEND == dist.Backend.NCCL \
else torch.device("cpu")
- )
@property
def world_size(self):
@@ -170,7 +170,10 @@ class TestJoin(MultiProcessTestCase):
def dist_init(self, rank, world_size, backend=BACKEND):
store = dist.FileStore(self.file_name, world_size)
return dist.init_process_group(
- backend=backend, store=store, rank=rank, world_size=world_size
+ backend=backend,
+ store=store,
+ rank=rank,
+ world_size=world_size
)
def construct_uneven_inputs(self, base, offset, device=None):
@@ -228,35 +231,32 @@ class TestJoin(MultiProcessTestCase):
self.dist_init(self.rank, self.world_size)
allreducers = [
- AllReducer(self.device, self.process_group) for _ in range(num_joinables)
+ AllReducer(self.device, self.process_group)
+ for _ in range(num_joinables)
]
for allreducer in allreducers:
self.assertEqual(allreducer.post_hook_tensor.item(), BEFORE_CONSTANT)
- inputs = (
- self.construct_uneven_inputs(self.base_num_inputs, self.offset)
- if uneven_inputs
+ inputs = self.construct_uneven_inputs(self.base_num_inputs, self.offset) \
+ if uneven_inputs \
else self.construct_even_inputs(self.base_num_inputs)
- )
allreduce_total = 0
# Expect a `RuntimeError` if `throw_on_early_termination=True`
# Rank 0 exhausts its inputs first
- expected_msg = (
- "Rank 0 exhausted all inputs."
- if self.rank == 0
- else "Detected at least one rank that exhausted inputs. "
+ expected_msg = "Rank 0 exhausted all inputs." if self.rank == 0 \
+ else "Detected at least one rank that exhausted inputs. " \
"Throwing across all ranks."
- )
with self.assertRaisesRegex(
- RuntimeError, expected_msg
+ RuntimeError,
+ expected_msg
) if throw_on_early_termination else contextlib.nullcontext():
with Join(
allreducers,
enable=enable,
throw_on_early_termination=throw_on_early_termination,
num_allreduces=num_allreduces,
- run_post_hooks=run_post_hooks,
+ run_post_hooks=run_post_hooks
):
for _ in inputs:
for allreducer in allreducers:
@@ -275,7 +275,9 @@ class TestJoin(MultiProcessTestCase):
for allreducer in allreducers:
self.assertEqual(allreducer.post_hook_tensor.item(), AFTER_CONSTANT)
- @require_n_gpus_for_nccl_backend(WORLD_SIZE, BACKEND)
+ @require_n_gpus_for_nccl_backend(
+ WORLD_SIZE, BACKEND
+ )
def test_single_joinable_main_hooks(self):
r"""Tests the main hooks of a single :class:`Joinable`."""
num_joinables = 1
@@ -296,10 +298,12 @@ class TestJoin(MultiProcessTestCase):
throw_on_early_termination=False,
num_allreduces=num_allreduces,
run_post_hooks=run_post_hooks,
- expected_total=expected_total,
+ expected_total=expected_total
)
- @require_n_gpus_for_nccl_backend(WORLD_SIZE, BACKEND)
+ @require_n_gpus_for_nccl_backend(
+ WORLD_SIZE, BACKEND
+ )
def test_single_joinable_post_hooks(self):
r"""Tests the post-hooks of a single :class:`Joinable`."""
num_joinables = 1
@@ -313,10 +317,12 @@ class TestJoin(MultiProcessTestCase):
throw_on_early_termination=False,
num_allreduces=num_allreduces,
run_post_hooks=run_post_hooks,
- expected_total=None,
+ expected_total=None
)
- @require_n_gpus_for_nccl_backend(WORLD_SIZE, BACKEND)
+ @require_n_gpus_for_nccl_backend(
+ WORLD_SIZE, BACKEND
+ )
def test_single_joinable(self):
r"""
Tests the main hooks and post-hooks of a single :class:`Joinable`
@@ -341,10 +347,12 @@ class TestJoin(MultiProcessTestCase):
throw_on_early_termination=False,
num_allreduces=num_allreduces,
run_post_hooks=run_post_hooks,
- expected_total=expected_total,
+ expected_total=expected_total
)
- @require_n_gpus_for_nccl_backend(WORLD_SIZE, BACKEND)
+ @require_n_gpus_for_nccl_backend(
+ WORLD_SIZE, BACKEND
+ )
def test_multiple_joinables(self):
r"""
Tests the main hooks and post-hooks of multiple :class:`Joinable` s
@@ -370,10 +378,12 @@ class TestJoin(MultiProcessTestCase):
throw_on_early_termination=False,
num_allreduces=num_allreduces,
run_post_hooks=run_post_hooks,
- expected_total=expected_total,
+ expected_total=expected_total
)
- @require_n_gpus_for_nccl_backend(WORLD_SIZE, BACKEND)
+ @require_n_gpus_for_nccl_backend(
+ WORLD_SIZE, BACKEND
+ )
def test_single_joinable_disable(self):
r"""Tests ``enable=False`` for a single :class:`Joinable`."""
num_joinables = 1
@@ -391,10 +401,12 @@ class TestJoin(MultiProcessTestCase):
throw_on_early_termination=False,
num_allreduces=num_allreduces,
run_post_hooks=run_post_hooks,
- expected_total=expected_total,
+ expected_total=expected_total
)
- @require_n_gpus_for_nccl_backend(WORLD_SIZE, BACKEND)
+ @require_n_gpus_for_nccl_backend(
+ WORLD_SIZE, BACKEND
+ )
def test_multiple_joinable_disable(self):
r"""
Tests ``enable=False`` for multiple :class:`Joinable` s.
@@ -417,10 +429,12 @@ class TestJoin(MultiProcessTestCase):
throw_on_early_termination=False,
num_allreduces=num_allreduces,
run_post_hooks=run_post_hooks,
- expected_total=expected_total,
+ expected_total=expected_total
)
- @require_n_gpus_for_nccl_backend(WORLD_SIZE, BACKEND)
+ @require_n_gpus_for_nccl_backend(
+ WORLD_SIZE, BACKEND
+ )
def test_single_joinable_throw(self):
r"""
Tests ``throw_on_early_termination=True`` for a single
@@ -438,10 +452,12 @@ class TestJoin(MultiProcessTestCase):
throw_on_early_termination=throw_on_early_termination,
num_allreduces=num_allreduces,
run_post_hooks=run_post_hooks,
- expected_total=None,
+ expected_total=None
)
- @require_n_gpus_for_nccl_backend(WORLD_SIZE, BACKEND)
+ @require_n_gpus_for_nccl_backend(
+ WORLD_SIZE, BACKEND
+ )
def test_multiple_joinables_throw(self):
r"""
Tests ``throw_on_early_termination=True`` for multiple
@@ -462,10 +478,12 @@ class TestJoin(MultiProcessTestCase):
throw_on_early_termination=throw_on_early_termination,
num_allreduces=num_allreduces,
run_post_hooks=run_post_hooks,
- expected_total=None,
+ expected_total=None
)
- @require_n_gpus_for_nccl_backend(WORLD_SIZE, BACKEND)
+ @require_n_gpus_for_nccl_backend(
+ WORLD_SIZE, BACKEND
+ )
def test_join_kwargs(self):
r"""
Tests passing keyword arguments to the context manager.
@@ -487,9 +505,8 @@ class TestJoin(MultiProcessTestCase):
throw_on_early_termination=False,
num_allreduces=num_allreduces,
run_post_hooks=run_post_hooks,
- expected_total=expected_total,
+ expected_total=expected_total
)
-
if __name__ == "__main__":
run_tests()
diff --git a/test/distributed/elastic/agent/server/test/local_elastic_agent_test.py b/test/distributed/elastic/agent/server/test/local_elastic_agent_test.py
index 2d64b90515..038f4cdcec 100644
--- a/test/distributed/elastic/agent/server/test/local_elastic_agent_test.py
+++ b/test/distributed/elastic/agent/server/test/local_elastic_agent_test.py
@@ -413,7 +413,8 @@ class LocalElasticAgentTest(unittest.TestCase):
"max_restarts": 0,
"exit_barrier_timeout": exit_barrier_timeout,
"is_host": node_idx == 0,
- "log_line_prefix_template": log_line_prefix_template,
+ "log_line_prefix_template": log_line_prefix_template
+
}
p = mp.Process(target=self.run_agent, kwargs=run_agent_args)
procs.append(p)
@@ -453,21 +454,15 @@ class LocalElasticAgentTest(unittest.TestCase):
self.assertIsInstance(return_value, torch.Tensor)
self.assertEqual((100, 100), return_value.shape)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_dummy_compute_c10d(self):
self.run_test_with_backend(backend="c10d", test_to_run=self.dummy_compute)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_dummy_compute_etcd(self):
self.run_test_with_backend(backend="etcd", test_to_run=self.dummy_compute)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_dummy_compute_etcd_v2(self):
self.run_test_with_backend(backend="etcd-v2", test_to_run=self.dummy_compute)
@@ -477,21 +472,15 @@ class LocalElasticAgentTest(unittest.TestCase):
self.assertIsNone(res.return_values[0])
self.assertIsNone(res.return_values[1])
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_happy_function_c10d(self):
self.run_test_with_backend(backend="c10d", test_to_run=self.run_happy_function)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_happy_function_etcd(self):
self.run_test_with_backend(backend="etcd", test_to_run=self.run_happy_function)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_happy_function_etcd_v2(self):
self.run_test_with_backend(
backend="etcd-v2", test_to_run=self.run_happy_function
@@ -512,17 +501,13 @@ class LocalElasticAgentTest(unittest.TestCase):
self.assertFalse(res.is_failed())
self.assertIsNone(res.return_values[0])
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_check_master_addr_port_override_etcd(self):
self.run_test_with_backend(
backend="etcd", test_to_run=self.check_master_addr_port_override
)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_check_master_addr_port_override_etcd_v2(self):
self.run_test_with_backend(
backend="etcd-v2", test_to_run=self.check_master_addr_port_override
@@ -563,11 +548,7 @@ class LocalElasticAgentTest(unittest.TestCase):
watchdog_file_path = "/tmp/watchdog_timer_" + str(uuid.uuid4())
os.environ[watchdog_env_name] = watchdog_file_path
# Run the agent
- node_conf = Conf(
- entrypoint=_check_local_watchdog_setup,
- local_world_size=1,
- args=(TORCHELASTIC_TIMER_FILE, True),
- )
+ node_conf = Conf(entrypoint=_check_local_watchdog_setup, local_world_size=1, args=(TORCHELASTIC_TIMER_FILE, True))
spec = self.get_worker_spec(node_conf, max_restarts=2)
agent = self.get_agent(spec, node_config=node_conf)
res = agent.run()
@@ -579,43 +560,31 @@ class LocalElasticAgentTest(unittest.TestCase):
if watchdog_env_name in os.environ:
del os.environ[watchdog_env_name]
# Run the agent
- node_conf = Conf(
- entrypoint=_check_local_watchdog_setup,
- local_world_size=1,
- args=(TORCHELASTIC_TIMER_FILE, False),
- )
+ node_conf = Conf(entrypoint=_check_local_watchdog_setup, local_world_size=1, args=(TORCHELASTIC_TIMER_FILE, False))
spec = self.get_worker_spec(node_conf, max_restarts=2)
agent = self.get_agent(spec, node_config=node_conf)
res = agent.run()
self.assertFalse(res.is_failed())
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_agent_local_watchdog_setup_enabled_etcd(self):
self.run_test_with_backend(
backend="etcd", test_to_run=self.run_agent_local_watchdog_setup_enabled
)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_agent_local_watchdog_setup_enabled_c10d(self):
self.run_test_with_backend(
backend="c10d", test_to_run=self.run_agent_local_watchdog_setup_enabled
)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_agent_local_watchdog_setup_disabled_etcd(self):
self.run_test_with_backend(
backend="etcd", test_to_run=self.run_agent_local_watchdog_setup_disabled
)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_agent_local_watchdog_setup_disabled_c10d(self):
self.run_test_with_backend(
backend="c10d", test_to_run=self.run_agent_local_watchdog_setup_disabled
@@ -626,11 +595,7 @@ class LocalElasticAgentTest(unittest.TestCase):
healthcheck_port_env_name = TORCHELASTIC_HEALTH_CHECK_PORT
os.environ[healthcheck_port_env_name] = "12345"
# Run the agent
- node_conf = Conf(
- entrypoint=_check_local_watchdog_setup,
- local_world_size=1,
- args=(TORCHELASTIC_HEALTH_CHECK_PORT, True),
- )
+ node_conf = Conf(entrypoint=_check_local_watchdog_setup, local_world_size=1, args=(TORCHELASTIC_HEALTH_CHECK_PORT, True))
spec = self.get_worker_spec(node_conf, max_restarts=2)
agent = self.get_agent(spec, node_config=node_conf)
res = agent.run()
@@ -642,67 +607,49 @@ class LocalElasticAgentTest(unittest.TestCase):
if healthcheck_port_env_name in os.environ:
del os.environ[healthcheck_port_env_name]
# Run the agent
- node_conf = Conf(
- entrypoint=_check_local_watchdog_setup,
- local_world_size=1,
- args=(TORCHELASTIC_HEALTH_CHECK_PORT, False),
- )
+ node_conf = Conf(entrypoint=_check_local_watchdog_setup, local_world_size=1, args=(TORCHELASTIC_HEALTH_CHECK_PORT, False))
spec = self.get_worker_spec(node_conf, max_restarts=2)
agent = self.get_agent(spec, node_config=node_conf)
res = agent.run()
self.assertFalse(res.is_failed())
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_agent_healthcheck_setup_enabled_etcd(self):
self.run_test_with_backend(
backend="etcd", test_to_run=self.run_agent_healthcheck_setup_enabled
)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_agent_healthcheck_setup_enabled_c10d(self):
self.run_test_with_backend(
backend="c10d", test_to_run=self.run_agent_healthcheck_setup_enabled
)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_agent_healthcheck_setup_disabled_etcd(self):
self.run_test_with_backend(
backend="etcd", test_to_run=self.run_agent_healthcheck_setup_disabled
)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_agent_healthcheck_setup_disabled_c10d(self):
self.run_test_with_backend(
backend="c10d", test_to_run=self.run_agent_healthcheck_setup_disabled
)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_check_env_function_etcd(self):
self.run_test_with_backend(
backend="etcd", test_to_run=self.run_check_env_function
)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_check_nccl_async_error_handling_env_c10d(self):
self.run_test_with_backend(
backend="c10d", test_to_run=self.run_check_nccl_async_error_handling_env
)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_check_nccl_async_error_handling_env_default_c10d(self):
self.run_test_with_backend(
backend="c10d",
@@ -715,25 +662,19 @@ class LocalElasticAgentTest(unittest.TestCase):
self.assertEqual("foo", res.return_values[0])
self.assertEqual("foo", res.return_values[1])
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_function_with_return_value_c10d(self):
self.run_test_with_backend(
backend="c10d", test_to_run=self.run_function_with_return_value
)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_function_with_return_value_etcd(self):
self.run_test_with_backend(
backend="etcd", test_to_run=self.run_function_with_return_value
)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_function_with_return_value_etcd_v2(self):
self.run_test_with_backend(
backend="etcd-v2", test_to_run=self.run_function_with_return_value
@@ -744,27 +685,19 @@ class LocalElasticAgentTest(unittest.TestCase):
self.assertFalse(res.is_failed())
# _dist_sum internally checks that the sum computed is valid
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_simple_dist_sum_c10d(self):
self.run_test_with_backend(backend="c10d", test_to_run=self.simple_dist_sum)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_simple_dist_sum_etcd(self):
self.run_test_with_backend(backend="etcd", test_to_run=self.simple_dist_sum)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_simple_dist_sum_etcd_v2(self):
self.run_test_with_backend(backend="etcd-v2", test_to_run=self.simple_dist_sum)
- def run_distributed_sum_homogeneous(
- self, log_line_prefix_template: Optional[str] = None
- ):
+ def run_distributed_sum_homogeneous(self, log_line_prefix_template: Optional[str] = None):
node_configs = [
Conf(role="sum", entrypoint=_dist_sum, local_world_size=4, tee=Std.ALL),
Conf(role="sum", entrypoint=_dist_sum, local_world_size=4, tee=Std.ALL),
@@ -773,9 +706,7 @@ class LocalElasticAgentTest(unittest.TestCase):
# due to getting stuck on the _dist_sum in waiting for TCPStore workers
# to join the cluster
# TODO(aivanou): t83447589 come up with the proper fix
- res = self.run_job(
- node_configs, log_line_prefix_template=log_line_prefix_template
- )
+ res = self.run_job(node_configs, log_line_prefix_template=log_line_prefix_template)
self.assertEqual(2, len(res["sum"]))
ranks = set()
for run_results in res["sum"]:
@@ -796,11 +727,10 @@ class LocalElasticAgentTest(unittest.TestCase):
log_line_prefix_template = "[${role_name}-${local_rank}:${rank}]:"
self.run_test_with_backend(
backend="c10d",
- test_to_run=lambda: self.run_distributed_sum_homogeneous(
- log_line_prefix_template
- ),
+ test_to_run=lambda: self.run_distributed_sum_homogeneous(log_line_prefix_template)
)
+
@unittest.skipIf(
TEST_WITH_DEV_DBG_ASAN or TEST_WITH_TSAN,
"test incompatible with dev/dbg asan or tsan",
@@ -840,25 +770,19 @@ class LocalElasticAgentTest(unittest.TestCase):
ranks.update(run_results.return_values.keys())
self.assertSetEqual(set(range(1 + 2 + 3)), ranks)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_distributed_sum_heterogeneous_c10d(self):
self.run_test_with_backend(
backend="c10d", test_to_run=self.run_distributed_sum_heterogeneous
)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_distributed_sum_heterogeneous_etcd(self):
self.run_test_with_backend(
backend="etcd", test_to_run=self.run_distributed_sum_heterogeneous
)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_distributed_sum_heterogeneous_etcd_v2(self):
self.run_test_with_backend(
backend="etcd-v2", test_to_run=self.run_distributed_sum_heterogeneous
@@ -885,21 +809,15 @@ class LocalElasticAgentTest(unittest.TestCase):
self.assertEqual(data["message"], failure_data["message"])
self.assertEqual(int(data["extraInfo"]["timestamp"]), failure.timestamp)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_sad_function_c10d(self):
self.run_test_with_backend(backend="c10d", test_to_run=self.run_sad_function)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_sad_function_etcd(self):
self.run_test_with_backend(backend="etcd", test_to_run=self.run_sad_function)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_sad_function_etcd_v2(self):
self.run_test_with_backend(backend="etcd-v2", test_to_run=self.run_sad_function)
@@ -916,25 +834,19 @@ class LocalElasticAgentTest(unittest.TestCase):
self.assertEqual(WorkerState.FAILED, agent.get_worker_group().state)
self.assertTrue(agent._total_execution_time > 0)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_bipolar_function_c10d(self):
self.run_test_with_backend(
backend="c10d", test_to_run=self.run_bipolar_function
)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_bipolar_function_etcd(self):
self.run_test_with_backend(
backend="etcd", test_to_run=self.run_bipolar_function
)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_bipolar_function_etcd_v2(self):
self.run_test_with_backend(
backend="etcd-v2", test_to_run=self.run_bipolar_function
@@ -1390,21 +1302,15 @@ class LocalElasticAgentTest(unittest.TestCase):
self.assertFalse(res.is_failed())
barrier_mock.assert_called_once()
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_barrier_failed_c10d(self):
self.run_test_with_backend(backend="c10d", test_to_run=self.barrier_failed)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_barrier_failed_etcd(self):
self.run_test_with_backend(backend="etcd", test_to_run=self.barrier_failed)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_barrier_failed_etcd_v2(self):
self.run_test_with_backend(backend="etcd-v2", test_to_run=self.barrier_failed)
@@ -1423,20 +1329,14 @@ class LocalElasticAgentTest(unittest.TestCase):
agent.run("worker")
pcontext_mock.close.assert_called_once()
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_shutdown_called_c10d(self):
self.run_test_with_backend(backend="c10d", test_to_run=self.shutdown_called)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_shutdown_called_etcd(self):
self.run_test_with_backend(backend="etcd", test_to_run=self.shutdown_called)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_shutdown_called_etcd_v2(self):
self.run_test_with_backend(backend="etcd-v2", test_to_run=self.shutdown_called)
diff --git a/test/distributed/elastic/events/lib_test.py b/test/distributed/elastic/events/lib_test.py
index 63836c48c1..3a5fb694bf 100644
--- a/test/distributed/elastic/events/lib_test.py
+++ b/test/distributed/elastic/events/lib_test.py
@@ -13,12 +13,12 @@ from dataclasses import asdict
from unittest.mock import patch
from torch.distributed.elastic.events import (
- _get_or_create_logger,
- construct_and_record_rdzv_event,
Event,
EventSource,
NodeState,
RdzvEvent,
+ _get_or_create_logger,
+ construct_and_record_rdzv_event,
)
from torch.testing._internal.common_utils import run_tests, TestCase
@@ -58,7 +58,6 @@ class EventLibTest(TestCase):
deser_event = Event.deserialize(json_event)
self.assert_event(event, deser_event)
-
class RdzvEventLibTest(TestCase):
@patch("torch.distributed.elastic.events.record_rdzv_event")
@patch("torch.distributed.elastic.events.get_logging_handler")
@@ -73,9 +72,7 @@ class RdzvEventLibTest(TestCase):
@patch("torch.distributed.elastic.events.record_rdzv_event")
@patch("torch.distributed.elastic.events.get_logging_handler")
- def test_construct_and_record_rdzv_event_does_not_run_if_invalid_dest(
- self, get_mock, record_mock
- ):
+ def test_construct_and_record_rdzv_event_does_not_run_if_invalid_dest(self, get_mock, record_mock):
get_mock.return_value = logging.NullHandler()
construct_and_record_rdzv_event(
run_id="test_run_id",
@@ -123,6 +120,7 @@ class RdzvEventLibTest(TestCase):
self.assertEqual(event.local_id, 4)
self.assertEqual(event.error_trace, "test_error_trace")
+
def test_rdzv_event_deserialize(self):
event = self.get_test_rdzv_event()
json_event = event.serialize()
diff --git a/test/distributed/elastic/metrics/api_test.py b/test/distributed/elastic/metrics/api_test.py
index e548cfb6f4..279a1b951f 100644
--- a/test/distributed/elastic/metrics/api_test.py
+++ b/test/distributed/elastic/metrics/api_test.py
@@ -10,10 +10,10 @@ import abc
import unittest.mock as mock
from torch.distributed.elastic.metrics.api import (
- _get_metric_name,
MetricData,
MetricHandler,
MetricStream,
+ _get_metric_name,
prof,
)
from torch.testing._internal.common_utils import run_tests, TestCase
diff --git a/test/distributed/elastic/multiprocessing/api_test.py b/test/distributed/elastic/multiprocessing/api_test.py
index 9658ed087a..6851db05c0 100644
--- a/test/distributed/elastic/multiprocessing/api_test.py
+++ b/test/distributed/elastic/multiprocessing/api_test.py
@@ -22,13 +22,13 @@ import torch
import torch.multiprocessing as mp
from torch.distributed.elastic.multiprocessing import ProcessFailure, start_processes
from torch.distributed.elastic.multiprocessing.api import (
- _validate_full_rank,
- _wrap,
DefaultLogsSpecs,
MultiprocessContext,
RunProcsResult,
SignalException,
Std,
+ _validate_full_rank,
+ _wrap,
to_map,
)
from torch.distributed.elastic.multiprocessing.errors import ErrorHandler
@@ -37,13 +37,13 @@ from torch.testing._internal.common_utils import (
IS_MACOS,
IS_WINDOWS,
NO_MULTIPROCESSING_SPAWN,
- run_tests,
- skip_but_pass_in_sandcastle_if,
- skip_if_pytest,
TEST_WITH_ASAN,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_TSAN,
TestCase,
+ run_tests,
+ skip_but_pass_in_sandcastle_if,
+ skip_if_pytest,
)
@@ -67,6 +67,7 @@ class RunProcResultsTest(TestCase):
self.assertTrue(pr_fail.is_failed())
def test_get_failures(self):
+
error_file0 = os.path.join(self.test_dir, "error0.json")
error_file1 = os.path.join(self.test_dir, "error1.json")
eh = ErrorHandler()
@@ -275,7 +276,7 @@ if not (TEST_WITH_DEV_DBG_ASAN or IS_WINDOWS or IS_MACOS):
not_a_dir.name: NotADirectoryError,
}
- for log_dir, expected_error in cases.items():
+ for (log_dir, expected_error) in cases.items():
with self.subTest(log_dir=log_dir, expected_error=expected_error):
with self.assertRaises(expected_error):
pc = None
@@ -291,6 +292,7 @@ if not (TEST_WITH_DEV_DBG_ASAN or IS_WINDOWS or IS_MACOS):
if pc:
pc.close()
+
def test_args_env_len_mismatch(self):
cases = [
# 1 x args; 2 x envs
@@ -394,9 +396,7 @@ if not (TEST_WITH_DEV_DBG_ASAN or IS_WINDOWS or IS_MACOS):
results = pc.wait(period=0.1)
self.assertEqual({0: None, 1: None}, results.return_values)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "tests incompatible with asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "tests incompatible with asan")
def test_function_large_ret_val(self):
# python multiprocessing.queue module uses pipes and actually PipedQueues
# This means that if a single object is greater than a pipe size
@@ -435,8 +435,7 @@ if not (TEST_WITH_DEV_DBG_ASAN or IS_WINDOWS or IS_MACOS):
args={0: ("hello", RAISE), 1: ("world",)},
envs={
0: {"TORCHELASTIC_RUN_ID": "run_id"},
- 1: {"TORCHELASTIC_RUN_ID": "run_id"},
- },
+ 1: {"TORCHELASTIC_RUN_ID": "run_id"}},
logs_specs=DefaultLogsSpecs(log_dir=log_dir),
start_method=start_method,
)
@@ -454,9 +453,7 @@ if not (TEST_WITH_DEV_DBG_ASAN or IS_WINDOWS or IS_MACOS):
self.assertEqual(1, failure.exitcode)
self.assertEqual("<N/A>", failure.signal_name())
self.assertEqual(pc.pids()[0], failure.pid)
- self.assertTrue(
- error_file.startswith(os.path.join(log_dir, "run_id_"))
- )
+ self.assertTrue(error_file.startswith(os.path.join(log_dir, "run_id_")))
self.assertTrue(error_file.endswith("attempt_0/0/error.json"))
self.assertEqual(
int(error_file_data["message"]["extraInfo"]["timestamp"]),
@@ -544,7 +541,9 @@ if not (TEST_WITH_DEV_DBG_ASAN or IS_WINDOWS or IS_MACOS):
args={0: (0, 1)},
envs={0: {}},
logs_specs=DefaultLogsSpecs(
- log_dir=self.log_dir(), redirects=Std.ALL, tee=Std.ALL
+ log_dir=self.log_dir(),
+ redirects=Std.ALL,
+ tee=Std.ALL
),
start_method="spawn",
)
diff --git a/test/distributed/elastic/multiprocessing/bin/echo3.py b/test/distributed/elastic/multiprocessing/bin/echo3.py
index ebad725468..b07f4714b2 100755
--- a/test/distributed/elastic/multiprocessing/bin/echo3.py
+++ b/test/distributed/elastic/multiprocessing/bin/echo3.py
@@ -24,4 +24,5 @@ if __name__ == "__main__":
if args.segfault:
ctypes.string_at(0)
else:
+
print(f"{args.msg} from {rank}")
diff --git a/test/distributed/elastic/multiprocessing/tail_log_test.py b/test/distributed/elastic/multiprocessing/tail_log_test.py
index 6ead06dbe0..2f4a4cc87f 100644
--- a/test/distributed/elastic/multiprocessing/tail_log_test.py
+++ b/test/distributed/elastic/multiprocessing/tail_log_test.py
@@ -53,9 +53,7 @@ class TailLogTest(unittest.TestCase):
}
dst = io.StringIO()
- tail = TailLog(
- name="writer", log_files=log_files, dst=dst, interval_sec=interval_sec
- ).start()
+ tail = TailLog(name="writer", log_files=log_files, dst=dst, interval_sec=interval_sec).start()
# sleep here is intentional to ensure that the log tail
# can gracefully handle and wait for non-existent log files
time.sleep(interval_sec * 10)
@@ -132,6 +130,7 @@ class TailLogTest(unittest.TestCase):
self.assertIn(f"[worker{i}][{i}]", headers)
self.assertTrue(tail.stopped())
+
def test_tail_no_files(self):
"""
Ensures that the log tail can gracefully handle no log files
diff --git a/test/distributed/elastic/rendezvous/api_test.py b/test/distributed/elastic/rendezvous/api_test.py
index b9287546b3..40567857df 100644
--- a/test/distributed/elastic/rendezvous/api_test.py
+++ b/test/distributed/elastic/rendezvous/api_test.py
@@ -6,7 +6,7 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
-from typing import Any, cast, Dict, SupportsInt, Tuple
+from typing import Any, Dict, SupportsInt, Tuple, cast
from unittest import TestCase
from torch.distributed import Store
@@ -170,9 +170,7 @@ class RendezvousParametersTest(TestCase):
params = self._create_params()
- self.assertEqual(
- params.get_as_int("dummy_param"), int(cast(SupportsInt, value))
- )
+ self.assertEqual(params.get_as_int("dummy_param"), int(cast(SupportsInt, value)))
def test_get_as_int_raises_error_if_value_is_invalid(self) -> None:
for value in ["a", "0a", "3b", "abc"]:
@@ -235,9 +233,7 @@ class RendezvousHandlerRegistryTest(TestCase):
self._registry.register("dummy_backend", self._create_handler)
self._registry.register("dummy_backend", self._create_handler)
- def test_register_raises_error_if_called_twice_with_different_creators(
- self,
- ) -> None:
+ def test_register_raises_error_if_called_twice_with_different_creators(self) -> None:
self._registry.register("dummy_backend", self._create_handler)
other_create_handler = lambda p: _DummyRendezvousHandler(p) # noqa: E731
diff --git a/test/distributed/elastic/rendezvous/c10d_rendezvous_backend_test.py b/test/distributed/elastic/rendezvous/c10d_rendezvous_backend_test.py
index 5ebeb00b2f..e31b0f9439 100644
--- a/test/distributed/elastic/rendezvous/c10d_rendezvous_backend_test.py
+++ b/test/distributed/elastic/rendezvous/c10d_rendezvous_backend_test.py
@@ -11,23 +11,22 @@ import tempfile
from base64 import b64encode
from datetime import timedelta
-from typing import Callable, cast, ClassVar
-from unittest import mock, TestCase
+from typing import ClassVar, cast, Callable
+from unittest import TestCase, mock
-from rendezvous_backend_test import RendezvousBackendTestMixin
-
-from torch.distributed import FileStore, TCPStore
+from torch.distributed import TCPStore, FileStore
from torch.distributed.elastic.rendezvous import (
RendezvousConnectionError,
- RendezvousError,
RendezvousParameters,
-)
+ RendezvousError)
from torch.distributed.elastic.rendezvous.c10d_rendezvous_backend import (
C10dRendezvousBackend,
create_backend,
)
+from rendezvous_backend_test import RendezvousBackendTestMixin
+
class TCPStoreBackendTest(TestCase, RendezvousBackendTestMixin):
_store: ClassVar[TCPStore]
@@ -45,7 +44,6 @@ class TCPStoreBackendTest(TestCase, RendezvousBackendTestMixin):
def _corrupt_state(self) -> None:
self._store.set("torch.rendezvous.dummy_run_id", "non_base64")
-
class FileStoreBackendTest(TestCase, RendezvousBackendTestMixin):
_store: ClassVar[FileStore]
@@ -104,6 +102,7 @@ class CreateBackendTest(TestCase):
def tearDown(self) -> None:
os.remove(self._expected_endpoint_file)
+
def _run_test_with_store(self, store_type: str, test_to_run: Callable):
"""
Use this function to specify the store type to use in a test. If
@@ -125,10 +124,10 @@ class CreateBackendTest(TestCase):
typecast_store = cast(self._expected_store_type, store)
self.assertEqual(typecast_store.timeout, self._expected_read_timeout) # type: ignore[attr-defined]
- if self._expected_store_type == TCPStore:
+ if (self._expected_store_type == TCPStore):
self.assertEqual(typecast_store.host, self._expected_endpoint_host) # type: ignore[attr-defined]
self.assertEqual(typecast_store.port, self._expected_endpoint_port) # type: ignore[attr-defined]
- if self._expected_store_type == FileStore:
+ if (self._expected_store_type == FileStore):
if self._params.endpoint:
self.assertEqual(typecast_store.path, self._expected_endpoint_file) # type: ignore[attr-defined]
else:
@@ -143,9 +142,7 @@ class CreateBackendTest(TestCase):
def test_create_backend_returns_backend(self) -> None:
for store_type in ["tcp", "file"]:
with self.subTest(store_type=store_type):
- self._run_test_with_store(
- store_type, self._assert_create_backend_returns_backend
- )
+ self._run_test_with_store(store_type, self._assert_create_backend_returns_backend)
def test_create_backend_returns_backend_if_is_host_is_false(self) -> None:
store = TCPStore( # type: ignore[call-arg] # noqa: F841
@@ -172,36 +169,28 @@ class CreateBackendTest(TestCase):
self._assert_create_backend_returns_backend()
- def test_create_backend_returns_backend_if_endpoint_port_is_not_specified(
- self,
- ) -> None:
+ def test_create_backend_returns_backend_if_endpoint_port_is_not_specified(self) -> None:
self._params.endpoint = self._expected_endpoint_host
self._expected_endpoint_port = 29400
self._assert_create_backend_returns_backend()
- def test_create_backend_returns_backend_if_endpoint_file_is_not_specified(
- self,
- ) -> None:
+ def test_create_backend_returns_backend_if_endpoint_file_is_not_specified(self) -> None:
self._params_filestore.endpoint = ""
self._run_test_with_store("file", self._assert_create_backend_returns_backend)
- def test_create_backend_returns_backend_if_store_type_is_not_specified(
- self,
- ) -> None:
+ def test_create_backend_returns_backend_if_store_type_is_not_specified(self) -> None:
del self._params.config["store_type"]
self._expected_store_type = TCPStore
- if not self._params.get("read_timeout"):
+ if (not self._params.get("read_timeout")):
self._expected_read_timeout = timedelta(seconds=60)
self._assert_create_backend_returns_backend()
- def test_create_backend_returns_backend_if_read_timeout_is_not_specified(
- self,
- ) -> None:
+ def test_create_backend_returns_backend_if_read_timeout_is_not_specified(self) -> None:
del self._params.config["read_timeout"]
self._expected_read_timeout = timedelta(seconds=60)
@@ -209,11 +198,13 @@ class CreateBackendTest(TestCase):
self._assert_create_backend_returns_backend()
def test_create_backend_returns_backend_with_libuv(self) -> None:
+
self._params.config["use_libuv"] = "true"
self._assert_create_backend_returns_backend()
def test_create_backend_returns_backend_without_libuv(self) -> None:
+
self._params.config["use_libuv"] = "false"
self._assert_create_backend_returns_backend()
@@ -246,8 +237,7 @@ class CreateBackendTest(TestCase):
self._params.config["store_type"] = "dummy_store_type"
with self.assertRaisesRegex(
- ValueError,
- r"^Invalid store type given. Currently only supports file and tcp.$",
+ ValueError, r"^Invalid store type given. Currently only supports file and tcp.$"
):
create_backend(self._params)
@@ -262,24 +252,18 @@ class CreateBackendTest(TestCase):
create_backend(self._params)
@mock.patch("tempfile.mkstemp")
- def test_create_backend_raises_error_if_tempfile_creation_fails(
- self, tempfile_mock
- ) -> None:
+ def test_create_backend_raises_error_if_tempfile_creation_fails(self, tempfile_mock) -> None:
tempfile_mock.side_effect = OSError("test error")
# Set the endpoint to empty so it defaults to creating a temp file
self._params_filestore.endpoint = ""
with self.assertRaisesRegex(
RendezvousError,
- r"The file creation for C10d store has failed. See inner exception for details.",
+ r"The file creation for C10d store has failed. See inner exception for details."
):
create_backend(self._params_filestore)
- @mock.patch(
- "torch.distributed.elastic.rendezvous.c10d_rendezvous_backend.FileStore"
- )
- def test_create_backend_raises_error_if_file_path_is_invalid(
- self, filestore_mock
- ) -> None:
+ @mock.patch("torch.distributed.elastic.rendezvous.c10d_rendezvous_backend.FileStore")
+ def test_create_backend_raises_error_if_file_path_is_invalid(self, filestore_mock) -> None:
filestore_mock.side_effect = RuntimeError("test error")
self._params_filestore.endpoint = "bad file path"
with self.assertRaisesRegex(
diff --git a/test/distributed/elastic/rendezvous/dynamic_rendezvous_test.py b/test/distributed/elastic/rendezvous/dynamic_rendezvous_test.py
index 0772ca5135..3713290e48 100644
--- a/test/distributed/elastic/rendezvous/dynamic_rendezvous_test.py
+++ b/test/distributed/elastic/rendezvous/dynamic_rendezvous_test.py
@@ -52,9 +52,7 @@ from torch.distributed.elastic.rendezvous.dynamic_rendezvous import (
class CustomAssertMixin:
assertDictEqual: Callable
- def assert_state_equal(
- self, actual: _RendezvousState, expected: _RendezvousState
- ) -> None:
+ def assert_state_equal(self, actual: _RendezvousState, expected: _RendezvousState) -> None:
self.assertDictEqual(vars(actual), vars(expected))
def assert_state_empty(self, actual: _RendezvousState) -> None:
@@ -89,8 +87,7 @@ class RendezvousTimeoutTest(TestCase):
for join_timeout in join_timeouts:
with self.subTest(join_timeout=join_timeout):
with self.assertRaisesRegex(
- ValueError,
- rf"^The join timeout \({join_timeout}\) must be positive.$",
+ ValueError, rf"^The join timeout \({join_timeout}\) must be positive.$"
):
timeout = RendezvousTimeout(join_timeout)
@@ -146,12 +143,8 @@ class RendezvousStateTest(TestCase):
for num_nodes, max_byte_size in expected_max_sizes:
with self.subTest(num_nodes=num_nodes, max_byte_size=max_byte_size):
for i in range(num_nodes):
- node_running = _NodeDesc(
- f"dummy{i}.dummy1-dummy1-dummy1-dummy1.com", 12345, i
- )
- node_waiting = _NodeDesc(
- f"dummy{i}.dummy2-dummy2-dummy2-dummy2.com", 67890, i
- )
+ node_running = _NodeDesc(f"dummy{i}.dummy1-dummy1-dummy1-dummy1.com", 12345, i)
+ node_waiting = _NodeDesc(f"dummy{i}.dummy2-dummy2-dummy2-dummy2.com", 67890, i)
state.participants[node_running] = i
@@ -276,9 +269,7 @@ class BackendRendezvousStateHolderTest(TestCase, CustomAssertMixin):
return state
def _create_state_holder(self) -> _BackendRendezvousStateHolder:
- return _BackendRendezvousStateHolder(
- self._backend, self._settings, self._cache_duration
- )
+ return _BackendRendezvousStateHolder(self._backend, self._settings, self._cache_duration)
def test_init_initializes_state_holder(self) -> None:
state_holder = self._create_state_holder()
@@ -370,9 +361,7 @@ class BackendRendezvousStateHolderTest(TestCase, CustomAssertMixin):
self._backend.set_state_internal(state)
- with patch(
- "torch.distributed.elastic.rendezvous.dynamic_rendezvous.time"
- ) as mock_time:
+ with patch("torch.distributed.elastic.rendezvous.dynamic_rendezvous.time") as mock_time:
for cache_duration in [1, 5, 10]:
with self.subTest(cache_duration=cache_duration):
self._cache_duration = cache_duration
@@ -408,9 +397,7 @@ class BackendRendezvousStateHolderTest(TestCase, CustomAssertMixin):
self._backend.set_state_internal(state)
- with patch(
- "torch.distributed.elastic.rendezvous.dynamic_rendezvous.time"
- ) as mock_time:
+ with patch("torch.distributed.elastic.rendezvous.dynamic_rendezvous.time") as mock_time:
self._cache_duration = 1
state_holder = self._create_state_holder()
@@ -581,9 +568,7 @@ class DistributedRendezvousOpExecutorTest(TestCase, CustomAssertMixin):
if settings is None:
settings = self._create_settings()
- return _DistributedRendezvousOpExecutor(
- self._node, self._state_holder, settings
- )
+ return _DistributedRendezvousOpExecutor(self._node, self._state_holder, settings)
def _run_action(self, action: _Action) -> None:
op_executor = self._create_op_executor()
@@ -659,18 +644,14 @@ class DistributedRendezvousOpExecutorTest(TestCase, CustomAssertMixin):
node = _NodeDesc(f"dummy{i}", 1, 1)
rank = i
else:
- node = _NodeDesc(
- f"dummy{num_participants - i - 1}", 1, 1
- ) # Add in reverse.
+ node = _NodeDesc(f"dummy{num_participants - i - 1}", 1, 1) # Add in reverse.
rank = 0
state.participants[node] = rank
state.last_heartbeats[node] = self._now
- def test_run_adds_to_participants_and_starts_last_call_if_min_nodes_is_reached(
- self,
- ) -> None:
+ def test_run_adds_to_participants_and_starts_last_call_if_min_nodes_is_reached(self) -> None:
for num_participants in range(3):
self._state = _RendezvousState()
@@ -836,16 +817,12 @@ class DistributedRendezvousOpExecutorTest(TestCase, CustomAssertMixin):
self.assertListEqual(self._mock_state_holder.mock_calls, [call.sync()])
def test_run_delays_execution_if_sync_requested(self) -> None:
- with patch(
- "torch.distributed.elastic.rendezvous.dynamic_rendezvous._delay"
- ) as mock_delay:
+ with patch("torch.distributed.elastic.rendezvous.dynamic_rendezvous._delay") as mock_delay:
self._run_action(_Action.SYNC)
mock_delay.assert_called_once_with(seconds=1)
- self.assertListEqual(
- self._mock_state_holder.mock_calls, [call.sync(), call.sync()]
- )
+ self.assertListEqual(self._mock_state_holder.mock_calls, [call.sync(), call.sync()])
class AbstractTestRendezvousOp(ABC):
@@ -873,9 +850,7 @@ class AbstractTestRendezvousOp(ABC):
mock_datetime = self._datetime_patch.start()
mock_datetime.utcnow.return_value = self._now
- self._time_patch = patch(
- "torch.distributed.elastic.rendezvous.dynamic_rendezvous.time"
- )
+ self._time_patch = patch("torch.distributed.elastic.rendezvous.dynamic_rendezvous.time")
mock_time = self._time_patch.start()
mock_time.monotonic.return_value = self._deadline
@@ -957,18 +932,14 @@ class TestRendezvousJoinOp(AbstractTestRendezvousOp, TestCase):
self._assert_action(expected_action)
- def test_treat_as_redundancy_for_next_rendezvous_if_rendezvous_is_complete(
- self,
- ) -> None:
+ def test_treat_as_redundancy_for_next_rendezvous_if_rendezvous_is_complete(self) -> None:
self._max_nodes = 1
self._state.complete = True
self._assert_action(_Action.ADD_TO_REDUNDANCY_LIST)
- def test_waits_next_round_if_rendezvous_is_complete_and_node_is_redundant(
- self,
- ) -> None:
+ def test_waits_next_round_if_rendezvous_is_complete_and_node_is_redundant(self) -> None:
self._state.redundancy_list.add(self._node)
self._max_nodes = 1
@@ -986,9 +957,7 @@ class TestRendezvousJoinOp(AbstractTestRendezvousOp, TestCase):
self._assert_action(_Action.REMOVE_FROM_REDUNDANCY_LIST)
- def test_waits_next_round_if_rendezvous_is_complete_and_node_is_in_wait_list(
- self,
- ) -> None:
+ def test_waits_next_round_if_rendezvous_is_complete_and_node_is_in_wait_list(self) -> None:
self._state.wait_list.add(self._node)
self._state.complete = True
@@ -1030,18 +999,14 @@ class TestRendezvousJoinOp(AbstractTestRendezvousOp, TestCase):
self._assert_action(_Action.ERROR_TIMEOUT)
- def test_raises_timeout_if_rollback_deadline_exceeded_and_node_is_participant(
- self,
- ) -> None:
+ def test_raises_timeout_if_rollback_deadline_exceeded_and_node_is_participant(self) -> None:
self._deadline = 0
self._state.participants[self._node] = 0
self._assert_action(_Action.ERROR_TIMEOUT)
- def test_raises_timeout_if_rollback_deadline_exceeded_and_node_is_in_wait_list(
- self,
- ) -> None:
+ def test_raises_timeout_if_rollback_deadline_exceeded_and_node_is_in_wait_list(self) -> None:
self._deadline = 0
self._state.wait_list.add(self._node)
@@ -1057,9 +1022,7 @@ class TestRendezvousJoinOp(AbstractTestRendezvousOp, TestCase):
self._assert_action(_Action.REMOVE_FROM_PARTICIPANTS)
- def test_removes_from_wait_list_if_timed_out_but_rollback_deadline_is_not_reached(
- self,
- ) -> None:
+ def test_removes_from_wait_list_if_timed_out_but_rollback_deadline_is_not_reached(self) -> None:
self._deadline = 5
self._state.wait_list.add(self._node)
@@ -1128,9 +1091,7 @@ class TestRendezvousKeepAliveOp(AbstractTestRendezvousOp, TestCase):
def test_finishes_if_no_keep_alive_update_is_needed(self) -> None:
delta = timedelta(seconds=1)
- self._state.last_heartbeats[self._node] = (
- self._now - self._keep_alive_interval + delta
- )
+ self._state.last_heartbeats[self._node] = self._now - self._keep_alive_interval + delta
self._assert_action(_Action.FINISH)
@@ -1215,9 +1176,7 @@ class DynamicRendezvousHandlerTest(TestCase):
_ = store.get("dummy_key")
- self._mock_store_get.assert_called_once_with(
- "torch.rendezvous.dummy_run_id.0/dummy_key"
- )
+ self._mock_store_get.assert_called_once_with("torch.rendezvous.dummy_run_id.0/dummy_key")
def test_next_rendezvous_respects_the_requested_timeout(self) -> None:
self._mock_sync.side_effect = lambda: time.sleep(0.3)
@@ -1530,9 +1489,7 @@ class CreateHandlerTest(TestCase):
self.assertEqual(handler.settings.min_nodes, self._params.min_nodes)
self.assertEqual(handler.settings.max_nodes, self._params.max_nodes)
self.assertEqual(handler.settings.timeout.join, self._expected_timeout.join)
- self.assertEqual(
- handler.settings.timeout.last_call, self._expected_timeout.last_call
- )
+ self.assertEqual(handler.settings.timeout.last_call, self._expected_timeout.last_call)
self.assertEqual(handler.settings.timeout.close, self._expected_timeout.close)
def test_create_handler_returns_handler_if_timeout_is_not_specified(self) -> None:
@@ -1559,7 +1516,6 @@ def _ignore_exception(exception_type: Exception, fn: Callable):
except exception_type as e:
pass
-
def _wait_for(condition, timeout=10, interval=1, name=None):
def _wait_while():
while True:
@@ -1567,21 +1523,18 @@ def _wait_for(condition, timeout=10, interval=1, name=None):
break
else:
time.sleep(interval)
-
wait_thread = threading.Thread(target=_wait_while, name=name)
wait_thread.start()
wait_thread.join(timeout=timeout)
-
class _CapturingThread(threading.Thread):
+
def __init__(self, target=None, name=None, args=None, kwargs=None):
if args is None:
args = ()
if kwargs is None:
kwargs = {}
- threading.Thread.__init__(
- self, target=target, args=args, kwargs=kwargs, name=name
- )
+ threading.Thread.__init__(self, target=target, args=args, kwargs=kwargs, name=name)
self._result = None
def run(self):
@@ -1592,7 +1545,6 @@ class _CapturingThread(threading.Thread):
threading.Thread.join(self, *args)
return self._result
-
class IntegrationTest(TestCase):
def setUp(self) -> None:
self._store = DummyStore()
@@ -1650,8 +1602,7 @@ class IntegrationTest(TestCase):
handler2_thread = _CapturingThread(target=handler2.next_rendezvous)
handler3_thread = _CapturingThread(
target=_ignore_exception,
- args=(RendezvousTimeoutError, lambda: handler3.next_rendezvous()),
- )
+ args=(RendezvousTimeoutError, lambda: handler3.next_rendezvous()))
handler1_thread.start()
handler2_thread.start()
@@ -1691,8 +1642,7 @@ class IntegrationTest(TestCase):
handler3_thread = _CapturingThread(
target=_ignore_exception,
- args=(RendezvousTimeoutError, lambda: handler3.next_rendezvous()),
- )
+ args=(RendezvousTimeoutError, lambda: handler3.next_rendezvous()))
handler1_thread.start()
handler2_thread.start()
@@ -1707,12 +1657,8 @@ class IntegrationTest(TestCase):
handler2._stop_heartbeats()
- _wait_for(
- lambda: len(pickle.loads(self._backend.get_state()[0]).participants) == 1
- )
- _wait_for(
- lambda: len(pickle.loads(self._backend.get_state()[0]).wait_list) == 1
- )
+ _wait_for(lambda: len(pickle.loads(self._backend.get_state()[0]).participants) == 1)
+ _wait_for(lambda: len(pickle.loads(self._backend.get_state()[0]).wait_list) == 1)
class _InMemoryRendezvousBackend(RendezvousBackend):
diff --git a/test/distributed/elastic/rendezvous/etcd_rendezvous_backend_test.py b/test/distributed/elastic/rendezvous/etcd_rendezvous_backend_test.py
index 55343bd080..a972ef01b2 100644
--- a/test/distributed/elastic/rendezvous/etcd_rendezvous_backend_test.py
+++ b/test/distributed/elastic/rendezvous/etcd_rendezvous_backend_test.py
@@ -8,24 +8,21 @@
import subprocess
from base64 import b64encode
-from typing import cast, ClassVar
+from typing import ClassVar, cast
from unittest import TestCase
from etcd import EtcdKeyNotFound # type: ignore[import]
-from rendezvous_backend_test import RendezvousBackendTestMixin
-
-from torch.distributed.elastic.rendezvous import (
- RendezvousConnectionError,
- RendezvousParameters,
-)
+from torch.distributed.elastic.rendezvous import RendezvousConnectionError, RendezvousParameters
from torch.distributed.elastic.rendezvous.etcd_rendezvous_backend import (
- create_backend,
EtcdRendezvousBackend,
+ create_backend,
)
from torch.distributed.elastic.rendezvous.etcd_server import EtcdServer
from torch.distributed.elastic.rendezvous.etcd_store import EtcdStore
+from rendezvous_backend_test import RendezvousBackendTestMixin
+
class EtcdRendezvousBackendTest(TestCase, RendezvousBackendTestMixin):
_server: ClassVar[EtcdServer]
@@ -48,9 +45,7 @@ class EtcdRendezvousBackendTest(TestCase, RendezvousBackendTestMixin):
except EtcdKeyNotFound:
pass
- self._backend = EtcdRendezvousBackend(
- self._client, "dummy_run_id", "/dummy_prefix"
- )
+ self._backend = EtcdRendezvousBackend(self._client, "dummy_run_id", "/dummy_prefix")
def _corrupt_state(self) -> None:
self._client.write("/dummy_prefix/dummy_run_id", "non_base64")
@@ -112,9 +107,7 @@ class CreateBackendTest(TestCase):
self.test_create_backend_returns_backend()
- def test_create_backend_returns_backend_if_read_timeout_is_not_specified(
- self,
- ) -> None:
+ def test_create_backend_returns_backend_if_read_timeout_is_not_specified(self) -> None:
del self._params.config["read_timeout"]
self._expected_read_timeout = 60
@@ -133,9 +126,7 @@ class CreateBackendTest(TestCase):
def test_create_backend_raises_error_if_protocol_is_invalid(self) -> None:
self._params.config["protocol"] = "dummy"
- with self.assertRaisesRegex(
- ValueError, r"^The protocol must be HTTP or HTTPS.$"
- ):
+ with self.assertRaisesRegex(ValueError, r"^The protocol must be HTTP or HTTPS.$"):
create_backend(self._params)
def test_create_backend_raises_error_if_read_timeout_is_invalid(self) -> None:
diff --git a/test/distributed/elastic/rendezvous/etcd_server_test.py b/test/distributed/elastic/rendezvous/etcd_server_test.py
index 88726dddae..08fe2e14a6 100644
--- a/test/distributed/elastic/rendezvous/etcd_server_test.py
+++ b/test/distributed/elastic/rendezvous/etcd_server_test.py
@@ -6,8 +6,8 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
-import sys
import unittest
+import sys
import etcd
from torch.distributed.elastic.rendezvous.etcd_rendezvous import (
@@ -20,7 +20,6 @@ if os.getenv("CIRCLECI"):
print("T85992919 temporarily disabling in circle ci", file=sys.stderr)
sys.exit(0)
-
class EtcdServerTest(unittest.TestCase):
def test_etcd_server_start_stop(self):
server = EtcdServer()
diff --git a/test/distributed/elastic/rendezvous/rendezvous_backend_test.py b/test/distributed/elastic/rendezvous/rendezvous_backend_test.py
index fa2c6ae9c2..b64254bf56 100644
--- a/test/distributed/elastic/rendezvous/rendezvous_backend_test.py
+++ b/test/distributed/elastic/rendezvous/rendezvous_backend_test.py
@@ -7,13 +7,10 @@
# LICENSE file in the root directory of this source tree.
from abc import ABC, abstractmethod
-from typing import Any, Callable, cast, Optional, Tuple
+from typing import Any, Callable, Optional, Tuple, cast
from torch.distributed.elastic.rendezvous import RendezvousStateError
-from torch.distributed.elastic.rendezvous.dynamic_rendezvous import (
- RendezvousBackend,
- Token,
-)
+from torch.distributed.elastic.rendezvous.dynamic_rendezvous import RendezvousBackend, Token
class RendezvousBackendTestMixin(ABC):
@@ -31,9 +28,7 @@ class RendezvousBackendTestMixin(ABC):
"""Corrupts the state stored in the backend."""
pass
- def _set_state(
- self, state: bytes, token: Optional[Any] = None
- ) -> Tuple[bytes, Token, bool]:
+ def _set_state(self, state: bytes, token: Optional[Any] = None) -> Tuple[bytes, Token, bool]:
result = self._backend.set_state(state, token)
self.assertIsNotNone(result)
diff --git a/test/distributed/elastic/rendezvous/utils_test.py b/test/distributed/elastic/rendezvous/utils_test.py
index b876f458ab..c180924ba5 100644
--- a/test/distributed/elastic/rendezvous/utils_test.py
+++ b/test/distributed/elastic/rendezvous/utils_test.py
@@ -6,19 +6,19 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
-import socket
import threading
import time
+import socket
from datetime import timedelta
from typing import List
from unittest import TestCase
from unittest.mock import patch
from torch.distributed.elastic.rendezvous.utils import (
+ _PeriodicTimer,
_delay,
_matches_machine_hostname,
_parse_rendezvous_config,
- _PeriodicTimer,
_try_parse_port,
parse_rendezvous_endpoint,
)
@@ -229,9 +229,7 @@ class UtilsTest(TestCase):
def test_matches_machine_hostname_returns_true_if_hostname_is_machine_address(
self,
) -> None:
- addr_list = socket.getaddrinfo(
- socket.gethostname(), None, proto=socket.IPPROTO_TCP
- )
+ addr_list = socket.getaddrinfo(socket.gethostname(), None, proto=socket.IPPROTO_TCP)
for addr in (addr_info[4][0] for addr_info in addr_list):
with self.subTest(addr=addr):
@@ -257,26 +255,20 @@ class UtilsTest(TestCase):
self.assertGreaterEqual(time2 - time1, 0.2)
- @patch(
- "socket.getaddrinfo",
- side_effect=[
- [(None, None, 0, "a_host", ("1.2.3.4", 0))],
- [(None, None, 0, "a_different_host", ("1.2.3.4", 0))],
- ],
- )
+
+ @patch('socket.getaddrinfo', side_effect=[
+ [(None, None, 0, 'a_host', ('1.2.3.4', 0))],
+ [(None, None, 0, 'a_different_host', ('1.2.3.4', 0))]])
def test_matches_machine_hostname_returns_true_if_ip_address_match_between_hosts(
self,
_0,
) -> None:
self.assertTrue(_matches_machine_hostname("a_host"))
- @patch(
- "socket.getaddrinfo",
- side_effect=[
- [(None, None, 0, "a_host", ("1.2.3.4", 0))],
- [(None, None, 0, "another_host_with_different_ip", ("1.2.3.5", 0))],
- ],
- )
+
+ @patch('socket.getaddrinfo', side_effect=[
+ [(None, None, 0, 'a_host', ('1.2.3.4', 0))],
+ [(None, None, 0, 'another_host_with_different_ip', ('1.2.3.5', 0))]])
def test_matches_machine_hostname_returns_false_if_ip_address_not_match_between_hosts(
self,
_0,
diff --git a/test/distributed/elastic/timer/file_based_local_timer_test.py b/test/distributed/elastic/timer/file_based_local_timer_test.py
index 6c7a92c35a..198c57f34b 100644
--- a/test/distributed/elastic/timer/file_based_local_timer_test.py
+++ b/test/distributed/elastic/timer/file_based_local_timer_test.py
@@ -111,18 +111,14 @@ if not (IS_WINDOWS or IS_MACOS):
num_requests_per_client = 10
processes = []
for i in range(num_clients):
- p = mp.Process(
- target=func, args=(num_requests_per_client, self.file_path)
- )
+ p = mp.Process(target=func, args=(num_requests_per_client, self.file_path))
processes.append(p)
p.start()
for p in processes:
p.join()
self.server.run_once() # Allows the server to process all requests
- self.assertEqual(
- 2 * num_clients * num_requests_per_client, self.server._request_count
- )
+ self.assertEqual(2 * num_clients * num_requests_per_client, self.server._request_count)
@mock.patch("torch.distributed.elastic.timer.FileTimerServer._reap_worker")
def test_exit_before_release(self, mock_reap):
@@ -143,9 +139,7 @@ if not (IS_WINDOWS or IS_MACOS):
self.assertEqual(0, len(self.server._timers))
@mock.patch("torch.distributed.elastic.timer.FileTimerServer._reap_worker")
- @mock.patch(
- "torch.distributed.elastic.timer.FileTimerServer.is_process_running"
- )
+ @mock.patch("torch.distributed.elastic.timer.FileTimerServer.is_process_running")
def test_exit_before_release_reap(self, mock_pid_exists, mock_reap):
def func1(file_path):
client = timer.FileTimerClient(file_path)
@@ -191,6 +185,7 @@ if not (IS_WINDOWS or IS_MACOS):
client.acquire("test_scope", 0)
time.sleep(interval)
+
class FileTimerClientTest(TestCase):
def test_send_request_without_server(self):
client = timer.FileTimerClient("test_file")
@@ -199,6 +194,7 @@ if not (IS_WINDOWS or IS_MACOS):
with timer.expires(after=0.1):
time.sleep(0.1)
+
class FileTimerServerTest(TestCase):
def setUp(self):
super().setUp()
@@ -238,26 +234,14 @@ if not (IS_WINDOWS or IS_MACOS):
def _expired_timer(self, pid, scope):
expired = time.time() - 60
- return timer.FileTimerRequest(
- worker_pid=pid,
- scope_id=scope,
- expiration_time=expired,
- signal=signal.SIGKILL,
- )
+ return timer.FileTimerRequest(worker_pid=pid, scope_id=scope, expiration_time=expired, signal=signal.SIGKILL)
def _valid_timer(self, pid, scope):
valid = time.time() + 60
- return timer.FileTimerRequest(
- worker_pid=pid,
- scope_id=scope,
- expiration_time=valid,
- signal=signal.SIGKILL,
- )
+ return timer.FileTimerRequest(worker_pid=pid, scope_id=scope, expiration_time=valid, signal=signal.SIGKILL)
def _release_timer(self, pid, scope):
- return timer.FileTimerRequest(
- worker_pid=pid, scope_id=scope, expiration_time=-1
- )
+ return timer.FileTimerRequest(worker_pid=pid, scope_id=scope, expiration_time=-1)
@mock.patch("os.kill")
def test_expired_timers(self, mock_os_kill):
@@ -294,9 +278,7 @@ if not (IS_WINDOWS or IS_MACOS):
self.assertEqual(0, len(self.server._timers))
mock_os_kill.assert_not_called()
- @mock.patch(
- "torch.distributed.elastic.timer.FileTimerServer.is_process_running"
- )
+ @mock.patch("torch.distributed.elastic.timer.FileTimerServer.is_process_running")
@mock.patch("os.kill")
def test_valid_timers(self, mock_os_kill, mock_pid_exists):
"""
diff --git a/test/distributed/elastic/timer/local_timer_example.py b/test/distributed/elastic/timer/local_timer_example.py
index 48907bca7b..71204d8350 100644
--- a/test/distributed/elastic/timer/local_timer_example.py
+++ b/test/distributed/elastic/timer/local_timer_example.py
@@ -14,12 +14,12 @@ import time
import torch.distributed.elastic.timer as timer
import torch.multiprocessing as torch_mp
from torch.testing._internal.common_utils import (
- IS_MACOS,
- IS_WINDOWS,
+ TEST_WITH_DEV_DBG_ASAN,
run_tests,
+ IS_WINDOWS,
+ IS_MACOS,
skip_but_pass_in_sandcastle_if,
- TEST_WITH_DEV_DBG_ASAN,
- TestCase,
+ TestCase
)
@@ -42,7 +42,6 @@ def _stuck_function(rank, mp_queue):
# timer is not supported on macos or windows
if not (IS_WINDOWS or IS_MACOS):
-
class LocalTimerExample(TestCase):
"""
Demonstrates how to use LocalTimerServer and LocalTimerClient
@@ -56,9 +55,7 @@ if not (IS_WINDOWS or IS_MACOS):
unittest. As of now this will SIGSEGV.
"""
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test is asan incompatible"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test is asan incompatible")
def test_torch_mp_example(self):
# in practice set the max_interval to a larger value (e.g. 60 seconds)
mp_queue = mp.get_context("spawn").Queue()
@@ -83,9 +80,7 @@ if not (IS_WINDOWS or IS_MACOS):
server.stop()
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test is asan incompatible"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test is asan incompatible")
def test_example_start_method_spawn(self):
self._run_example_with(start_method="spawn")
diff --git a/test/distributed/elastic/timer/local_timer_test.py b/test/distributed/elastic/timer/local_timer_test.py
index 6111f326d2..386b6e9be9 100644
--- a/test/distributed/elastic/timer/local_timer_test.py
+++ b/test/distributed/elastic/timer/local_timer_test.py
@@ -15,12 +15,12 @@ import torch.distributed.elastic.timer as timer
from torch.distributed.elastic.timer.api import TimerRequest
from torch.distributed.elastic.timer.local_timer import MultiprocessingRequestQueue
from torch.testing._internal.common_utils import (
- IS_MACOS,
- IS_WINDOWS,
run_tests,
+ IS_WINDOWS,
+ IS_MACOS,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_TSAN,
- TestCase,
+ TestCase
)
diff --git a/test/distributed/elastic/utils/distributed_test.py b/test/distributed/elastic/utils/distributed_test.py
index 65ebd4b6e7..4e5fa8d7e0 100644
--- a/test/distributed/elastic/utils/distributed_test.py
+++ b/test/distributed/elastic/utils/distributed_test.py
@@ -24,19 +24,12 @@ from torch.testing._internal.common_utils import (
IS_WINDOWS,
run_tests,
TEST_WITH_TSAN,
- TestCase,
+ TestCase
)
def _create_c10d_store_mp(is_server, server_addr, port, world_size, wait_for_workers):
- store = create_c10d_store(
- is_server,
- server_addr,
- port,
- world_size,
- wait_for_workers=wait_for_workers,
- timeout=2,
- )
+ store = create_c10d_store(is_server, server_addr, port, world_size, wait_for_workers=wait_for_workers, timeout=2)
if store is None:
raise AssertionError()
diff --git a/test/distributed/launcher/api_test.py b/test/distributed/launcher/api_test.py
index 81e9320d1f..6a4b46272e 100644
--- a/test/distributed/launcher/api_test.py
+++ b/test/distributed/launcher/api_test.py
@@ -23,7 +23,9 @@ from unittest.mock import MagicMock, Mock, patch
import torch
import torch.distributed as dist
from torch.distributed.elastic.agent.server.api import RunResult, WorkerState
-from torch.distributed.elastic.multiprocessing.api import SignalException
+from torch.distributed.elastic.multiprocessing.api import (
+ SignalException,
+)
from torch.distributed.elastic.multiprocessing.errors import ChildFailedError
from torch.distributed.elastic.rendezvous.etcd_server import EtcdServer
from torch.distributed.elastic.utils import get_socket_with_port
@@ -155,9 +157,7 @@ class ElasticLaunchTest(unittest.TestCase):
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_script_python(self):
nnodes = 1
nproc_per_node = 4
@@ -172,9 +172,7 @@ class ElasticLaunchTest(unittest.TestCase):
world_size = nnodes * nproc_per_node
self.check_works_ran(world_size)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_script_python_local_rank_transfer(self):
nnodes = 1
nproc_per_node = 4
@@ -189,9 +187,7 @@ class ElasticLaunchTest(unittest.TestCase):
world_size = nnodes * nproc_per_node
self.check_works_ran(world_size)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_script_bash(self):
nnodes = 1
nproc_per_node = 4
@@ -204,9 +200,7 @@ class ElasticLaunchTest(unittest.TestCase):
world_size = nnodes * nproc_per_node
self.check_works_ran(world_size)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_function(self):
nnodes = 1
nproc_per_node = 4
@@ -220,9 +214,7 @@ class ElasticLaunchTest(unittest.TestCase):
actual_res = sorted(value for value in res.values())
self.assertEqual(expected_res, actual_res)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_dist_sum_with_static_rdzv(self):
nnodes = 1
nproc_per_node = 4
@@ -251,9 +243,7 @@ class ElasticLaunchTest(unittest.TestCase):
actual_res = sorted(value for value in res.values())
self.assertEqual(expected_res, actual_res)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_elastic(self):
nproc_per_node = 4
@@ -298,9 +288,7 @@ class ElasticLaunchTest(unittest.TestCase):
)("-u", path("bin/test_script.py"), f"--touch-file-dir={self.test_dir}")
record_mock.assert_called_once()
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_elastic_multiple_agents(self):
min_nodes = 1
max_nodes = 2
diff --git a/test/distributed/launcher/launch_test.py b/test/distributed/launcher/launch_test.py
index b8312de37f..4140e55c6c 100644
--- a/test/distributed/launcher/launch_test.py
+++ b/test/distributed/launcher/launch_test.py
@@ -15,8 +15,8 @@ from contextlib import closing
import torch.distributed.launch as launch
from torch.distributed.elastic.utils import get_socket_with_port
from torch.testing._internal.common_utils import (
- skip_but_pass_in_sandcastle_if,
TEST_WITH_DEV_DBG_ASAN,
+ skip_but_pass_in_sandcastle_if,
)
diff --git a/test/distributed/launcher/run_test.py b/test/distributed/launcher/run_test.py
index c816042e3e..f33d075d8a 100644
--- a/test/distributed/launcher/run_test.py
+++ b/test/distributed/launcher/run_test.py
@@ -145,9 +145,7 @@ class ElasticLaunchTest(unittest.TestCase):
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_user_script_bash(self):
run_id = str(uuid.uuid4().int)
nnodes = 1
@@ -178,9 +176,7 @@ class ElasticLaunchTest(unittest.TestCase):
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_user_script_default_nproc(self):
run_id = str(uuid.uuid4().int)
nnodes = 1
@@ -209,9 +205,7 @@ class ElasticLaunchTest(unittest.TestCase):
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_with_env_vars(self):
run_id = str(uuid.uuid4().int)
nnodes = 1
@@ -269,37 +263,27 @@ class ElasticLaunchTest(unittest.TestCase):
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_nproc_launch_auto_configurations(self):
self._test_nproc_launch_configuration("auto", os.cpu_count())
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_nproc_launch_number_configurations(self):
self._test_nproc_launch_configuration("4", 4)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_nproc_launch_unknown_configurations(self):
with self.assertRaises(ValueError):
self._test_nproc_launch_configuration("unknown", 4)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
@patch("torch.cuda.is_available", return_value=True)
@patch("torch.cuda.device_count", return_value=3)
def test_nproc_gpu_launch_configurations(self, _mock1, _mock2):
self._test_nproc_launch_configuration("auto", 3)
self._test_nproc_launch_configuration("gpu", 3)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_elastic(self):
run_id = str(uuid.uuid4().int)
min_nodes = 1
@@ -327,9 +311,7 @@ class ElasticLaunchTest(unittest.TestCase):
)
@mock.patch("torch.distributed.elastic.events.record")
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_elastic_worker_raise_exception(self, record_mock):
"""
        Asserts that when the worker program fails and launcher raises exception
@@ -357,9 +339,7 @@ class ElasticLaunchTest(unittest.TestCase):
record_mock.assert_called_once()
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
@mock.patch(
"torch.distributed.elastic.agent.server.local_elastic_agent.LocalElasticAgent.run"
)
@@ -391,9 +371,7 @@ class ElasticLaunchTest(unittest.TestCase):
launch.main(args)
record_mock.assert_called_once()
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_standalone(self):
nnodes = 1
nproc_per_node = 4
@@ -415,9 +393,7 @@ class ElasticLaunchTest(unittest.TestCase):
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_run_path(self):
nnodes = 1
nproc_per_node = 4
@@ -439,9 +415,7 @@ class ElasticLaunchTest(unittest.TestCase):
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_elastic_multiple_agents(self):
run_id = str(uuid.uuid4().int)
min_nodes = 1
@@ -510,9 +484,7 @@ class ElasticLaunchTest(unittest.TestCase):
launch.main(args)
rdzv_handler_mock.shutdown.assert_called_once()
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_is_torchelastic_launched(self):
# launch test script with torchelastic and validate that
# torch.distributed.is_torchelastic_launched() returns True
@@ -534,9 +506,7 @@ class ElasticLaunchTest(unittest.TestCase):
self.assertEqual("True", is_torchelastic_launched)
@patch("torch.distributed.run.metadata")
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_is_torchelastic_launched_with_logs_spec_defined(self, metadata_mock):
# mock the entrypoint API to avoid version issues.
entrypoints = MagicMock()
@@ -548,7 +518,7 @@ class ElasticLaunchTest(unittest.TestCase):
ep = MagicMock()
ep.load.return_value = DefaultLogsSpecs
- group.select.return_value = ep
+ group.select.return_value = (ep)
group.__getitem__.return_value = ep
out_file = f"{os.path.join(self.test_dir, 'out')}"
@@ -570,9 +540,7 @@ class ElasticLaunchTest(unittest.TestCase):
is_torchelastic_launched = fp.readline()
self.assertEqual("True", is_torchelastic_launched)
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_logs_logs_spec_entrypoint_must_be_defined(self):
with self.assertRaises(ValueError):
launch.main(
@@ -623,9 +591,7 @@ class ElasticLaunchTest(unittest.TestCase):
runpy.run_path(sys.argv[0], run_name="__main__")
# nothing to validate, just make sure it runs
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_init_method_tcp_with_torchelastic(self):
port = get_free_port()
launch.main(
@@ -663,9 +629,7 @@ class ElasticLaunchTest(unittest.TestCase):
runpy.run_path(sys.argv[0], run_name="__main__")
# nothing to validate, just make sure it runs
- @skip_but_pass_in_sandcastle_if(
- TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
- )
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_init_method_env_with_torchelastic(self):
port = get_free_port()
launch.main(
diff --git a/test/distributed/nn/jit/test_instantiator.py b/test/distributed/nn/jit/test_instantiator.py
index 03d3a6f050..8a7026f964 100644
--- a/test/distributed/nn/jit/test_instantiator.py
+++ b/test/distributed/nn/jit/test_instantiator.py
@@ -6,15 +6,15 @@ import sys
from typing import Tuple
import torch
+from torch import Tensor, nn
import torch.distributed as dist
-from torch import nn, Tensor
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
from torch.distributed.nn.jit import instantiator
-from torch.testing._internal.common_utils import run_tests, TestCase
+from torch.testing._internal.common_utils import TestCase, run_tests
@torch.jit.interface
diff --git a/test/distributed/optim/test_zero_redundancy_optimizer.py b/test/distributed/optim/test_zero_redundancy_optimizer.py
index 485df8f5b5..b84d96cb0f 100644
--- a/test/distributed/optim/test_zero_redundancy_optimizer.py
+++ b/test/distributed/optim/test_zero_redundancy_optimizer.py
@@ -47,7 +47,6 @@ try:
except ImportError:
HAS_TORCHVISION = False
-
# Use GLOO on GPU when running CUDA + Windows
def _get_backend_for_tests():
return (
@@ -726,8 +725,7 @@ class TestZeroRedundancyOptimizerDistributed(TestZeroRedundancyOptimizer):
common_distributed.logger.info(
"Skipping `test_nondefault_process_group()` since world size "
"of %s is less than %s",
- self.world_size,
- MIN_WORLD_SIZE,
+ self.world_size, MIN_WORLD_SIZE
)
return
BACKEND = dist.Backend.GLOO
@@ -1277,7 +1275,7 @@ class TestZeroRedundancyOptimizerDistributed(TestZeroRedundancyOptimizer):
[torch.randn(1, 3, 3, 1000).to(device) for _ in range(NUM_INPUTS)],
)
)
- for model, inputs in models_to_test:
+ for (model, inputs) in models_to_test:
# Enable determinism in cudnn operators
with torch.backends.cudnn.flags(
enabled=True, deterministic=True, benchmark=False
diff --git a/test/distributed/pipeline/sync/conftest.py b/test/distributed/pipeline/sync/conftest.py
index 4f2479b27b..78f7d3a8f1 100644
--- a/test/distributed/pipeline/sync/conftest.py
+++ b/test/distributed/pipeline/sync/conftest.py
@@ -5,13 +5,11 @@
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import tempfile
-
import pytest
import torch
import torch.distributed as dist
-
@pytest.fixture(autouse=True)
def manual_seed_zero():
torch.manual_seed(0)
@@ -40,7 +38,6 @@ def cuda_sleep():
def pytest_report_header():
return f"torch: {torch.__version__}"
-
@pytest.fixture
def setup_rpc(scope="session"):
file = tempfile.NamedTemporaryFile()
@@ -50,12 +47,11 @@ def setup_rpc(scope="session"):
world_size=1,
rpc_backend_options=dist.rpc.TensorPipeRpcBackendOptions(
init_method=f"file://{file.name}",
- ),
+ )
)
yield
dist.rpc.shutdown()
-
def pytest_ignore_collect(path, config):
"Skip this directory if distributed modules are not enabled."
return not dist.is_available()
diff --git a/test/distributed/pipeline/sync/skip/test_gpipe.py b/test/distributed/pipeline/sync/skip/test_gpipe.py
index e002d65db7..21731d452d 100644
--- a/test/distributed/pipeline/sync/skip/test_gpipe.py
+++ b/test/distributed/pipeline/sync/skip/test_gpipe.py
@@ -12,19 +12,13 @@ from torch import nn
from torch.distributed.pipeline.sync import Pipe
from torch.distributed.pipeline.sync.skip import pop, skippable, stash
-from torch.distributed.pipeline.sync.skip.portal import (
- PortalBlue,
- PortalCopy,
- PortalOrange,
-)
+from torch.distributed.pipeline.sync.skip.portal import PortalBlue, PortalCopy, PortalOrange
from torch.distributed.pipeline.sync.utils import partition_model
from torch.testing._internal.common_utils import run_tests
@pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
-@pytest.mark.parametrize(
- "balance", [[3], [1, 2], [2, 1], [1, 1, 1]], ids=["3", "1:2", "2:1", "1:1:1"]
-)
+@pytest.mark.parametrize("balance", [[3], [1, 2], [2, 1], [1, 1, 1]], ids=["3", "1:2", "2:1", "1:1:1"])
@pytest.mark.parametrize("checkpoint", ["never", "always", "except_last"])
def test_1to3(balance, checkpoint, setup_rpc):
if torch.cuda.device_count() < len(balance):
@@ -73,12 +67,8 @@ def test_1to3(balance, checkpoint, setup_rpc):
loss = output.local_value().mean()
loss.backward()
- assert torch.allclose(
- output.local_value().norm(), torch.tensor(1039.0, device=out_device), atol=6e-1
- )
- assert torch.allclose(
- input.grad.norm(), torch.tensor(0.0004533053, device=in_device)
- )
+ assert torch.allclose(output.local_value().norm(), torch.tensor(1039.0, device=out_device), atol=6e-1)
+ assert torch.allclose(input.grad.norm(), torch.tensor(0.0004533053, device=in_device))
def test_none_skip(setup_rpc):
diff --git a/test/distributed/pipeline/sync/skip/test_leak.py b/test/distributed/pipeline/sync/skip/test_leak.py
index 2bf797dae5..f0e82f7bba 100644
--- a/test/distributed/pipeline/sync/skip/test_leak.py
+++ b/test/distributed/pipeline/sync/skip/test_leak.py
@@ -10,7 +10,7 @@ import pytest
import torch
from torch import nn
-from torch.distributed.pipeline.sync import is_checkpointing, is_recomputing, Pipe
+from torch.distributed.pipeline.sync import Pipe, is_checkpointing, is_recomputing
from torch.distributed.pipeline.sync.skip import pop, skippable, stash
from torch.distributed.pipeline.sync.skip.tracker import current_skip_tracker
from torch.testing._internal.common_utils import run_tests
@@ -113,9 +113,7 @@ def test_no_portal_without_pipe(train, monkeypatch, setup_rpc):
def deny(*args, **kwargs):
raise AssertionError("tried to create Portal without Pipe")
- monkeypatch.setattr(
- "torch.distributed.pipeline.sync.skip.portal.Portal.__init__", deny
- )
+ monkeypatch.setattr("torch.distributed.pipeline.sync.skip.portal.Portal.__init__", deny)
model = nn.Sequential(Stash(), Pop())
diff --git a/test/distributed/pipeline/sync/skip/test_tracker.py b/test/distributed/pipeline/sync/skip/test_tracker.py
index 007a5a963a..5810cab976 100644
--- a/test/distributed/pipeline/sync/skip/test_tracker.py
+++ b/test/distributed/pipeline/sync/skip/test_tracker.py
@@ -6,25 +6,18 @@
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
-import threading
from queue import Queue
+import threading
import pytest
import torch
from torch import nn
-from torch.distributed.pipeline.sync.checkpoint import (
- enable_checkpointing,
- enable_recomputing,
-)
+from torch.distributed.pipeline.sync.checkpoint import enable_checkpointing, enable_recomputing
from torch.distributed.pipeline.sync.microbatch import Batch
from torch.distributed.pipeline.sync.skip import pop, skippable, stash
from torch.distributed.pipeline.sync.skip.layout import SkipLayout
-from torch.distributed.pipeline.sync.skip.tracker import (
- current_skip_tracker,
- SkipTracker,
- SkipTrackerThroughPotals,
-)
+from torch.distributed.pipeline.sync.skip.tracker import SkipTracker, SkipTrackerThroughPotals, current_skip_tracker
from torch.testing._internal.common_utils import run_tests
@@ -83,10 +76,7 @@ def test_reuse_portal():
def test_no_copy_no_portal():
- skip_layout = SkipLayout(
- num_partitions=2,
- skip_routes={(None, "copy"): (0, 1), (None, "not_copy"): (0, 0)},
- )
+ skip_layout = SkipLayout(num_partitions=2, skip_routes={(None, "copy"): (0, 1), (None, "not_copy"): (0, 0)})
skip_tracker = SkipTrackerThroughPotals(skip_layout)
batch = Batch(torch.tensor([1.0]))
diff --git a/test/distributed/pipeline/sync/skip/test_verify_skippables.py b/test/distributed/pipeline/sync/skip/test_verify_skippables.py
index 265c3fee85..6de439ec88 100644
--- a/test/distributed/pipeline/sync/skip/test_verify_skippables.py
+++ b/test/distributed/pipeline/sync/skip/test_verify_skippables.py
@@ -151,12 +151,7 @@ def test_double_stash_pop_but_isolated():
ns2 = Namespace()
verify_skippables(
- nn.Sequential(
- Layer1().isolate(ns1),
- Layer2().isolate(ns1),
- Layer3().isolate(ns2),
- Layer4().isolate(ns2),
- )
+ nn.Sequential(Layer1().isolate(ns1), Layer2().isolate(ns1), Layer3().isolate(ns2), Layer4().isolate(ns2),)
)
diff --git a/test/distributed/pipeline/sync/test_balance.py b/test/distributed/pipeline/sync/test_balance.py
index 82af7545bb..b8a81aabb7 100644
--- a/test/distributed/pipeline/sync/test_balance.py
+++ b/test/distributed/pipeline/sync/test_balance.py
@@ -12,17 +12,11 @@ import pytest
import torch
from torch import nn
-from torch.distributed.pipeline.sync._balance import (
- balance_by_size,
- balance_by_time,
- blockpartition,
-)
+from torch.distributed.pipeline.sync._balance import balance_by_size, balance_by_time, blockpartition
from torch.distributed.pipeline.sync._balance.profile import layerwise_sandbox
from torch.testing._internal.common_utils import run_tests
-skip_if_no_cuda = pytest.mark.skipif(
- not torch.cuda.is_available(), reason="cuda required"
-)
+skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
devices = ["cpu"]
if torch.cuda.is_available():
@@ -30,10 +24,7 @@ if torch.cuda.is_available():
def test_blockpartition():
- assert blockpartition.solve([1, 2, 3, 4, 5, 6], partitions=2) == [
- [1, 2, 3, 4],
- [5, 6],
- ]
+ assert blockpartition.solve([1, 2, 3, 4, 5, 6], partitions=2) == [[1, 2, 3, 4], [5, 6]]
def test_blockpartition_zeros():
diff --git a/test/distributed/pipeline/sync/test_bugs.py b/test/distributed/pipeline/sync/test_bugs.py
index c3dc716a64..f9860cb0f2 100644
--- a/test/distributed/pipeline/sync/test_bugs.py
+++ b/test/distributed/pipeline/sync/test_bugs.py
@@ -8,12 +8,12 @@
# LICENSE file in the root directory of this source tree.
import pytest
import torch
-import torch.nn.functional as F
from torch import nn
+import torch.nn.functional as F
from torch.distributed.pipeline.sync import Pipe
-from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_utils import run_tests
+from torch.testing._internal.common_cuda import TEST_MULTIGPU
def test_python_autograd_function(setup_rpc):
diff --git a/test/distributed/pipeline/sync/test_checkpoint.py b/test/distributed/pipeline/sync/test_checkpoint.py
index e1ae6f6754..f3d57c218c 100644
--- a/test/distributed/pipeline/sync/test_checkpoint.py
+++ b/test/distributed/pipeline/sync/test_checkpoint.py
@@ -10,15 +10,10 @@ from functools import partial
import pytest
import torch
-import torch.cuda
from torch import nn
+import torch.cuda
-from torch.distributed.pipeline.sync.checkpoint import (
- checkpoint,
- Checkpointing,
- is_checkpointing,
- is_recomputing,
-)
+from torch.distributed.pipeline.sync.checkpoint import Checkpointing, checkpoint, is_checkpointing, is_recomputing
from torch.distributed.pipeline.sync.dependency import fork, join
from torch.distributed.pipeline.sync.microbatch import Batch
from torch.testing._internal.common_utils import run_tests
@@ -68,14 +63,7 @@ def test_serial_checkpoints(device):
# +--> {b} --Checkpoint(Log)--> {b} --First--> {b}
out.backward()
- assert timeline == [
- "a:forward",
- "b:forward",
- "b:forward",
- "b:backward",
- "a:forward",
- "a:backward",
- ]
+ assert timeline == ["a:forward", "b:forward", "b:forward", "b:backward", "a:forward", "a:backward"]
# |----------------------| |-----------------------| |-----------------------|
# forward pass Checkpoint(Log[b]) Checkpoint(Log[a])
diff --git a/test/distributed/pipeline/sync/test_copy.py b/test/distributed/pipeline/sync/test_copy.py
index 22a3a37805..171b7ffbb8 100644
--- a/test/distributed/pipeline/sync/test_copy.py
+++ b/test/distributed/pipeline/sync/test_copy.py
@@ -10,19 +10,10 @@ import pytest
import torch
from torch.distributed.pipeline.sync.copy import Copy, Wait
-from torch.distributed.pipeline.sync.stream import (
- CPUStream,
- current_stream,
- get_device,
- is_cuda,
- new_stream,
- use_stream,
-)
+from torch.distributed.pipeline.sync.stream import CPUStream, current_stream, get_device, is_cuda, new_stream, use_stream
from torch.testing._internal.common_utils import run_tests
-skip_if_no_cuda = pytest.mark.skipif(
- not torch.cuda.is_available(), reason="cuda required"
-)
+skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
def _test_copy_wait(prev_stream, next_stream, cuda_sleep=None):
diff --git a/test/distributed/pipeline/sync/test_deferred_batch_norm.py b/test/distributed/pipeline/sync/test_deferred_batch_norm.py
index 2fea96e7dd..4e2578da94 100644
--- a/test/distributed/pipeline/sync/test_deferred_batch_norm.py
+++ b/test/distributed/pipeline/sync/test_deferred_batch_norm.py
@@ -28,7 +28,7 @@ def tilt_dist(input):
# Tilt mean by single batch.
for i, single in enumerate(input):
- single += 2**i
+ single += 2 ** i
return input
@@ -140,7 +140,7 @@ def test_optimize():
dbn.eval()
with torch.no_grad():
- assert torch.allclose(bn(input), dbn(input), atol=1e-1 * (10**i))
+ assert torch.allclose(bn(input), dbn(input), atol=1e-1 * (10 ** i))
def test_conv_bn():
diff --git a/test/distributed/pipeline/sync/test_dependency.py b/test/distributed/pipeline/sync/test_dependency.py
index 73283b88ab..cff4082759 100644
--- a/test/distributed/pipeline/sync/test_dependency.py
+++ b/test/distributed/pipeline/sync/test_dependency.py
@@ -11,7 +11,7 @@ import weakref
import pytest
import torch
-from torch.distributed.pipeline.sync.dependency import Fork, fork, Join, join
+from torch.distributed.pipeline.sync.dependency import Fork, Join, fork, join
from torch.testing._internal.common_utils import run_tests
diff --git a/test/distributed/pipeline/sync/test_pipe.py b/test/distributed/pipeline/sync/test_pipe.py
index b0237f8427..7fc8d8b7c5 100644
--- a/test/distributed/pipeline/sync/test_pipe.py
+++ b/test/distributed/pipeline/sync/test_pipe.py
@@ -6,26 +6,27 @@
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
-import random
-import time
from collections import OrderedDict
from copy import deepcopy
+import time
import pytest
+import random
import torch
-from torch import nn, Tensor
+from torch import nn
+from torch import Tensor
-from torch.distributed.pipeline.sync import NoChunk, Pipe, WithDevice
+from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
-from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_utils import run_tests, TEST_CUDA
+from torch.testing._internal.common_cuda import TEST_MULTIGPU
skip_if_no_cuda = pytest.mark.skipif(not TEST_CUDA, reason="cuda required")
def test_pipe_without_rpc():
model = nn.Sequential(nn.Linear(1, 1))
- with pytest.raises(RuntimeError, match="Please initialize RPC framework"):
+ with pytest.raises(RuntimeError, match='Please initialize RPC framework'):
pipe = Pipe(model, chunks=1)
@@ -134,19 +135,14 @@ def test_checkpoint_mode(setup_rpc):
never_output = never(input)
assert count_grad_fn(always_output.local_value().grad_fn, "CheckpointBackward") == 2
- assert (
- count_grad_fn(except_last_output.local_value().grad_fn, "CheckpointBackward")
- == 1
- )
+ assert count_grad_fn(except_last_output.local_value().grad_fn, "CheckpointBackward") == 1
assert count_grad_fn(never_output.local_value().grad_fn, "CheckpointBackward") == 0
def test_checkpoint_mode_invalid(setup_rpc):
model = nn.Sequential(nn.Linear(1, 1))
- with pytest.raises(
- ValueError, match="checkpoint is not one of 'always', 'except_last', or 'never'"
- ):
+ with pytest.raises(ValueError, match="checkpoint is not one of 'always', 'except_last', or 'never'"):
Pipe(model, chunks=2, checkpoint="INVALID_CHECKPOINT")
@@ -333,7 +329,10 @@ def test_multi_sequence_input(setup_rpc):
model = Pipe(nn.Sequential(MultiSeq()))
with pytest.raises(TypeError):
- model([torch.rand(10), torch.rand(10)], [torch.rand(10), torch.rand(10)])
+ model(
+ [torch.rand(10), torch.rand(10)],
+ [torch.rand(10), torch.rand(10)]
+ )
def test_input_singleton(setup_rpc):
@@ -428,9 +427,7 @@ def test_valid_non_tensor(checkpoint, setup_rpc):
res += d
return c, res, a, d + f if f is not None else d, b, e, f
- model = Pipe(
- nn.Sequential(NonTensor1(), NonTensor2()), chunks=5, checkpoint=checkpoint
- )
+ model = Pipe(nn.Sequential(NonTensor1(), NonTensor2()), chunks=5, checkpoint=checkpoint)
a = random.randint(0, 10)
b = torch.rand(10, 10)
c = random.randint(0, 1) == 0
@@ -510,7 +507,7 @@ def test_uneven_batch_size(checkpoint, setup_rpc):
b = random.randint(0, 10)
c = torch.rand(4, 10)
- with pytest.raises(RuntimeError, match="Found different number of chunks"):
+ with pytest.raises(RuntimeError, match='Found different number of chunks'):
model(a, b, c)
@@ -532,7 +529,7 @@ def test_no_chunk(checkpoint, setup_rpc):
assert torch.allclose(torch.cat((c, c, c, c, c)), res[2])
# Test invalid type for NoChunk
- with pytest.raises(TypeError, match="NoChunk only supported for tensors"):
+ with pytest.raises(TypeError, match='NoChunk only supported for tensors'):
NoChunk(b)
@@ -541,10 +538,7 @@ def test_deferred_batch_norm(checkpoint, setup_rpc):
bn = nn.BatchNorm2d(3)
pipe_bn = deepcopy(bn)
pipe = Pipe(
- nn.Sequential(pipe_bn),
- chunks=2,
- checkpoint=checkpoint,
- deferred_batch_norm=True,
+ nn.Sequential(pipe_bn), chunks=2, checkpoint=checkpoint, deferred_batch_norm=True
)
x = torch.rand(4, 3, 10, 10)
@@ -560,10 +554,7 @@ def test_deferred_batch_norm_params(checkpoint, setup_rpc):
bn = nn.BatchNorm2d(3)
pipe_bn = deepcopy(bn)
pipe = Pipe(
- nn.Sequential(pipe_bn),
- chunks=1,
- checkpoint=checkpoint,
- deferred_batch_norm=True,
+ nn.Sequential(pipe_bn), chunks=1, checkpoint=checkpoint, deferred_batch_norm=True
)
x = torch.rand(4, 3, 10, 10)
@@ -691,9 +682,7 @@ def test_named_children(setup_rpc):
def test_verify_module_non_sequential(setup_rpc):
- with pytest.raises(
- TypeError, match="module must be nn.Sequential to be partitioned"
- ):
+ with pytest.raises(TypeError, match="module must be nn.Sequential to be partitioned"):
Pipe(nn.Module())
@@ -701,9 +690,7 @@ def test_verify_module_duplicate_children(setup_rpc):
conv = nn.Conv2d(3, 3, 1)
model = nn.Sequential(conv, conv)
- with pytest.raises(
- ValueError, match="module with duplicate children is not supported"
- ):
+ with pytest.raises(ValueError, match="module with duplicate children is not supported"):
Pipe(model)
@@ -721,17 +708,22 @@ def test_verify_module_params_on_same_device(setup_rpc):
with pytest.raises(
ValueError,
- match=r"should have all parameters on a single device, please use .to\(\)"
- " to place the module on a single device",
- ):
+ match=r'should have all parameters on a single device, please use .to\(\)'
+ ' to place the module on a single device'):
Pipe(model)
@pytest.mark.skipif(not TEST_MULTIGPU, reason="Need atleast two GPUs")
def test_verify_nested_modules(setup_rpc):
model = nn.Sequential(
- nn.Sequential(nn.Linear(32, 16).cuda(0), nn.Linear(16, 8).cuda(0)),
- nn.Sequential(nn.Linear(8, 4).cuda(1), nn.Linear(4, 2).cuda(1)),
+ nn.Sequential(
+ nn.Linear(32, 16).cuda(0),
+ nn.Linear(16, 8).cuda(0)
+ ),
+ nn.Sequential(
+ nn.Linear(8, 4).cuda(1),
+ nn.Linear(4, 2).cuda(1)
+ ),
)
pipe = Pipe(model)
@@ -793,11 +785,7 @@ def test_multiple_inputs(checkpoint, setup_rpc):
def forward(self, a, b):
return a + b
- model = Pipe(
- nn.Sequential(Module1().cuda(0), Module2().cuda(0)),
- chunks=2,
- checkpoint=checkpoint,
- )
+ model = Pipe(nn.Sequential(Module1().cuda(0), Module2().cuda(0)), chunks=2, checkpoint=checkpoint)
t = torch.rand(10)
res = model(t, t, t).local_value()
assert torch.equal(res, (t + t + t) + (t * t * t))
@@ -817,10 +805,7 @@ def test_inputs_wrong_device(setup_rpc):
a = torch.rand(10).cuda(1)
b = torch.rand(10).cuda(1)
model = Pipe(nn.Sequential(Module1().cuda(0), Module1().cuda(1)), chunks=2)
- with pytest.raises(
- ValueError,
- match="All inputs should be on the same device as the first partition",
- ):
+ with pytest.raises(ValueError, match='All inputs should be on the same device as the first partition'):
model(a, b)
@@ -830,27 +815,21 @@ def test_with_device_wrapper(setup_rpc):
fc2 = nn.Linear(8, 4).cuda(1)
dropout = nn.Dropout()
- model = nn.Sequential(fc1, fc2, WithDevice(dropout, "cuda:1"))
+ model = nn.Sequential(fc1, fc2, WithDevice(dropout, 'cuda:1'))
model = Pipe(model, chunks=8)
- assert (
- torch.device("cuda:1") == model(torch.rand(16, 16).cuda(0)).local_value().device
- )
- assert [torch.device("cuda:0"), torch.device("cuda:1")] == model.devices
+ assert torch.device('cuda:1') == model(torch.rand(16, 16).cuda(0)).local_value().device
+ assert [torch.device('cuda:0'), torch.device('cuda:1')] == model.devices
- model = nn.Sequential(fc1, WithDevice(dropout, "cuda:1"))
+ model = nn.Sequential(fc1, WithDevice(dropout, 'cuda:1'))
model = Pipe(model, chunks=8)
- assert (
- torch.device("cuda:1") == model(torch.rand(16, 16).cuda(0)).local_value().device
- )
- assert [torch.device("cuda:0"), torch.device("cuda:1")] == model.devices
+ assert torch.device('cuda:1') == model(torch.rand(16, 16).cuda(0)).local_value().device
+ assert [torch.device('cuda:0'), torch.device('cuda:1')] == model.devices
- model = nn.Sequential(fc1, WithDevice(fc2, "cuda:0"))
+ model = nn.Sequential(fc1, WithDevice(fc2, 'cuda:0'))
model = Pipe(model, chunks=8)
- assert (
- torch.device("cuda:0") == model(torch.rand(16, 16).cuda(0)).local_value().device
- )
- assert [torch.device("cuda:0")] == model.devices
- assert torch.device("cuda:0") == fc2.weight.device
+ assert torch.device('cuda:0') == model(torch.rand(16, 16).cuda(0)).local_value().device
+ assert [torch.device('cuda:0')] == model.devices
+ assert torch.device('cuda:0') == fc2.weight.device
if __name__ == "__main__":
diff --git a/test/distributed/pipeline/sync/test_stream.py b/test/distributed/pipeline/sync/test_stream.py
index 29281ca606..6fa8e99b13 100644
--- a/test/distributed/pipeline/sync/test_stream.py
+++ b/test/distributed/pipeline/sync/test_stream.py
@@ -23,9 +23,7 @@ from torch.distributed.pipeline.sync.stream import (
)
from torch.testing._internal.common_utils import run_tests
-skip_if_no_cuda = pytest.mark.skipif(
- not torch.cuda.is_available(), reason="cuda required"
-)
+skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class TestNewStream:
diff --git a/test/distributed/pipeline/sync/test_transparency.py b/test/distributed/pipeline/sync/test_transparency.py
index a87a04150f..88456b407d 100644
--- a/test/distributed/pipeline/sync/test_transparency.py
+++ b/test/distributed/pipeline/sync/test_transparency.py
@@ -22,12 +22,7 @@ def test_simple_linears(setup_rpc):
p.grad = None
inputs = torch.rand(8, 1)
- model = nn.Sequential(
- nn.Linear(1, 2),
- nn.Linear(2, 4),
- nn.Linear(4, 2),
- nn.Linear(2, 1),
- )
+ model = nn.Sequential(nn.Linear(1, 2), nn.Linear(2, 4), nn.Linear(4, 2), nn.Linear(2, 1),)
# Without Pipe
outputs = model(inputs)
diff --git a/test/distributed/pipeline/sync/test_worker.py b/test/distributed/pipeline/sync/test_worker.py
index ca5d99c576..7d347d48a2 100644
--- a/test/distributed/pipeline/sync/test_worker.py
+++ b/test/distributed/pipeline/sync/test_worker.py
@@ -13,7 +13,7 @@ import torch
from torch.distributed.pipeline.sync.microbatch import Batch
from torch.distributed.pipeline.sync.stream import CPUStream
-from torch.distributed.pipeline.sync.worker import spawn_workers, Task
+from torch.distributed.pipeline.sync.worker import Task, spawn_workers
from torch.testing._internal.common_utils import run_tests
@@ -25,7 +25,6 @@ class fake_device:
type = "fake"
index = None
-
def test_compute_multithreading():
"""Task.compute should be executed on multiple threads."""
thread_ids = set()
diff --git a/test/distributed/rpc/cuda/test_tensorpipe_agent.py b/test/distributed/rpc/cuda/test_tensorpipe_agent.py
index 12af5036b1..cef2e9d36a 100644
--- a/test/distributed/rpc/cuda/test_tensorpipe_agent.py
+++ b/test/distributed/rpc/cuda/test_tensorpipe_agent.py
@@ -9,19 +9,19 @@ if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
-import torch
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed.rpc.tensorpipe_rpc_agent_test_fixture import (
TensorPipeRpcAgentTestFixture,
)
from torch.testing._internal.distributed.rpc_utils import (
- generate_tests,
GENERIC_CUDA_TESTS,
TENSORPIPE_CUDA_TESTS,
+ generate_tests,
)
+import torch
if torch.cuda.is_available():
- torch.cuda.memory._set_allocator_settings("expandable_segments:False")
+ torch.cuda.memory._set_allocator_settings('expandable_segments:False')
globals().update(
generate_tests(
diff --git a/test/distributed/rpc/test_share_memory.py b/test/distributed/rpc/test_share_memory.py
index c587023722..8b538c44b6 100644
--- a/test/distributed/rpc/test_share_memory.py
+++ b/test/distributed/rpc/test_share_memory.py
@@ -1,36 +1,34 @@
#!/usr/bin/env python3
# Owner(s): ["oncall: distributed"]
+import torch
+import torch.distributed as dist
+
import contextlib
import copyreg
import os
import sys
-import torch
-import torch.distributed as dist
-
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
-import torch.distributed.rpc as rpc
-import torch.multiprocessing.reductions as TorchMpReductions
from torch import multiprocessing
-from torch.distributed.rpc.api import _use_rpc_pickler
+import torch.multiprocessing.reductions as TorchMpReductions
+import torch.distributed.rpc as rpc
from torch.distributed.rpc.internal import _InternalRPCPickler
-from torch.testing._internal.common_utils import run_tests, TestCase
-
+from torch.distributed.rpc.api import _use_rpc_pickler
+from torch.testing._internal.common_utils import TestCase, run_tests
@contextlib.contextmanager
def fs_sharing():
prev_strategy = multiprocessing.get_sharing_strategy()
- multiprocessing.set_sharing_strategy("file_system")
+ multiprocessing.set_sharing_strategy('file_system')
try:
yield
finally:
multiprocessing.set_sharing_strategy(prev_strategy)
-
class ShareMemoryRPCPickler(_InternalRPCPickler):
def __init__(self) -> None:
super().__init__()
@@ -48,36 +46,38 @@ class ShareMemoryRPCPickler(_InternalRPCPickler):
torch.nn.parameter.Parameter
] = TorchMpReductions.reduce_tensor
-
def worker_loop(a):
- rpc.init_rpc("worker1", rank=1, world_size=2)
+ rpc.init_rpc('worker1', rank=1, world_size=2)
rpc.shutdown()
-
def worker_fn(m):
pass
-
class TestRPCPickler(TestCase):
def test_case(self):
- os.environ["MASTER_ADDR"] = "localhost"
- os.environ["MASTER_PORT"] = "29500"
+ os.environ['MASTER_ADDR'] = 'localhost'
+ os.environ['MASTER_PORT'] = '29500'
with fs_sharing():
r = multiprocessing.spawn(worker_loop, join=False)
try:
with _use_rpc_pickler(ShareMemoryRPCPickler()):
- rpc.init_rpc("worker0", rank=0, world_size=2)
+ rpc.init_rpc(
+ 'worker0',
+ rank=0,
+ world_size=2)
m = torch.nn.Linear(1, 2)
m.share_memory()
- rref = rpc.remote("worker1", worker_fn, args=(m,))
+ rref = rpc.remote(
+ 'worker1',
+ worker_fn,
+ args=(m,))
rref.to_here()
finally:
rpc.shutdown()
r.join()
-
-if __name__ == "__main__":
+if __name__ == '__main__':
run_tests()
diff --git a/test/distributed/rpc/test_tensorpipe_agent.py b/test/distributed/rpc/test_tensorpipe_agent.py
index 56f176a17c..bb0870dd50 100644
--- a/test/distributed/rpc/test_tensorpipe_agent.py
+++ b/test/distributed/rpc/test_tensorpipe_agent.py
@@ -15,9 +15,9 @@ from torch.testing._internal.distributed.rpc.tensorpipe_rpc_agent_test_fixture i
TensorPipeRpcAgentTestFixture,
)
from torch.testing._internal.distributed.rpc_utils import (
- generate_tests,
GENERIC_TESTS,
TENSORPIPE_TESTS,
+ generate_tests,
)
diff --git a/test/distributed/tensor/parallel/test_ddp_2d_parallel.py b/test/distributed/tensor/parallel/test_ddp_2d_parallel.py
index 8c69bf25a8..ef059d9933 100644
--- a/test/distributed/tensor/parallel/test_ddp_2d_parallel.py
+++ b/test/distributed/tensor/parallel/test_ddp_2d_parallel.py
@@ -2,7 +2,7 @@
import torch
import torch.distributed as dist
-from torch.distributed._tensor import DeviceMesh, DTensor, init_device_mesh, Replicate
+from torch.distributed._tensor import DeviceMesh, DTensor, Replicate, init_device_mesh
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
@@ -39,11 +39,7 @@ def init_model(device_type, model_parallel_size=TP_DEGREE):
device_type=device_type,
mesh=torch.arange(0, world_size).view(-1, model_parallel_size),
)
- mesh_2d = init_device_mesh(
- device_type,
- (world_size // model_parallel_size, model_parallel_size),
- mesh_dim_names=("dp", "tp"),
- )
+ mesh_2d = init_device_mesh(device_type, (world_size // model_parallel_size, model_parallel_size), mesh_dim_names=("dp", "tp"))
dp_pg = mesh_2d.get_group(mesh_dim=0)
diff --git a/test/distributed/tensor/parallel/test_fsdp_2d_parallel.py b/test/distributed/tensor/parallel/test_fsdp_2d_parallel.py
index 329131290c..64bc628d1b 100644
--- a/test/distributed/tensor/parallel/test_fsdp_2d_parallel.py
+++ b/test/distributed/tensor/parallel/test_fsdp_2d_parallel.py
@@ -381,9 +381,7 @@ class TestNew2dParallelStateDict(DTensorTestBase):
"net1": ColwiseParallel(),
"net2": RowwiseParallel(),
}
- model_2d = parallelize_module(
- simple_model().cuda(), mesh_2d["tp"], parallelize_plan
- )
+ model_2d = parallelize_module(simple_model().cuda(), mesh_2d["tp"], parallelize_plan)
model_2d = FSDP(model_2d, device_mesh=mesh_2d["dp"], use_orig_params=True)
FSDP.set_state_dict_type(
model_2d,
diff --git a/test/distributed/tensor/parallel/test_parallelize_api.py b/test/distributed/tensor/parallel/test_parallelize_api.py
index 53f92ecd0d..ed5a7361d0 100644
--- a/test/distributed/tensor/parallel/test_parallelize_api.py
+++ b/test/distributed/tensor/parallel/test_parallelize_api.py
@@ -4,7 +4,9 @@ from copy import deepcopy
import torch
from torch.distributed._tensor import DeviceMesh, DTensor, Replicate, Shard
-from torch.distributed.tensor.parallel.api import parallelize_module
+from torch.distributed.tensor.parallel.api import (
+ parallelize_module,
+)
from torch.distributed.tensor.parallel.style import (
ColwiseParallel,
PrepareModuleInput,
@@ -175,8 +177,9 @@ class TensorParallelAPITests(DTensorTestBase):
module,
device_mesh,
PrepareModuleInput(
- input_layouts=Shard(0), desired_input_layouts=Replicate()
- ),
+ input_layouts=Shard(0),
+ desired_input_layouts=Replicate()
+ )
)
inp = torch.rand(5, 7, device=self.device_type)
output = module(inp).redistribute(device_mesh, [Shard(0)]).to_local()
@@ -190,8 +193,9 @@ class TensorParallelAPITests(DTensorTestBase):
module,
device_mesh,
PrepareModuleOutput(
- output_layouts=Replicate(), desired_output_layouts=Shard(0)
- ),
+ output_layouts=Replicate(),
+ desired_output_layouts=Shard(0)
+ )
)
torch.manual_seed(15)
inp = torch.rand(16, 7, device=self.device_type)
diff --git a/test/distributed/tensor/parallel/test_tp_examples.py b/test/distributed/tensor/parallel/test_tp_examples.py
index c85032fe2f..1733c3065a 100644
--- a/test/distributed/tensor/parallel/test_tp_examples.py
+++ b/test/distributed/tensor/parallel/test_tp_examples.py
@@ -1,19 +1,12 @@
# Copyright (c) Meta Platforms, Inc. and affiliates
# Owner(s): ["oncall: distributed"]
-import itertools
from copy import deepcopy
-
+import itertools
import torch
import torch.distributed as dist
import torch.nn.functional as F
-from torch.distributed._tensor import (
- DeviceMesh,
- distribute_tensor,
- DTensor,
- Replicate,
- Shard,
-)
+from torch.distributed._tensor import DeviceMesh, DTensor, Replicate, Shard, distribute_tensor
from torch.distributed._tensor.debug import CommDebugMode
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
checkpoint_wrapper,
@@ -44,7 +37,6 @@ from torch.testing._internal.distributed._tensor.common_dtensor import (
c10d_functional = torch.ops.c10d_functional
-
class DistTensorParallelExampleTest(DTensorTestBase):
def _check_module(self, m1, m2, check_grad=False):
named_parameters = dict(m1.named_parameters())
@@ -103,7 +95,6 @@ class DistTensorParallelExampleTest(DTensorTestBase):
output.sum().backward()
from torch.distributed._tensor.debug import CommDebugMode
-
comm_mode = CommDebugMode()
with comm_mode:
output_tp = model_tp(inp)
@@ -111,12 +102,8 @@ class DistTensorParallelExampleTest(DTensorTestBase):
self.assertEqual(output, output_tp)
if is_seq_parallel:
- self.assertEqual(
- comm_mode.get_comm_counts()[c10d_functional.all_gather_into_tensor], 2
- )
- self.assertEqual(
- comm_mode.get_comm_counts()[c10d_functional.reduce_scatter_tensor], 1
- )
+ self.assertEqual(comm_mode.get_comm_counts()[c10d_functional.all_gather_into_tensor], 2)
+ self.assertEqual(comm_mode.get_comm_counts()[c10d_functional.reduce_scatter_tensor], 1)
else:
self.assertEqual(comm_mode.get_comm_counts()[c10d_functional.all_reduce], 1)
@@ -227,22 +214,16 @@ class DistTensorParallelExampleTest(DTensorTestBase):
output_tp = model_tp(inp)
self.assertEqual(output, output_tp)
if is_seq_parallel:
- self.assertDictEqual(
- comm_mode.get_comm_counts(),
- {
- c10d_functional.all_reduce: 1,
- c10d_functional.reduce_scatter_tensor: 4,
- c10d_functional.all_gather_into_tensor: 7,
- },
- )
+ self.assertDictEqual(comm_mode.get_comm_counts(), {
+ c10d_functional.all_reduce: 1,
+ c10d_functional.reduce_scatter_tensor: 4,
+ c10d_functional.all_gather_into_tensor: 7,
+ })
else:
- self.assertDictEqual(
- comm_mode.get_comm_counts(),
- {
- c10d_functional.all_reduce: 5,
- c10d_functional.all_gather_into_tensor: 2,
- },
- )
+ self.assertDictEqual(comm_mode.get_comm_counts(), {
+ c10d_functional.all_reduce: 5,
+ c10d_functional.all_gather_into_tensor: 2,
+ })
# Ensure gradients are equal.
output.sum().backward()
@@ -250,21 +231,15 @@ class DistTensorParallelExampleTest(DTensorTestBase):
output_tp.sum().backward()
self._check_module(model, model_tp, check_grad=True)
if is_seq_parallel:
- self.assertDictEqual(
- comm_mode.get_comm_counts(),
- {
- c10d_functional.reduce_scatter_tensor: 4,
- c10d_functional.all_gather_into_tensor: 7,
- },
- )
+ self.assertDictEqual(comm_mode.get_comm_counts(), {
+ c10d_functional.reduce_scatter_tensor: 4,
+ c10d_functional.all_gather_into_tensor: 7,
+ })
else:
- self.assertDictEqual(
- comm_mode.get_comm_counts(),
- {
- c10d_functional.all_reduce: 8,
- c10d_functional.all_gather_into_tensor: 1,
- },
- )
+ self.assertDictEqual(comm_mode.get_comm_counts(), {
+ c10d_functional.all_reduce: 8,
+ c10d_functional.all_gather_into_tensor: 1,
+ })
# Ensure model weights are still the same after update.
optim.step()
@@ -272,12 +247,9 @@ class DistTensorParallelExampleTest(DTensorTestBase):
optim_tp.step()
self._check_module(model, model_tp)
if is_seq_parallel:
- self.assertDictEqual(
- comm_mode.get_comm_counts(),
- {
- c10d_functional.all_reduce: 30,
- },
- )
+ self.assertDictEqual(comm_mode.get_comm_counts(), {
+ c10d_functional.all_reduce: 30,
+ })
else:
self.assertDictEqual(comm_mode.get_comm_counts(), {})
@@ -358,9 +330,7 @@ class DistTensorParallelExampleTest(DTensorTestBase):
with loss_parallel():
if shard_dim == channel_dim:
with comm_mode:
- dist_y = F.cross_entropy(
- dist_x, target, weight, reduction=reduction
- )
+ dist_y = F.cross_entropy(dist_x, target, weight, reduction=reduction)
self.assertEqual(comm_mode.get_total_counts(), 3)
self.assertEqual(
comm_mode.get_comm_counts()[c10d_functional.all_reduce],
@@ -377,9 +347,7 @@ class DistTensorParallelExampleTest(DTensorTestBase):
y.backward()
dist_y.backward()
self.assertEqual(comm_mode.get_total_counts(), 0)
- self.assertTrue(
- dist_x.grad.placements[0].is_shard(shard_dim)
- )
+ self.assertTrue(dist_x.grad.placements[0].is_shard(shard_dim))
self.assertEqual(dist_x.grad.full_tensor(), x.grad)
x.grad.zero_()
else:
@@ -387,9 +355,8 @@ class DistTensorParallelExampleTest(DTensorTestBase):
ValueError,
"loss_parallel",
):
- dist_y = F.cross_entropy(
- dist_x, target, reduction=reduction
- )
+ dist_y = F.cross_entropy(dist_x, target, reduction=reduction)
+
instantiate_parametrized_tests(DistTensorParallelExampleTest)
diff --git a/test/distributed/tensor/parallel/test_tp_random_state.py b/test/distributed/tensor/parallel/test_tp_random_state.py
index 5c83802076..366006ac73 100644
--- a/test/distributed/tensor/parallel/test_tp_random_state.py
+++ b/test/distributed/tensor/parallel/test_tp_random_state.py
@@ -102,9 +102,7 @@ class TensorParallelRandomStateTests(DTensorTestBase):
# each rank within a TP group has the same initial weights
self.assertEqual(tensor1, tensor2)
- self.check_gathered_tensors(
- tp_rank, tp_size, tensor_gather, tp_weights_assert
- )
+ self.check_gathered_tensors(tp_rank, tp_size, tensor_gather, tp_weights_assert)
# check across TP groups
# all-gather local shards
@@ -125,9 +123,7 @@ class TensorParallelRandomStateTests(DTensorTestBase):
# random seeds set in data loading.
self.assertNotEqual(tensor1, tensor2)
- self.check_gathered_tensors(
- dp_rank, dp_size, tensor_gather, dp_weights_assert
- )
+ self.check_gathered_tensors(dp_rank, dp_size, tensor_gather, dp_weights_assert)
if __name__ == "__main__":
diff --git a/test/distributed/tensor/parallel/test_tp_style.py b/test/distributed/tensor/parallel/test_tp_style.py
index ab4f1ab8a7..47bd32a0c7 100644
--- a/test/distributed/tensor/parallel/test_tp_style.py
+++ b/test/distributed/tensor/parallel/test_tp_style.py
@@ -6,15 +6,9 @@ from copy import deepcopy
import torch
import torch.nn as nn
-from torch.distributed._tensor import (
- distribute_tensor,
- DTensor,
- init_device_mesh,
- Replicate,
- Shard,
-)
-from torch.distributed._tensor.debug import CommDebugMode
+from torch.distributed._tensor import Replicate, Shard, init_device_mesh, distribute_tensor, DTensor
from torch.distributed._tensor.placement_types import _Partial
+from torch.distributed._tensor.debug import CommDebugMode
from torch.distributed.tensor.parallel import parallelize_module
from torch.distributed.tensor.parallel.style import (
ColwiseParallel,
@@ -26,15 +20,14 @@ from torch.distributed.tensor.parallel.style import (
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
+ with_comms,
NUM_DEVICES,
RMSNormPython,
- with_comms,
)
c10d_functional = torch.ops.c10d_functional
-
class TensorParallelStyleTest(DTensorTestBase):
@property
def world_size(self):
@@ -50,9 +43,7 @@ class TensorParallelStyleTest(DTensorTestBase):
default_col_parallel = ColwiseParallel()
with comm_mode:
- colwise_mod = parallelize_module(
- deepcopy(model), mesh, default_col_parallel
- )
+ colwise_mod = parallelize_module(deepcopy(model), mesh, default_col_parallel)
out = colwise_mod(tensor)
# ensure output shard on the last dim
self.assertEqual(out.shape, (8, 16 // self.world_size))
@@ -66,23 +57,17 @@ class TensorParallelStyleTest(DTensorTestBase):
sharded_col_parallel = ColwiseParallel(input_layouts=Shard(0))
with comm_mode:
- colwise_mod = parallelize_module(
- deepcopy(model), mesh, sharded_col_parallel
- )
+ colwise_mod = parallelize_module(deepcopy(model), mesh, sharded_col_parallel)
out = colwise_mod(tensor)
# ensure output shard on the last dim
self.assertEqual(out.shape, (8 * self.world_size, 16 // self.world_size))
# allgather in fwd
- self.assertEqual(
- comm_mode.get_comm_counts()[c10d_functional.all_gather_into_tensor], 1
- )
+ self.assertEqual(comm_mode.get_comm_counts()[c10d_functional.all_gather_into_tensor], 1)
self.assertEqual(comm_mode.get_total_counts(), 1)
out.sum().backward()
# reduce_scatter in bwd
- self.assertEqual(
- comm_mode.get_comm_counts()[c10d_functional.reduce_scatter_tensor], 1
- )
+ self.assertEqual(comm_mode.get_comm_counts()[c10d_functional.reduce_scatter_tensor], 1)
self.assertEqual(comm_mode.get_total_counts(), 2)
@with_comms
@@ -95,9 +80,7 @@ class TensorParallelStyleTest(DTensorTestBase):
default_col_parallel = ColwiseParallel()
with comm_mode:
- colwise_mod = parallelize_module(
- deepcopy(model), mesh, default_col_parallel
- )
+ colwise_mod = parallelize_module(deepcopy(model), mesh, default_col_parallel)
out = colwise_mod(tensor)
# ensure output shard on the last dim
self.assertEqual(out.shape, (4, 2, 16 // self.world_size))
@@ -113,16 +96,12 @@ class TensorParallelStyleTest(DTensorTestBase):
mesh = init_device_mesh(self.device_type, (self.world_size,))
comm_mode = CommDebugMode()
- tensor = torch.rand(
- 8, 16 // self.world_size, device=self.device_type, requires_grad=True
- )
+ tensor = torch.rand(8, 16 // self.world_size, device=self.device_type, requires_grad=True)
model = nn.Linear(16, 16, device=self.device_type)
default_row_parallel = RowwiseParallel()
with comm_mode:
- rowwise_mod = parallelize_module(
- deepcopy(model), mesh, default_row_parallel
- )
+ rowwise_mod = parallelize_module(deepcopy(model), mesh, default_row_parallel)
out = rowwise_mod(tensor)
# ensure output replicated
self.assertEqual(out.shape, (8, 16))
@@ -136,23 +115,17 @@ class TensorParallelStyleTest(DTensorTestBase):
sharded_row_parallel = RowwiseParallel(output_layouts=Shard(0))
with comm_mode:
- rowwise_mod = parallelize_module(
- deepcopy(model), mesh, sharded_row_parallel
- )
+ rowwise_mod = parallelize_module(deepcopy(model), mesh, sharded_row_parallel)
out = rowwise_mod(tensor)
# ensure output replicated
self.assertEqual(out.shape, (8 // self.world_size, 16))
# reduce_scatter in fwd
- self.assertEqual(
- comm_mode.get_comm_counts()[c10d_functional.reduce_scatter_tensor], 1
- )
+ self.assertEqual(comm_mode.get_comm_counts()[c10d_functional.reduce_scatter_tensor], 1)
self.assertEqual(comm_mode.get_total_counts(), 1)
out.sum().backward()
# allgather in bwd
- self.assertEqual(
- comm_mode.get_comm_counts()[c10d_functional.all_gather_into_tensor], 1
- )
+ self.assertEqual(comm_mode.get_comm_counts()[c10d_functional.all_gather_into_tensor], 1)
self.assertEqual(comm_mode.get_total_counts(), 2)
@with_comms
@@ -164,9 +137,7 @@ class TensorParallelStyleTest(DTensorTestBase):
model = nn.Embedding(16, 16, device=self.device_type)
with comm_mode:
- rowwise_mod = parallelize_module(
- deepcopy(model), mesh, RowwiseParallel(input_layouts=Replicate())
- )
+ rowwise_mod = parallelize_module(deepcopy(model), mesh, RowwiseParallel(input_layouts=Replicate()))
out = rowwise_mod(tensor)
# ensure output shard on the last dim
self.assertEqual(out.shape, (4, 2, 16))
@@ -178,21 +149,21 @@ class TensorParallelStyleTest(DTensorTestBase):
# no comm in bwd
self.assertEqual(comm_mode.get_total_counts(), 1)
+
@with_comms
def test_prepare_module_input(self):
mesh = init_device_mesh(self.device_type, (self.world_size,))
tensor = torch.ones(2, 16, device=self.device_type)
expected_tensor = torch.ones(2 * self.world_size, 16, device=self.device_type)
- prepare_inp_style = PrepareModuleInput(
- input_layouts=Shard(0), desired_input_layouts=Replicate()
- )
+ prepare_inp_style = PrepareModuleInput(input_layouts=Shard(0), desired_input_layouts=Replicate())
model = nn.Identity()
allgather_mod = parallelize_module(model, mesh, prepare_inp_style)
output = allgather_mod(tensor).full_tensor()
self.assertEqual(output, expected_tensor)
+
@with_comms
def test_prepare_module_input_multiple_inputs(self):
mesh = init_device_mesh(self.device_type, (self.world_size,))
@@ -207,41 +178,26 @@ class TensorParallelStyleTest(DTensorTestBase):
# Raise assertion error if input_layouts and desired_input_layouts do not have same length.
test_mod = TestModule().to(self.device_type)
- with self.assertRaisesRegex(
- AssertionError,
- "input_layouts and desired_input_layouts should have same length!",
- ):
- prepare_inps_dimension_mismatch = PrepareModuleInput(
- input_layouts=Shard(0), desired_input_layouts=(Replicate(), None)
- )
+ with self.assertRaisesRegex(AssertionError, "input_layouts and desired_input_layouts should have same length!"):
+ prepare_inps_dimension_mismatch = PrepareModuleInput(input_layouts=Shard(0), desired_input_layouts=(Replicate(), None))
# Raise assertion error if module inputs and input_layouts do not have same length.
- prepare_inps_short_dimension = PrepareModuleInput(
- input_layouts=Shard(0), desired_input_layouts=Replicate()
- )
+ prepare_inps_short_dimension = PrepareModuleInput(input_layouts=Shard(0), desired_input_layouts=Replicate())
parallelize_module(test_mod.linear, mesh, ColwiseParallel())
parallelize_module(test_mod, mesh, prepare_inps_short_dimension)
- with self.assertRaisesRegex(
- ValueError, "module inputs and input_layouts should have same length!"
- ):
+ with self.assertRaisesRegex(ValueError, "module inputs and input_layouts should have same length!"):
output = test_mod(
torch.randn(2, 8, device=self.device_type),
- torch.ones(
- self.world_size * 2, 8 // self.world_size, device=self.device_type
- ),
+ torch.ones(self.world_size * 2, 8 // self.world_size, device=self.device_type)
)
test_mod = TestModule().to(self.device_type)
- prepare_inps = PrepareModuleInput(
- input_layouts=(Shard(0), None), desired_input_layouts=(Replicate(), None)
- )
+ prepare_inps = PrepareModuleInput(input_layouts=(Shard(0), None), desired_input_layouts=(Replicate(), None))
parallelize_module(test_mod.linear, mesh, ColwiseParallel())
parallelize_module(test_mod, mesh, prepare_inps)
output = test_mod(
torch.randn(2, 8, device=self.device_type),
- torch.ones(
- self.world_size * 2, 8 // self.world_size, device=self.device_type
- ),
+ torch.ones(self.world_size * 2, 8 // self.world_size, device=self.device_type)
)
self.assertEqual(output.shape, (self.world_size * 2, 8 // self.world_size))
@@ -251,9 +207,7 @@ class TensorParallelStyleTest(DTensorTestBase):
tensor = torch.ones(8, 16, device=self.device_type)
expected_tensor = torch.ones(8 // self.world_size, 16, device=self.device_type)
- prepare_out_style = PrepareModuleOutput(
- output_layouts=Replicate(), desired_output_layouts=Shard(0)
- )
+ prepare_out_style = PrepareModuleOutput(output_layouts=Replicate(), desired_output_layouts=Shard(0))
model = nn.Identity()
chunk_mod = parallelize_module(model, mesh, prepare_out_style)
@@ -267,22 +221,12 @@ class TensorParallelStyleTest(DTensorTestBase):
comm_mode = CommDebugMode()
batch, N, embedding_dim = 20, 8, 12
- global_input = torch.rand(
- batch,
- N * self.world_size,
- embedding_dim,
- device=self.device_type,
- requires_grad=True,
- )
+ global_input = torch.rand(batch, N * self.world_size, embedding_dim, device=self.device_type, requires_grad=True)
sharded_input = distribute_tensor(global_input, mesh, [Shard(1)])
# test LayerNorm
for elementwise_affine in [True, False]:
- norm = nn.LayerNorm(
- embedding_dim,
- elementwise_affine=elementwise_affine,
- device=self.device_type,
- )
+ norm = nn.LayerNorm(embedding_dim, elementwise_affine=elementwise_affine, device=self.device_type)
sp_norm = parallelize_module(deepcopy(norm), mesh, SequenceParallel())
output = norm(global_input)
@@ -295,9 +239,7 @@ class TensorParallelStyleTest(DTensorTestBase):
self.assertIsInstance(sharded_out, DTensor)
self.assertEqual(sharded_out.placements, (Shard(1),))
self.assertEqual(comm_mode.get_total_counts(), 0)
- self.assertEqual(
- comm_mode.get_comm_counts()[c10d_functional.all_reduce], 0
- )
+ self.assertEqual(comm_mode.get_comm_counts()[c10d_functional.all_reduce], 0)
if elementwise_affine:
self.assertEqual(sp_norm.weight.grad.placements, (_Partial(),))
self.assertEqual(sp_norm.bias.grad.placements, (_Partial(),))
diff --git a/test/distributed/test_c10d_common.py b/test/distributed/test_c10d_common.py
index 654d0e1dab..dd17e7dc03 100644
--- a/test/distributed/test_c10d_common.py
+++ b/test/distributed/test_c10d_common.py
@@ -21,8 +21,8 @@ if not dist.is_available():
print("distributed package not available, skipping tests", file=sys.stderr)
sys.exit(0)
-import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.distributed.distributed_c10d as c10d
+import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
@@ -34,13 +34,13 @@ from torch.testing._internal.common_distributed import (
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
- instantiate_parametrized_tests,
- load_tests,
- parametrize,
retry_on_connect_failures,
+ TestCase,
+ load_tests,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
- TestCase,
+ instantiate_parametrized_tests,
+ parametrize
)
from torch.utils.checkpoint import checkpoint
@@ -141,13 +141,7 @@ class TimeoutTest(TestCase):
if init_type == "file":
barrier_store = dist.FileStore(f.name)
elif init_type == "tcp":
- barrier_store = dist.TCPStore(
- "localhost",
- port,
- world_size,
- is_master=rank == 0,
- wait_for_workers=False,
- )
+ barrier_store = dist.TCPStore("localhost", port, world_size, is_master=rank == 0, wait_for_workers=False)
elif init_type == "hash":
barrier_store = dist.HashStore()
try:
@@ -159,7 +153,7 @@ class TimeoutTest(TestCase):
group_name="_",
rendezvous_count=world_size,
timeout=timeout,
- logging_interval=timeout / 2,
+ logging_interval=timeout / 2
)
except torch.distributed.DistStoreError as e:
self.assertTrue(isinstance(e, torch.distributed.DistError))
@@ -171,14 +165,7 @@ class TimeoutTest(TestCase):
for init_type in ["file", "tcp", "hash"]:
for rank in range(world_size):
t = threading.Thread(
- target=thread_work,
- args=(
- timedelta(seconds=3),
- init_type,
- world_size,
- rank,
- error_list,
- ),
+ target=thread_work, args=(timedelta(seconds=3), init_type, world_size, rank, error_list,)
)
threads.append(t)
t.start()
@@ -189,14 +176,10 @@ class TimeoutTest(TestCase):
# we expect the world_size-1 threads to have failed
self.assertEqual(len(error_list), world_size - 1)
for error in error_list:
- self.assertTrue(
- "Timed out initializing process group in store based barrier"
- in error.args[0]
- )
+ self.assertTrue("Timed out initializing process group in store based barrier" in error.args[0])
error_list = []
threads = []
-
class Net(nn.Module):
def __init__(self):
super().__init__()
@@ -392,9 +375,7 @@ class CommonDistributedDataParallelTest:
def _get_process_group(self):
raise NotImplementedError("To be implemented by child class")
- def _train_model(
- self, model, input_var, target, loss, run_checkpoint=False, use_reentrant=True
- ):
+ def _train_model(self, model, input_var, target, loss, run_checkpoint=False, use_reentrant=True):
model.train()
if run_checkpoint:
output = checkpoint(model, input_var, use_reentrant=use_reentrant)
@@ -437,21 +418,9 @@ class CommonDistributedDataParallelTest:
for i in range(n_iters):
model.zero_grad(set_to_none=False)
ddp_model.zero_grad(set_to_none=False)
+ self._train_model(model, input, target, loss, run_checkpoint=run_checkpoint, use_reentrant=use_reentrant)
self._train_model(
- model,
- input,
- target,
- loss,
- run_checkpoint=run_checkpoint,
- use_reentrant=use_reentrant,
- )
- self._train_model(
- ddp_model,
- ddp_input,
- ddp_target,
- loss,
- run_checkpoint=run_checkpoint,
- use_reentrant=use_reentrant,
+ ddp_model, ddp_input, ddp_target, loss, run_checkpoint=run_checkpoint, use_reentrant=use_reentrant
)
for i, j in zip(model.parameters(), ddp_model.parameters()):
if not allow_none_grads:
@@ -467,7 +436,6 @@ class CommonDistributedDataParallelTest:
"""
Runs checkpoint for a single layer in the model.
"""
-
def __init__(self, use_reentrant=True):
super().__init__()
self.l1 = nn.Linear(20, 20)
@@ -485,7 +453,6 @@ class CommonDistributedDataParallelTest:
cases such as pipeline parallel where the same layer can be checkpointed
more than one time.
"""
-
def __init__(self, use_reentrant=True):
super().__init__(use_reentrant=use_reentrant)
@@ -499,7 +466,6 @@ class CommonDistributedDataParallelTest:
"""
Similar to CheckpointTwiceModule but the weights are shared.
"""
-
def __init__(self, use_reentrant=True):
super().__init__(use_reentrant=use_reentrant)
# Share weights
@@ -511,6 +477,7 @@ class CommonDistributedDataParallelTest:
x = checkpoint(self.l2, x, use_reentrant=self.use_reentrant)
return x
+
class DynamicCheckpointTwiceModule(CheckpointTwiceModule):
def __init__(self, use_reentrant=True):
super().__init__(use_reentrant=use_reentrant)
@@ -531,6 +498,7 @@ class CommonDistributedDataParallelTest:
# Share weights
self.l1.weight = self.l2.weight
+
def _prepare_dummy_data(self):
ddp_bs = 16
bs = ddp_bs * self.world_size
@@ -541,6 +509,7 @@ class CommonDistributedDataParallelTest:
ddp_target = target[offset : offset + ddp_bs]
return input, ddp_input, target, ddp_target
+
@skip_if_lt_x_gpu(2)
@parametrize("use_reentrant", [True, False])
def test_ddp_checkpointing_once(self, use_reentrant):
@@ -577,10 +546,10 @@ class CommonDistributedDataParallelTest:
process_group = self._get_process_group()
for use_bucket_view in (True, False):
err_ctx = (
- nullcontext()
- if not use_reentrant
- else self.assertRaisesRegex(
- RuntimeError, "Expected to mark a variable ready only once."
+ nullcontext() if not use_reentrant else
+ self.assertRaisesRegex(
+ RuntimeError,
+ "Expected to mark a variable ready only once."
)
)
with err_ctx:
@@ -609,10 +578,10 @@ class CommonDistributedDataParallelTest:
process_group = self._get_process_group()
for use_bucket_view in (True, False):
err_ctx = (
- nullcontext()
- if not use_reentrant
- else self.assertRaisesRegex(
- RuntimeError, "Expected to mark a variable ready only once."
+ nullcontext() if not use_reentrant else
+ self.assertRaisesRegex(
+ RuntimeError,
+ "Expected to mark a variable ready only once."
)
)
with err_ctx:
@@ -665,7 +634,7 @@ class CommonDistributedDataParallelTest:
find_unused_parameters=True,
# Grads can be none sometimes due to dynamic module not using
# all params.
- allow_none_grads=True,
+ allow_none_grads=True
)
@skip_if_lt_x_gpu(2)
@@ -684,7 +653,7 @@ class CommonDistributedDataParallelTest:
find_unused_parameters=True,
# Grads can be none sometimes due to dynamic module not using
# all params.
- allow_none_grads=True,
+ allow_none_grads=True
)
# DDP works as expected if there is weight sharing among layers
@@ -912,7 +881,7 @@ class CommonDistributedDataParallelTest:
x = torch.zeros(
(1 if self.rank != 0 else 0, 2, 11, 13),
dtype=torch.float32,
- device=self.rank,
+ device=self.rank
)
# input requires grad, this will trigger the collective communication
@@ -925,7 +894,11 @@ class CommonDistributedDataParallelTest:
self._test_not_nan(model, x)
# all ranks receive empty inputs
- x = torch.zeros((0, 2, 11, 13), dtype=torch.float32, device=self.rank)
+ x = torch.zeros(
+ (0, 2, 11, 13),
+ dtype=torch.float32,
+ device=self.rank
+ )
# input requires grad, this will trigger the collective communication
# in the backward pass
@@ -960,13 +933,17 @@ class CommonDistributedDataParallelTest:
x = torch.zeros(
(3 if self.rank != 0 else 0, 2, 30, 30),
dtype=torch.float32,
- device=self.rank,
+ device=self.rank
)
self._test_not_nan(model, x)
# all ranks receive empty inputs
- x = torch.zeros((0, 2, 30, 30), dtype=torch.float32, device=self.rank)
+ x = torch.zeros(
+ (0, 2, 30, 30),
+ dtype=torch.float32,
+ device=self.rank
+ )
self._test_not_nan(model, x)
@@ -985,13 +962,16 @@ class CommonDistributedDataParallelTest:
def forward(self, x):
o1 = None if self.skip_o1 else self.relu(self.seq1(x))
- o2 = {"a": self.seq2(x), "b": self.relu(self.seq2(x))}
+ o2 = {
+ "a": self.seq2(x),
+ "b": self.relu(self.seq2(x))
+ }
return CommonDistributedDataParallelTest.CustomOutput(o1=o1, o2=o2)
def _test_dataclass_output(self, skip_o1):
- net_x = torch.cat([torch.ones(4, 10) * i for i in range(self.world_size)]).to(
- self.rank
- )
+ net_x = torch.cat(
+ [torch.ones(4, 10) * i for i in range(self.world_size)]
+ ).to(self.rank)
ddp_x = torch.ones(4, 10, device=self.rank) * self.rank
# use manual_seed to make sure local models start with the same values
@@ -1111,6 +1091,7 @@ class AbstractCommTest:
self.fail("test subclass didn't override device")
def _verify_sequence_number_across_pg(self, pg, verify_pg):
+
seq_num = pg._get_sequence_number_for_group()
obj_list = [None for _ in range(dist.get_world_size(verify_pg))]
# We use a separate pg to verify the sequence numbers, otherwise these
@@ -1292,12 +1273,11 @@ class AbstractCommTest:
tensor = torch.ones(2, 2, device=self.device) * 7
tensor_h = tensor.half()
- tensor_list = [
- torch.zeros(2, 2, device=self.device) for _ in range(self.world_size)
- ]
+ tensor_list = [torch.zeros(2, 2, device=self.device) for _ in range(self.world_size)]
tensor_list_h = list(tensor_list)
tensor_list_h[1] = tensor_list_h[1].half()
+
with self.assertRaisesRegex(ValueError, "tensors with different dtypes"):
dist.all_gather(tensor_list_h, tensor)
@@ -1349,9 +1329,7 @@ class AbstractCommTest:
tensor = torch.rand(2, device=self.device)
tensor_c = torch.view_as_complex(tensor)
- tensor_list = [
- torch.rand(2, device=self.device) for _ in range(self.world_size)
- ]
+ tensor_list = [torch.rand(2, device=self.device) for _ in range(self.world_size)]
tensor_list_c = list(tensor_list)
tensor_list_c[1] = torch.view_as_complex(tensor_list_c[1])
@@ -1376,7 +1354,6 @@ class AbstractCommTest:
dist.broadcast(outensor, src=0)
self.assertEqual(outensor, tensor)
-
# Variant of AbstractCommTest that expects world size of 4
class AbstractLargeCommTest:
@property
@@ -1405,9 +1382,7 @@ class AbstractLargeCommTest:
self.assertIn(rank, ranks_in)
self.assertNotIn(rank, ranks_out)
- self.assertIsNone(
- dist.new_group(ranks=ranks_out, use_local_synchronization=True)
- )
+ self.assertIsNone(dist.new_group(ranks=ranks_out, use_local_synchronization=True))
new_pg = dist.new_group(ranks=ranks_in, use_local_synchronization=True)
self.assertIsInstance(new_pg, dist.ProcessGroup)
@@ -1418,7 +1393,7 @@ class AbstractLargeCommTest:
self.assertEqual(
ranks_in,
dist.get_process_group_ranks(new_pg),
- f"expecting {ranks_in} but got {dist.get_process_group_ranks(new_pg)}",
+ f"expecting {ranks_in} but got {dist.get_process_group_ranks(new_pg)}"
)
def _test_new_group_local_sync_sanity_check(self, backend):
@@ -1438,18 +1413,12 @@ class AbstractLargeCommTest:
new_pg = dist.new_group(ranks=ranks_in, use_local_synchronization=True)
input_tensor = torch.tensor([pg_idx, rank], device=self.device)
- output_tensor_list = [
- torch.tensor(
- [-1, -1],
- device=self.device,
- )
- for _ in range(new_pg.size())
- ]
+ output_tensor_list = [torch.tensor([-1, -1], device=self.device,) for _ in range(new_pg.size())]
dist.all_gather(output_tensor_list, input_tensor, group=new_pg)
expected = [
torch.tensor([pg_idx, ranks_in[0]], device=self.device),
- torch.tensor([pg_idx, ranks_in[1]], device=self.device),
+ torch.tensor([pg_idx, ranks_in[1]], device=self.device)
]
self.assertEqual(output_tensor_list, expected)
@@ -1480,17 +1449,13 @@ class AbstractLargeCommTest:
input_tensor = torch.tensor([pg_idx, rank], device=self.device)
for new_pg in new_pgs:
output_tensor_list = [
- torch.tensor(
- [-1, -1],
- device=self.device,
- )
- for _ in range(new_pg.size())
+ torch.tensor([-1, -1], device=self.device,) for _ in range(new_pg.size())
]
dist.all_gather(output_tensor_list, input_tensor, group=new_pg)
expected = [
torch.tensor([pg_idx, ranks_in[0]], device=self.device),
- torch.tensor([pg_idx, ranks_in[1]], device=self.device),
+ torch.tensor([pg_idx, ranks_in[1]], device=self.device)
]
self.assertEqual(output_tensor_list, expected)
@@ -1542,9 +1507,7 @@ class CommTest(AbstractCommTest, MultiProcessTestCase):
for mode in invalid_debug_modes:
os.environ["TORCH_DISTRIBUTED_DEBUG"] = str(mode)
- with self.assertRaisesRegex(
- ValueError, "The value of TORCH_DISTRIBUTED_DEBUG must"
- ):
+ with self.assertRaisesRegex(ValueError, "The value of TORCH_DISTRIBUTED_DEBUG must"):
dist.set_debug_level_from_env()
@@ -1560,9 +1523,7 @@ class DummyProcessGroup(dist.ProcessGroup):
return "Dummy"
def allgather(self, output_tensor_lists, input_tensor_list, opts=None):
- for output_tensor_list, input_tensor in zip(
- output_tensor_lists, input_tensor_list
- ):
+ for output_tensor_list, input_tensor in zip(output_tensor_lists, input_tensor_list):
for output_tensor in output_tensor_list:
output_tensor.copy_(input_tensor)
@@ -1600,9 +1561,7 @@ class DummyProcessGroup(dist.ProcessGroup):
return DummyWork()
def reduce_scatter(self, output_tensor_list, input_tensor_lists, opts=None):
- for output_tensor, input_tensor_list in zip(
- output_tensor_list, input_tensor_lists
- ):
+ for output_tensor, input_tensor_list in zip(output_tensor_list, input_tensor_lists):
output_tensor.copy_(input_tensor_list[self.rank()])
return DummyWork()
@@ -1638,25 +1597,28 @@ class PythonProcessGroupExtensionTest(MultiProcessTestCase):
def test_backend_class_attr(self):
dist.Backend.register_backend(
- "dummy", PythonProcessGroupExtensionTest.create_dummy
+ "dummy",
+ PythonProcessGroupExtensionTest.create_dummy
)
self.assertEqual(dist.Backend.DUMMY, "dummy")
self.assertEqual(
dist.Backend._plugins["DUMMY"].creator_fn,
- PythonProcessGroupExtensionTest.create_dummy,
+ PythonProcessGroupExtensionTest.create_dummy
)
def test_is_backend_available(self):
self.assertEqual(dist.is_ucc_available(), dist.is_backend_available("ucc"))
self.assertFalse(dist.is_backend_available("dummy"))
dist.Backend.register_backend(
- "dummy", PythonProcessGroupExtensionTest.create_dummy
+ "dummy",
+ PythonProcessGroupExtensionTest.create_dummy
)
self.assertTrue(dist.is_backend_available("dummy"))
def test_backend_config(self):
dist.Backend.register_backend(
- "dummy", PythonProcessGroupExtensionTest.create_dummy
+ "dummy",
+ PythonProcessGroupExtensionTest.create_dummy
)
# Ensure backend config can be created with the following arguments
@@ -1691,15 +1653,11 @@ class PythonProcessGroupExtensionTest(MultiProcessTestCase):
dist.BackendConfig(config_str)
def test_init_process_group_with_multiple_backends(self):
- dist.Backend.register_backend(
- "dummy", PythonProcessGroupExtensionTest.create_dummy
- )
+ dist.Backend.register_backend("dummy", PythonProcessGroupExtensionTest.create_dummy)
- os.environ["MASTER_ADDR"] = "localhost"
- os.environ["MASTER_PORT"] = "6789"
- dist.init_process_group(
- "cpu:dummy,cuda:dummy", rank=self.rank, world_size=self.world_size
- )
+ os.environ['MASTER_ADDR'] = 'localhost'
+ os.environ['MASTER_PORT'] = '6789'
+ dist.init_process_group("cpu:dummy,cuda:dummy", rank=self.rank, world_size=self.world_size)
# test all_gather
input_tensor = torch.ones(2, 2) * 7
@@ -1721,12 +1679,10 @@ class PythonProcessGroupExtensionTest(MultiProcessTestCase):
return DummyProcessGroup(group_rank, group_size)
def test_collectives(self):
- dist.Backend.register_backend(
- "dummy", PythonProcessGroupExtensionTest.create_dummy
- )
+ dist.Backend.register_backend("dummy", PythonProcessGroupExtensionTest.create_dummy)
- os.environ["MASTER_ADDR"] = "localhost"
- os.environ["MASTER_PORT"] = "6789"
+ os.environ['MASTER_ADDR'] = 'localhost'
+ os.environ['MASTER_PORT'] = '6789'
dist.init_process_group("dummy", rank=self.rank, world_size=self.world_size)
# test all_gather
@@ -1757,12 +1713,10 @@ class PythonProcessGroupExtensionTest(MultiProcessTestCase):
dist.destroy_process_group()
def test_send_recv(self):
- dist.Backend.register_backend(
- "dummy", PythonProcessGroupExtensionTest.create_dummy
- )
+ dist.Backend.register_backend("dummy", PythonProcessGroupExtensionTest.create_dummy)
- os.environ["MASTER_ADDR"] = "localhost"
- os.environ["MASTER_PORT"] = "6789"
+ os.environ['MASTER_ADDR'] = 'localhost'
+ os.environ['MASTER_PORT'] = '6789'
dist.init_process_group("dummy", rank=self.rank, world_size=self.world_size)
# test send
@@ -1785,7 +1739,6 @@ class PythonProcessGroupExtensionTest(MultiProcessTestCase):
instantiate_parametrized_tests(CommonDistributedDataParallelTest)
-
class ProcessGroupWithDispatchedCollectivesTests(MultiProcessTestCase):
@property
def world_size(self):
@@ -1838,7 +1791,7 @@ class ProcessGroupWithDispatchedCollectivesTests(MultiProcessTestCase):
backend=backend,
rank=self.rank,
world_size=self.world_size,
- store=store,
+ store=store
)
pg = c10d._get_default_group()
self.assertEqual(pg.rank(), self.rank)
@@ -1924,7 +1877,6 @@ class ProcessGroupWithDispatchedCollectivesTests(MultiProcessTestCase):
output_tensor = torch.zeros(2, 2, device=torch.device(device))
dist.all_to_all_single(output_tensor, input_tensor)
-
class CompilerTest(MultiProcessTestCase):
def setUp(self):
super().setUp()
@@ -1976,18 +1928,14 @@ class CompilerTest(MultiProcessTestCase):
commed = False
while prev is not None and not commed:
curr = prev
- waited |= all(
- [
- curr.op == "call_function",
- curr.target == _wait_comm,
- ]
- )
- commed |= all(
- [
- curr.op == "call_function",
- CommTensor._is_supported(curr.target.__name__),
- ]
- )
+ waited |= all([
+ curr.op == "call_function",
+ curr.target == _wait_comm,
+ ])
+ commed |= all([
+ curr.op == "call_function",
+ CommTensor._is_supported(curr.target.__name__),
+ ])
prev = curr.args[0]
@@ -2030,9 +1978,7 @@ class CompilerTest(MultiProcessTestCase):
def comm_fn(tensor, group=None):
out_tensors = [torch.zeros_like(tensor) for _ in range(group.size())]
output_tensor = torch.cat(out_tensors, dim=0)
- work = dist.all_gather_into_tensor(
- output_tensor, tensor, group=group, async_op=True
- )
+ work = dist.all_gather_into_tensor(output_tensor, tensor, group=group, async_op=True)
work.wait()
return work, output_tensor
@@ -2043,9 +1989,7 @@ class CompilerTest(MultiProcessTestCase):
def comm_fn(tensor, group=None):
in_tensors = [tensor.clone() + i for i in range(group.size())]
out_tensor = torch.zeros_like(tensor)
- work = dist.reduce_scatter(
- out_tensor, in_tensors, group=group, async_op=True
- )
+ work = dist.reduce_scatter(out_tensor, in_tensors, group=group, async_op=True)
return work, out_tensor
self._test_work_wait(tensor, comm_fn=comm_fn)
@@ -2053,9 +1997,7 @@ class CompilerTest(MultiProcessTestCase):
def _test_reduce_scatter_tensor_work_wait(self, tensor):
def comm_fn(tensor, group=None):
out_tensor = torch.zeros_like(tensor).chunk(group.size(), dim=0)[self.rank]
- work = dist.reduce_scatter_tensor(
- out_tensor, tensor, group=group, async_op=True
- )
+ work = dist.reduce_scatter_tensor(out_tensor, tensor, group=group, async_op=True)
return work, out_tensor
self._test_work_wait(tensor, comm_fn=comm_fn)
@@ -2069,13 +2011,9 @@ class CompilerTest(MultiProcessTestCase):
def _test_scatter_work_wait(self, tensor):
def comm_fn(tensor, group=None):
- in_tensors = (
- [tensor + i for i in range(group.size())] if self.rank == 0 else None
- )
+ in_tensors = [tensor + i for i in range(group.size())] if self.rank == 0 else None
out_tensor = torch.zeros_like(tensor)
- work = dist.scatter(
- out_tensor, in_tensors, src=0, group=group, async_op=True
- )
+ work = dist.scatter(out_tensor, in_tensors, src=0, group=group, async_op=True)
return work, out_tensor
self._test_work_wait(tensor, comm_fn=comm_fn)
@@ -2107,35 +2045,22 @@ class CompilerTest(MultiProcessTestCase):
class ReduceOpTest(TestCase):
+
# Ref: https://github.com/pytorch/pytorch/issues/87191
def test_op_isinstance_of_reduceop(self):
for reduce_op in (
- c10d.ReduceOp.SUM,
- c10d.ReduceOp.AVG,
- c10d.ReduceOp.PRODUCT,
- c10d.ReduceOp.MIN,
- c10d.ReduceOp.MAX,
- c10d.ReduceOp.BAND,
- c10d.ReduceOp.BOR,
- c10d.ReduceOp.BXOR,
+ c10d.ReduceOp.SUM, c10d.ReduceOp.AVG, c10d.ReduceOp.PRODUCT, c10d.ReduceOp.MIN, c10d.ReduceOp.MAX,
+ c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR,
):
self.assertTrue(isinstance(reduce_op, c10d.ReduceOp))
for scale in (torch.tensor(1.0), 2.0):
- self.assertTrue(
- isinstance(dist._make_nccl_premul_sum(scale), c10d.ReduceOp)
- )
+ self.assertTrue(isinstance(dist._make_nccl_premul_sum(scale), c10d.ReduceOp))
# Ref: https://github.com/pytorch/pytorch/pull/87303#discussion_r1002879700
def test_reduceop_copyable(self):
for reduce_op in (
- c10d.ReduceOp.SUM,
- c10d.ReduceOp.AVG,
- c10d.ReduceOp.PRODUCT,
- c10d.ReduceOp.MIN,
- c10d.ReduceOp.MAX,
- c10d.ReduceOp.BAND,
- c10d.ReduceOp.BOR,
- c10d.ReduceOp.BXOR,
+ c10d.ReduceOp.SUM, c10d.ReduceOp.AVG, c10d.ReduceOp.PRODUCT, c10d.ReduceOp.MIN, c10d.ReduceOp.MAX,
+ c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR,
):
self.assertEqual(copy.copy(reduce_op), reduce_op)
self.assertEqual(copy.deepcopy(reduce_op), reduce_op)
@@ -2149,14 +2074,8 @@ class ReduceOpTest(TestCase):
def test_reduceop_pickle(self):
for reduce_op in (
- c10d.ReduceOp.SUM,
- c10d.ReduceOp.AVG,
- c10d.ReduceOp.PRODUCT,
- c10d.ReduceOp.MIN,
- c10d.ReduceOp.MAX,
- c10d.ReduceOp.BAND,
- c10d.ReduceOp.BOR,
- c10d.ReduceOp.BXOR,
+ c10d.ReduceOp.SUM, c10d.ReduceOp.AVG, c10d.ReduceOp.PRODUCT, c10d.ReduceOp.MIN, c10d.ReduceOp.MAX,
+ c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR,
):
pickle.loads(pickle.dumps(reduce_op))
orig = c10d.ReduceOp(reduce_op)
@@ -2169,14 +2088,8 @@ class ReduceOpTest(TestCase):
def test_reduceop_equal(self):
not_reduceop = "abc"
for reduce_op in (
- c10d.ReduceOp.SUM,
- c10d.ReduceOp.AVG,
- c10d.ReduceOp.PRODUCT,
- c10d.ReduceOp.MIN,
- c10d.ReduceOp.MAX,
- c10d.ReduceOp.BAND,
- c10d.ReduceOp.BOR,
- c10d.ReduceOp.BXOR,
+ c10d.ReduceOp.SUM, c10d.ReduceOp.AVG, c10d.ReduceOp.PRODUCT, c10d.ReduceOp.MIN, c10d.ReduceOp.MAX,
+ c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR,
):
reduce_op_obj = c10d.ReduceOp(reduce_op)
# this calls `ReduceOp.__eq__(self, other)`
diff --git a/test/distributed/test_c10d_gloo.py b/test/distributed/test_c10d_gloo.py
index 34f8849ddb..b82e7f641a 100644
--- a/test/distributed/test_c10d_gloo.py
+++ b/test/distributed/test_c10d_gloo.py
@@ -339,12 +339,8 @@ class ProcessGroupGlooTest(MultiProcessTestCase):
]
output = broadcast(xs, i, j)
- self.assertEqual(
- torch.tensor([i * num + j], dtype=torch.float32), output[0]
- )
- self.assertEqual(
- torch.tensor([i * num + j], dtype=torch.float32), output[1]
- )
+ self.assertEqual(torch.tensor([i * num + j], dtype=torch.float32), output[0])
+ self.assertEqual(torch.tensor([i * num + j], dtype=torch.float32), output[1])
# Test overloaded convenience function
x = torch.tensor([self.rank + 1.0])
@@ -423,7 +419,7 @@ class ProcessGroupGlooTest(MultiProcessTestCase):
# Single input tests
tests = simple_reduce_tests(self.rank, self.world_size)
- for op, input, expected in tests:
+ for (op, input, expected) in tests:
opts = c10d.AllreduceOptions()
opts.reduceOp = op
tensor = fn(input)
@@ -434,7 +430,7 @@ class ProcessGroupGlooTest(MultiProcessTestCase):
# Multi input tests
tests = simple_multi_input_reduce_tests(self.rank, self.world_size)
- for op, inputs, output in tests:
+ for (op, inputs, output) in tests:
opts = c10d.AllreduceOptions()
opts.reduceOp = op
tensors = [fn(input) for input in inputs]
@@ -510,9 +506,7 @@ class ProcessGroupGlooTest(MultiProcessTestCase):
opts = c10d.AllreduceCoalescedOptions()
pg.allreduce_coalesced([], opts)
- with self.assertRaisesRegex(
- RuntimeError, "tensors must all have the same type"
- ):
+ with self.assertRaisesRegex(RuntimeError, "tensors must all have the same type"):
opts = c10d.AllreduceCoalescedOptions()
pg.allreduce_coalesced([t1, t2], opts)
@@ -627,9 +621,7 @@ class ProcessGroupGlooTest(MultiProcessTestCase):
# Sparse allreduce only works with c10d.ReduceOp.SUM.
for op in [c10d.ReduceOp.PRODUCT, c10d.ReduceOp.MIN, c10d.ReduceOp.MAX]:
- with self.assertRaisesRegex(
- RuntimeError, "unsupported reduction operation"
- ):
+ with self.assertRaisesRegex(RuntimeError, "unsupported reduction operation"):
opts = c10d.AllreduceOptions()
opts.reduceOp = op
pg.allreduce([t3], opts)
@@ -644,7 +636,7 @@ class ProcessGroupGlooTest(MultiProcessTestCase):
tests = simple_sparse_reduce_tests(
self.rank, self.world_size, num_inputs=num_inputs_per_rank
)
- for inputs, outputs in tests:
+ for (inputs, outputs) in tests:
tensors = [fn(input) for input in inputs]
fut = pg.allreduce(tensors).get_future()
fut.wait()
@@ -665,11 +657,11 @@ class ProcessGroupGlooTest(MultiProcessTestCase):
@requires_gloo()
def test_sparse_allreduce_cuda_dispatched(self):
store = c10d.FileStore(self.file_name, self.world_size)
- dist.init_process_group(
- backend="gloo", store=store, rank=self.rank, world_size=self.world_size
+ dist.init_process_group(backend="gloo", store=store, rank=self.rank, world_size=self.world_size)
+ tests = simple_sparse_reduce_tests(
+ self.rank, self.world_size, num_inputs=1
)
- tests = simple_sparse_reduce_tests(self.rank, self.world_size, num_inputs=1)
- for inputs, outputs in tests:
+ for (inputs, outputs) in tests:
tensors = inputs[-1].clone().cuda()
work = dist.all_reduce(tensors, async_op=True)
work.wait()
@@ -715,10 +707,8 @@ class ProcessGroupGlooTest(MultiProcessTestCase):
work = dist.reduce_scatter_tensor(output, input, async_op=True)
work.wait()
- expect = (
- input.view(self.world_size, *out_shape).chunk(self.world_size)[self.rank]
- * self.world_size
- )
+ expect = input.view(self.world_size, *out_shape) \
+ .chunk(self.world_size)[self.rank] * self.world_size
self.assertTrue(torch.allclose(output, expect))
@requires_gloo()
@@ -740,12 +730,8 @@ class ProcessGroupGlooTest(MultiProcessTestCase):
work.wait()
for output, input in zip(outputs, inputs):
- expect = (
- input.view(self.world_size, *output.shape).chunk(self.world_size)[
- self.rank
- ]
- * self.world_size
- )
+ expect = input.view(self.world_size, *output.shape) \
+ .chunk(self.world_size)[self.rank] * self.world_size
self.assertTrue(torch.allclose(output, expect))
@requires_gloo()
@@ -783,16 +769,12 @@ class ProcessGroupGlooTest(MultiProcessTestCase):
opts.rootRank = 0
pg.scatter([t1, t1], [], opts)
- with self.assertRaisesRegex(
- RuntimeError, "requires a single-element input list"
- ):
+ with self.assertRaisesRegex(RuntimeError, "requires a single-element input list"):
opts = c10d.ScatterOptions()
opts.rootRank = self.rank
pg.scatter([t1], [], opts)
- with self.assertRaisesRegex(
- RuntimeError, "requires a single-element input list"
- ):
+ with self.assertRaisesRegex(RuntimeError, "requires a single-element input list"):
opts = c10d.ScatterOptions()
opts.rootRank = self.rank
pg.scatter([t1], [[t1] * self.world_size, [t1] * self.world_size], opts)
@@ -1112,9 +1094,7 @@ class ProcessGroupGlooTest(MultiProcessTestCase):
t2 = torch.zeros([1], dtype=torch.float64)
t3 = torch.zeros([2], dtype=torch.float32)
- with self.assertRaisesRegex(
- RuntimeError, "requires non-empty input tensor list"
- ):
+ with self.assertRaisesRegex(RuntimeError, "requires non-empty input tensor list"):
pg.allgather([], [])
with self.assertRaisesRegex(
@@ -1278,19 +1258,11 @@ class ProcessGroupGlooTest(MultiProcessTestCase):
)
xxs = [2 * [torch.tensor([i + self.rank])] for i in range(2)]
- yys = [
- [[torch.zeros_like(x) for x in xx] for _ in range(self.world_size)]
- for xx in xxs
- ]
- futs = [
- c10d.all_gather_coalesced(yy, xx, async_op=True) for xx, yy in zip(xxs, yys)
- ]
+ yys = [[[torch.zeros_like(x) for x in xx] for _ in range(self.world_size)] for xx in xxs]
+ futs = [c10d.all_gather_coalesced(yy, xx, async_op=True) for xx, yy in zip(xxs, yys)]
# expected outputs
- zzs = [
- [2 * [torch.tensor([i + r])] for r in range(self.world_size)]
- for i in range(2)
- ]
+ zzs = [[2 * [torch.tensor([i + r])] for r in range(self.world_size)] for i in range(2)]
torch.futures.wait_all(futs)
for yy, zz in zip(yys, zzs):
@@ -1352,7 +1324,7 @@ class ProcessGroupGlooTest(MultiProcessTestCase):
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
- for op, input, output in simple_reduce_tests(self.rank, self.world_size):
+ for (op, input, output) in simple_reduce_tests(self.rank, self.world_size):
for root in range(self.world_size):
opts = c10d.ReduceOptions()
opts.reduceOp = op
@@ -1484,11 +1456,12 @@ class ProcessGroupGlooTest(MultiProcessTestCase):
def test_round_robin(self):
num_process_groups = 2
store = c10d.FileStore(self.file_name, self.world_size)
- c10d.init_process_group(
- backend="gloo", store=store, rank=self.rank, world_size=self.world_size
- )
+ c10d.init_process_group(backend="gloo", store=store, rank=self.rank, world_size=self.world_size)
pg = c10d._round_robin_process_groups(
- [c10d.new_group(pg_options=self.opts()) for i in range(num_process_groups)]
+ [
+ c10d.new_group(pg_options=self.opts())
+ for i in range(num_process_groups)
+ ]
)
# Run a few collectives so that we have called each process group
@@ -1501,13 +1474,14 @@ class ProcessGroupGlooTest(MultiProcessTestCase):
@requires_gloo()
def test_round_robin_create_destroy(self):
store = c10d.FileStore(self.file_name, self.world_size)
- c10d.init_process_group(
- backend="gloo", store=store, rank=self.rank, world_size=self.world_size
- )
+ c10d.init_process_group(backend="gloo", store=store, rank=self.rank, world_size=self.world_size)
def create(num, prefix):
return c10d._round_robin_process_groups(
- [c10d.new_group(pg_options=self.opts()) for i in range(num)]
+ [
+ c10d.new_group(pg_options=self.opts())
+ for i in range(num)
+ ]
)
# Run create/use/destroy twice
@@ -1530,18 +1504,14 @@ class DistributedDataParallelTest(
def _get_process_group(self):
store = self._get_store()
- c10d.init_process_group(
- backend="gloo", store=store, rank=self.rank, world_size=self.world_size
- )
+ c10d.init_process_group(backend="gloo", store=store, rank=self.rank, world_size=self.world_size)
return c10d.distributed_c10d._get_default_group()
def _test_gloo_backend(
self, devices, device_ids, multi_device=False, gradient_as_bucket_view=False
):
store = c10d.FileStore(self.file_name, self.world_size)
- c10d.init_process_group(
- backend="gloo", store=store, rank=self.rank, world_size=self.world_size
- )
+ c10d.init_process_group(backend="gloo", store=store, rank=self.rank, world_size=self.world_size)
process_group = c10d.distributed_c10d._get_default_group()
device = devices[-1]
backend = process_group._get_backend(device)
@@ -1822,7 +1792,6 @@ class DistributedDataParallelTest(
def forward(self, x):
x = self.relu(self.fc1(x))
return F.softmax(x, dim=1)
-
pg = dist.init_process_group(
"gloo",
init_method=f"file://{self.file_name}",
@@ -1833,13 +1802,14 @@ class DistributedDataParallelTest(
local_shard_metadata = ShardMetadata(
shard_offsets=[(self.rank % 2) * 5, 0],
shard_sizes=[5, 10],
- placement=f"rank:{self.rank}/cuda:{self.rank}",
+ placement=f"rank:{self.rank}/cuda:{self.rank}"
)
local_shards = [Shard(torch.randn(5, 10, device=device), local_shard_metadata)]
st = init_from_local_shards(local_shards, [10, 10])
m = MyModule(st)
DistributedDataParallel._set_params_and_buffers_to_ignore_for_model(
- module=m, params_and_buffers_to_ignore={"st"}
+ module=m,
+ params_and_buffers_to_ignore={'st'}
)
# test to make DDP constructor will not fail when module includes a ShardedTensor when ignored
DistributedDataParallel(
@@ -1869,9 +1839,7 @@ class DistributedDataParallelTest(
# Check that the gradients are sparse and identical
vanilla_parameter = next(vanilla_model.parameters())
ddp_parameter = next(ddp_model.parameters())
- self.assertEqual(
- vanilla_parameter.grad.coalesce(), ddp_parameter.grad.coalesce()
- )
+ self.assertEqual(vanilla_parameter.grad.coalesce(), ddp_parameter.grad.coalesce())
@requires_gloo()
@skip_if_lt_x_gpu(2)
@@ -2096,9 +2064,7 @@ class DistributedDataParallelTest(
ModuleForDdpCommHook(), process_group=process_group
)
- expected_err = (
- "Communication hook: return annotation should be torch.futures.Future"
- )
+ expected_err = "Communication hook: return annotation should be torch.futures.Future"
with self.assertRaisesRegex(
ValueError,
expected_err,
@@ -2206,9 +2172,7 @@ class ReducerTest(TestCase):
self.file = tempfile.NamedTemporaryFile(delete=False)
world_size = 1
self.store = c10d.FileStore(self.file.name, world_size)
- c10d.init_process_group(
- backend="gloo", store=self.store, rank=0, world_size=world_size
- )
+ c10d.init_process_group(backend="gloo", store=self.store, rank=0, world_size=world_size)
self.process_group = c10d.distributed_c10d._get_default_group()
def tearDown(self):
@@ -2224,9 +2188,7 @@ class ReducerTest(TestCase):
model = ReducerModule()
parameters = list(model.parameters())
buckets = [list(range(len(parameters)))]
- dist.Reducer(
- parameters, buckets, [dist._DEFAULT_FIRST_BUCKET_BYTES], self.process_group
- )
+ dist.Reducer(parameters, buckets, [dist._DEFAULT_FIRST_BUCKET_BYTES], self.process_group)
def _create_mixed_precision_model(self):
model = ReducerModule()
@@ -2247,7 +2209,7 @@ class ReducerTest(TestCase):
parameters,
buckets,
[dist._DEFAULT_FIRST_BUCKET_BYTES],
- self.process_group,
+ self.process_group
)
@requires_gloo()
@@ -2262,7 +2224,7 @@ class ReducerTest(TestCase):
parameters,
buckets,
[dist._DEFAULT_FIRST_BUCKET_BYTES for _ in buckets],
- self.process_group,
+ self.process_group
)
def _create_reducer_for_models(self, models, find_unused_parameters=False):
@@ -2345,6 +2307,7 @@ class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
def device(self):
return "cpu"
+
def setUp(self):
super().setUp()
self._spawn_processes()
@@ -2391,9 +2354,7 @@ class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
@skip_if_lt_x_gpu(2)
def test_broadcast_coalesced_gloo_cuda(self):
store = c10d.FileStore(self.file_name, self.world_size)
- c10d.init_process_group(
- backend="gloo", store=store, rank=self.rank, world_size=self.world_size
- )
+ c10d.init_process_group(backend="gloo", store=store, rank=self.rank, world_size=self.world_size)
process_group = c10d.distributed_c10d._get_default_group()
device = torch.device("cuda:%d" % self.rank)
backend = process_group._get_backend(device)
@@ -2405,9 +2366,7 @@ class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
@requires_gloo()
def test_broadcast_coalesced_gloo_cpu(self):
store = c10d.FileStore(self.file_name, self.world_size)
- c10d.init_process_group(
- backend="gloo", store=store, rank=self.rank, world_size=self.world_size
- )
+ c10d.init_process_group(backend="gloo", store=store, rank=self.rank, world_size=self.world_size)
process_group = c10d.distributed_c10d._get_default_group()
device = torch.device("cpu")
backend = process_group._get_backend(device)
@@ -2462,10 +2421,7 @@ class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
def test_bool_tensors(self):
self._test_bool_tensors(backend="gloo")
-
-class GlooProcessGroupWithDispatchedCollectivesTests(
- test_c10d_common.ProcessGroupWithDispatchedCollectivesTests
-):
+class GlooProcessGroupWithDispatchedCollectivesTests(test_c10d_common.ProcessGroupWithDispatchedCollectivesTests):
@requires_gloo()
def test_collectives(self):
self._test_collectives(backend="gloo")
@@ -2503,8 +2459,8 @@ class GlooProcessGroupWithDispatchedCollectivesTests(
)
dist.monitored_barrier()
-
class CompilerTest(test_c10d_common.CompilerTest):
+
@property
def world_size(self):
return 2
@@ -2524,28 +2480,36 @@ class CompilerTest(test_c10d_common.CompilerTest):
@skip_if_lt_x_gpu(2)
def test_allreduce_work_wait_gpu(self):
- self._test_allreduce_work_wait(torch.ones(2, 2, device=self.rank) * self.rank)
+ self._test_allreduce_work_wait(
+ torch.ones(2, 2, device=self.rank) * self.rank
+ )
def test_allgather_work_wait_cpu(self):
self._test_allgather_work_wait(torch.ones(2, 2) * self.rank)
@skip_if_lt_x_gpu(2)
def test_allgather_work_wait_gpu(self):
- self._test_allgather_work_wait(torch.ones(2, 2, device=self.rank) * self.rank)
+ self._test_allgather_work_wait(
+ torch.ones(2, 2, device=self.rank) * self.rank
+ )
def test_broadcast_work_wait_cpu(self):
self._test_broadcast_work_wait(torch.ones(2, 2) * self.rank)
@skip_if_lt_x_gpu(2)
def test_broadcast_work_wait_gpu(self):
- self._test_broadcast_work_wait(torch.ones(2, 2, device=self.rank) * self.rank)
+ self._test_broadcast_work_wait(
+ torch.ones(2, 2, device=self.rank) * self.rank
+ )
def test_scatter_work_wait_cpu(self):
self._test_scatter_work_wait(torch.ones(2, 2) * self.rank)
@skip_if_lt_x_gpu(2)
def test_scatter_work_wait_gpu(self):
- self._test_scatter_work_wait(torch.ones(2, 2, device=self.rank) * self.rank)
+ self._test_scatter_work_wait(
+ torch.ones(2, 2, device=self.rank) * self.rank
+ )
def test_nested_comm_tensor_wrapping(self):
self._test_nested_comm_tensor_wrapping(torch.ones(2, 2) * self.rank)
@@ -2559,7 +2523,6 @@ class CompilerTest(test_c10d_common.CompilerTest):
torch.ones(2, 2, device=self.rank) * self.rank
)
-
class LargeCommTest(test_c10d_common.AbstractLargeCommTest, MultiProcessTestCase):
def setUp(self):
super().setUp()
@@ -2588,7 +2551,6 @@ class LargeCommTest(test_c10d_common.AbstractLargeCommTest, MultiProcessTestCase
def test_new_group_local_sync_duplicate_pg(self):
self._test_new_group_local_sync_duplicate_pg(backend="gloo")
-
if __name__ == "__main__":
assert (
not torch.cuda._initialized
diff --git a/test/distributed/test_c10d_nccl.py b/test/distributed/test_c10d_nccl.py
index bbe4461e0c..bbaafd1cac 100644
--- a/test/distributed/test_c10d_nccl.py
+++ b/test/distributed/test_c10d_nccl.py
@@ -1,23 +1,23 @@
# Owner(s): ["oncall: distributed"]
import copy
-import json
import math
import os
-import pickle
import random
import re
import signal
import sys
import tempfile
import threading
+import pickle
import time
+import json
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
-from enum import auto, Enum
from itertools import chain, product
-from unittest import mock, SkipTest
+from unittest import SkipTest, mock
+from enum import auto, Enum
import torch
import torch.distributed as c10d
@@ -26,43 +26,42 @@ if not c10d.is_available() or not c10d.is_nccl_available():
print("c10d NCCL not available, skipping tests", file=sys.stderr)
sys.exit(0)
-from typing import Dict, List
-
import test_c10d_common
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
-from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
+from typing import Dict, List
+from test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
- get_timeout,
- init_multigpu_helper,
MultiProcessTestCase,
- requires_gloo,
+ init_multigpu_helper,
requires_nccl,
+ requires_gloo,
requires_nccl_version,
skip_if_lt_x_gpu,
+ get_timeout,
skip_if_rocm,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
- instantiate_parametrized_tests,
- parametrize,
- retry_on_connect_failures,
+ TestCase,
run_tests,
- skip_but_pass_in_sandcastle,
- skip_but_pass_in_sandcastle_if,
+ retry_on_connect_failures,
skipIfRocm,
- TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
- TestCase,
+ parametrize,
+ instantiate_parametrized_tests,
+ skip_but_pass_in_sandcastle,
+ skip_but_pass_in_sandcastle_if,
+ TEST_CUDA
)
if TEST_WITH_DEV_DBG_ASAN:
@@ -72,12 +71,15 @@ if TEST_WITH_DEV_DBG_ASAN:
sys.exit(0)
# bfloat16 is only supported by CUDA 11+
-BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
- (torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
- or torch.version.hip is not None
+BFLOAT16_AVAILABLE = (
+ torch.cuda.is_available()
+ and
+ (
+ (torch.version.cuda is not None and int(torch.version.cuda.split('.')[0]) >= 11)
+ or torch.version.hip is not None
+ )
)
-
class RendezvousEnvTest(TestCase):
@retry_on_connect_failures
@requires_nccl()
@@ -216,8 +218,7 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
rank=self.rank,
store=store,
pg_options=opts,
- device_id=device_id,
- )
+ device_id=device_id)
pg = c10d.distributed_c10d._get_default_group()
return pg
@@ -267,23 +268,13 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
pg.reduce(xs).wait()
self.assertEqual(0, xs[0].numel())
- ys = [
- [
- torch.FloatTensor([]).cuda(local_device_idx)
- for _ in range(self.world_size)
- ]
- ]
+ ys = [[torch.FloatTensor([]).cuda(local_device_idx) for _ in range(self.world_size)]]
pg.allgather(ys, xs).wait()
for y in ys[0]:
self.assertEqual(0, y.numel())
ys = [torch.FloatTensor([]).cuda(local_device_idx)]
- xs = [
- [
- torch.FloatTensor([]).cuda(local_device_idx)
- for _ in range(self.world_size)
- ]
- ]
+ xs = [[torch.FloatTensor([]).cuda(local_device_idx) for _ in range(self.world_size)]]
pg.reduce_scatter(ys, xs).wait()
self.assertEqual(0, ys[0].numel())
@@ -309,10 +300,7 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
self.assertEqual(torch.tensor([i]), output[0])
expected_tensor = torch.empty([i + 1, i + 1]).fill_(i + 1)
- xs = [
- torch.empty([i + 1, i + 1]).fill_(-1).cuda(device=device_idx)
- for device_idx in self.rank_to_GPU[self.rank]
- ]
+ xs = [torch.empty([i + 1, i + 1]).fill_(-1).cuda(device=device_idx) for device_idx in self.rank_to_GPU[self.rank]]
# test with multiple input tensors (multiple gpu in one rank)
for j in range(len(xs)):
@@ -332,9 +320,7 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
indices = torch.tensor([[0, 1]])
values = torch.tensor([[1, 2, 0], [4, 0, 6]])
- sparse_tensor = torch.sparse_coo_tensor(indices, values, size=(2, 3)).to(
- self.rank
- )
+ sparse_tensor = torch.sparse_coo_tensor(indices, values, size=(2, 3)).to(self.rank)
# sparse allreduce call is wrapped in a try catch since the c10d API is only available in the nccl experimental branch
try:
@@ -379,37 +365,26 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
# Avg (only available for NCCL 2.10+)
if torch.cuda.nccl.version() >= (2, 10, 0):
- tensors = [torch.tensor([self.rank + 1.0]).cuda(local_device_id)]
+ tensors = [torch.tensor([self.rank + 1.]).cuda(local_device_id)]
allreduce(tensors, c10d.ReduceOp.AVG)
ndev = self.world_size
self.assertEqual(
- torch.tensor([ndev * (ndev + 1.0) / (2.0 * ndev)]),
+ torch.tensor([ndev * (ndev + 1.) / (2. * ndev)]),
tensors[0],
)
# Premul Sum
if torch.cuda.nccl.version() >= (2, 11, 1):
for dtype in torch.half, torch.float, torch.double:
- for factor in (
- 3.0,
- torch.tensor([5.0], device=local_device_id, dtype=dtype),
- ):
- tensors = [
- torch.tensor([self.rank + 1])
- .cuda(local_device_id)
- .to(dtype=dtype)
- ]
+ for factor in (3.0, torch.tensor([5.0], device=local_device_id, dtype=dtype)):
+ tensors = [torch.tensor([self.rank + 1]).cuda(local_device_id).to(dtype=dtype)]
allreduce(tensors, c10d._make_nccl_premul_sum(factor))
self.assertEqual(
- factor
- * torch.tensor(
- [self.world_size * (self.world_size + 1) / 2],
- dtype=dtype,
- device=local_device_id,
- ),
+ factor * torch.tensor([self.world_size * (self.world_size + 1) / 2],
+ dtype=dtype, device=local_device_id),
tensors[0],
)
@@ -417,7 +392,9 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
tensors = [torch.tensor([self.rank + 1]).cuda(local_device_id)]
allreduce(tensors, c10d.ReduceOp.PRODUCT)
- self.assertEqual(torch.tensor([math.factorial(self.world_size)]), tensors[0])
+ self.assertEqual(
+ torch.tensor([math.factorial(self.world_size)]), tensors[0]
+ )
# Min
tensors = [torch.tensor([self.rank + 1]).cuda(local_device_id)]
@@ -431,13 +408,14 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
allreduce(tensors, c10d.ReduceOp.MAX)
self.assertEqual(torch.tensor([self.world_size]), tensors[0])
- for op, err in zip(
- (c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR),
- ("ReduceOp.BAND", "ReduceOp.BOR", "ReduceOp.BXOR"),
- ):
- with self.assertRaisesRegex(ValueError, "Cannot use " + err + " with NCCL"):
+ for op, err in zip((c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR),
+ ("ReduceOp.BAND", "ReduceOp.BOR", "ReduceOp.BXOR")):
+ with self.assertRaisesRegex(
+ ValueError, "Cannot use " + err + " with NCCL"
+ ):
allreduce(tensors, op)
+
@requires_nccl()
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
def test_alltoall_ops_with_cudafree_race(self):
@@ -550,7 +528,7 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
("ReduceOp.BAND", "ReduceOp.BOR", "ReduceOp.BXOR"),
):
with self.assertRaisesRegex(
- ValueError, "Cannot use " + err + " with NCCL"
+ ValueError, "Cannot use " + err + " with NCCL"
):
reduce(tensors, self.rank, rt, op)
@@ -563,14 +541,11 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
factor_ref = factor
float_tensors = [
torch.tensor(
- [self.rank + 1.0], device=f"cuda:{local_device_id}"
- )
+ [self.rank + 1.0], device=f"cuda:{local_device_id}")
]
float_tensors_ref = [
torch.tensor(
- [(self.rank + 1.0) * factor_ref],
- device=f"cuda:{local_device_id}",
- )
+ [(self.rank + 1.0) * factor_ref], device=f"cuda:{local_device_id}")
]
reduce(float_tensors_ref, rt, 0)
@@ -593,12 +568,8 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
output_tensors = []
expected_output = []
- output_per_gpu = (
- [torch.empty(2, 2).fill_(-1)] * len(local_device_ids) * self.world_size
- )
- expected_per_gpu = (
- [torch.empty(2, 2).fill_(2)] * len(local_device_ids) * self.world_size
- )
+ output_per_gpu = ([torch.empty(2, 2).fill_(-1)] * len(local_device_ids) * self.world_size)
+ expected_per_gpu = ([torch.empty(2, 2).fill_(2)] * len(local_device_ids) * self.world_size)
for gpu in local_device_ids:
output_tensors.append([t.cuda(device=gpu) for t in output_per_gpu])
@@ -623,9 +594,7 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
# allgather_base is GPU number agnostic.
# Each rank contribute one tensor regardless of GPU counts
tensor = torch.tensor([self.rank]).cuda(local_device_id)
- output_t = torch.empty((self.world_size), dtype=tensor.dtype).cuda(
- local_device_id
- )
+ output_t = torch.empty((self.world_size), dtype=tensor.dtype).cuda(local_device_id)
allgather_base(output_t, tensor)
@@ -775,8 +744,7 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
with self.assertRaisesRegex(
# throws error message from dispatcher
- RuntimeError,
- "There were no tensor arguments to this function",
+ RuntimeError, "There were no tensor arguments to this function"
):
opts = c10d.GatherOptions()
opts.rootRank = 0
@@ -853,6 +821,7 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
for rank in range(self.world_size):
ls.append(torch.tensor([rank]).cuda(gpu_idx))
+
# test each rank to scatter
expected = [torch.tensor([self.rank])]
for i in range(stress_length):
@@ -897,8 +866,7 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
with self.assertRaisesRegex(
# throws error message from dispatcher
- RuntimeError,
- "There were no tensor arguments to this function",
+ RuntimeError, "There were no tensor arguments to this function"
):
opts = c10d.ScatterOptions()
opts.rootRank = 0
@@ -977,8 +945,8 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
[
(1 + self.world_size) * self.world_size // 2
+ self.world_size * self.rank
- ]
- )
+ ])
+
self.assertEqual(expected, output[i])
@@ -993,7 +961,9 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
reduce_scatter(output, tensor_lists, c10d.ReduceOp.MAX)
for i in range(num_gpus):
- expected = torch.tensor([self.rank + self.world_size + i])
+ expected = torch.tensor(
+ [self.rank + self.world_size + i]
+ )
self.assertEqual(expected, output[i])
# Product
@@ -1051,9 +1021,7 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
output = [t.float() for t in output]
tensor_lists = [[t.float() for t in tl] for tl in tensor_lists]
output_ref = [t.float() for t in output]
- tensor_lists_ref = [
- [t.float() * factor_ref for t in tl] for tl in tensor_lists
- ]
+ tensor_lists_ref = [[t.float() * factor_ref for t in tl] for tl in tensor_lists]
reduce_scatter(output, tensor_lists, c10d._make_nccl_premul_sum(factor))
reduce_scatter(output_ref, tensor_lists_ref, c10d.ReduceOp.SUM)
self.assertEqual(output_ref, output)
@@ -1072,9 +1040,7 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
# reduce_scatter_base is GPU number agnostic.
# Each rank contribute one tensor regardless of GPU counts
output_t = torch.empty([1]).cuda(local_device_id)
- tensor = torch.arange(self.world_size, dtype=output_t.dtype).cuda(
- local_device_id
- )
+ tensor = torch.arange(self.world_size, dtype=output_t.dtype).cuda(local_device_id)
reduce_scatter_base(output_t, tensor)
@@ -1099,9 +1065,7 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
for i in range(1, len(local_device_ids) + 1):
for j in range(i):
- tensors_list[i - 1].append(
- torch.tensor([j + 1]).cuda(local_device_ids[j])
- )
+ tensors_list[i - 1].append(torch.tensor([j + 1]).cuda(local_device_ids[j]))
works = []
for tensors in tensors_list:
@@ -1181,9 +1145,7 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
dist.all_reduce(t)
def abortpg():
- c10d.distributed_c10d._get_default_group()._get_backend(
- torch.device(device)
- )._shutdown()
+ c10d.distributed_c10d._get_default_group()._get_backend(torch.device(device))._shutdown()
# Initialize DDP to ensure "destroy_process_group" will not call
# ProcessGroupNCCL destructor since DDP holds a reference to process group.
@@ -1256,6 +1218,7 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
pg._get_backend(torch.device(device))._shutdown()
del pg
+
@requires_nccl()
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
def test_destruct_before_terminate_pg(self):
@@ -1273,6 +1236,7 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
# force destruction before terminating comms, destructor would terminate comms
del pg
+
@requires_nccl()
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
def test_abort_in_destroy_pg(self):
@@ -1296,9 +1260,7 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
pg.allreduce([t])
@requires_nccl()
- @skip_but_pass_in_sandcastle_if(
- torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs"
- )
+ @skip_but_pass_in_sandcastle_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_close_multi_pg_unordered(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_nccl(store, self.opts())
@@ -1328,9 +1290,7 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
dist.destroy_process_group()
@requires_nccl()
- @skip_but_pass_in_sandcastle_if(
- torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs"
- )
+ @skip_but_pass_in_sandcastle_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_abort_in_destroy_multi_pgs(self):
os.environ["TORCH_NCCL_ABORT_IN_DESTROY_PG"] = "1"
store = c10d.FileStore(self.file_name, self.world_size)
@@ -1351,10 +1311,9 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
# shutdown all NCCL PGs in one shot
dist.destroy_process_group()
+
@requires_nccl()
- @skip_but_pass_in_sandcastle_if(
- torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs"
- )
+ @skip_but_pass_in_sandcastle_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_abort_in_destroy_mixed_empty_pgs(self):
os.environ["TORCH_NCCL_ABORT_IN_DESTROY_PG"] = "1"
store = c10d.FileStore(self.file_name, self.world_size)
@@ -1376,9 +1335,7 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
dist.destroy_process_group()
@requires_nccl()
- @skip_but_pass_in_sandcastle_if(
- torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs"
- )
+ @skip_but_pass_in_sandcastle_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_file_store_check(self):
os.environ["TORCH_NCCL_ASYNC_ERROR_HANDLING"] = "0"
os.environ["TORCH_NCCL_ENABLE_MONITORING"] = "0"
@@ -1390,7 +1347,10 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
# e.g., self.file_name = tempfile.NamedTemporaryFile(delete=False).name
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
- backend="nccl", rank=self.rank, world_size=self.world_size, store=store
+ backend="nccl",
+ rank=self.rank,
+ world_size=self.world_size,
+ store=store
)
pg = dist.distributed_c10d._get_default_group()
self.assertEqual(pg.rank(), self.rank)
@@ -1442,9 +1402,7 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
# 'timeout' kwarg taking precedence
opts = dist.ProcessGroupNCCL.Options()
opts._timeout = timedelta(seconds=123)
- dist.init_process_group(
- **base_opts, pg_options=opts, timeout=timedelta(seconds=1240)
- )
+ dist.init_process_group(**base_opts, pg_options=opts, timeout=timedelta(seconds=1240))
self._check_nccl_timeout(timedelta(seconds=1240))
dist.destroy_process_group()
@@ -1454,19 +1412,13 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
def test_set_nccl_pg_timeout(self, backend):
store = c10d.FileStore(self.file_name, self.world_size)
opts = dict(
- backend=backend,
- store=store,
- rank=self.rank,
- world_size=self.world_size,
- timeout=timedelta(seconds=123),
+ backend=backend, store=store, rank=self.rank, world_size=self.world_size, timeout=timedelta(seconds=123)
)
dist.init_process_group(**opts)
pg = dist.distributed_c10d._get_default_group()
pg.allreduce(torch.rand(10).cuda(self.rank))
self._check_nccl_timeout(timedelta(seconds=123))
- pg._get_backend(torch.device(f"cuda:{self.rank}"))._set_default_timeout(
- timedelta(seconds=23)
- )
+ pg._get_backend(torch.device(f"cuda:{self.rank}"))._set_default_timeout(timedelta(seconds=23))
self._check_nccl_timeout(timedelta(seconds=23))
pg.allreduce(torch.rand(10).cuda(self.rank))
c10d.distributed_c10d._set_pg_timeout(timedelta(seconds=252), pg)
@@ -1488,9 +1440,7 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
# allgather_base is GPU number agnostic.
# Each rank contribute one tensor regardless of GPU counts
tensor = torch.tensor([self.rank]).cuda(local_device_id)
- output_t = torch.empty((self.world_size), dtype=tensor.dtype).cuda(
- local_device_id
- )
+ output_t = torch.empty((self.world_size), dtype=tensor.dtype).cuda(local_device_id)
allgather_base(output_t, tensor)
@@ -1526,7 +1476,7 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
# Test `ncclCommSplit` for smaller subgroups of the world when
# we've passed a specific device_id to init_process_group.
store = c10d.FileStore(self.file_name, self.world_size)
- device = torch.device(f"cuda:{self.rank}")
+ device = torch.device(f'cuda:{self.rank}')
pg = self._create_process_group_nccl(store, self.opts(), device_id=device)
backend = pg._get_backend(torch.device(device))
@@ -1566,6 +1516,7 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
new_pg.broadcast(broadcast_tensor, 0).wait()
self.assertEqual(backend.comm_split_count(), 1)
+
@requires_nccl_version((2, 18), "Need NCCL 2.18+ for ncclCommSplit")
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
def test_non_blocking_with_eager_init(self):
@@ -1574,7 +1525,7 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
os.environ["TORCH_NCCL_USE_COMM_NONBLOCKING"] = "1"
os.environ["TORCH_NCCL_NONBLOCKING_TIMEOUT"] = "100"
store = c10d.FileStore(self.file_name, self.world_size)
- device = torch.device(f"cuda:{self.rank}")
+ device = torch.device(f'cuda:{self.rank}')
# bound device to triger eager init mode
pg = self._create_process_group_nccl(store, self.opts(), device_id=device)
backend = pg._get_backend(torch.device(device))
@@ -1590,26 +1541,25 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
new_pg.broadcast(broadcast_tensor, 0).wait()
self.assertEqual(backend.comm_split_count(), 1)
+
@requires_nccl()
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
def test_get_uid(self):
store = c10d.FileStore(self.file_name, self.world_size)
- device = torch.device(f"cuda:{self.rank}")
+ device = torch.device(f'cuda:{self.rank}')
pg = self._create_process_group_nccl(store, self.opts(), device_id=device)
from torch.distributed.distributed_c10d import _get_process_group_uid
-
self.assertEqual(_get_process_group_uid(pg), 0)
pg_2 = c10d.new_group([0, 1])
self.assertEqual(_get_process_group_uid(pg_2), 1)
+
@requires_nccl()
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
def test_set_process_group_desc(self):
store = c10d.FileStore(self.file_name, self.world_size)
- device = torch.device(f"cuda:{self.rank}")
- pg_default = self._create_process_group_nccl(
- store, self.opts(), device_id=device
- )
+ device = torch.device(f'cuda:{self.rank}')
+ pg_default = self._create_process_group_nccl(store, self.opts(), device_id=device)
self.assertEqual(pg_default.group_desc, "default_pg")
pg_1 = c10d.new_group([0, 1], group_desc="test_purpose")
self.assertEqual(pg_1.group_desc, "test_purpose")
@@ -1629,9 +1579,7 @@ class DistributedDataParallelTest(
def _get_process_group(self):
store = self._get_store()
- c10d.init_process_group(
- "nccl", store=store, rank=self.rank, world_size=self.world_size
- )
+ c10d.init_process_group("nccl", store=store, rank=self.rank, world_size=self.world_size)
return c10d.distributed_c10d._get_default_group()
def _test_nccl_backend(
@@ -1651,9 +1599,7 @@ class DistributedDataParallelTest(
os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1"
store = c10d.FileStore(self.file_name, self.world_size)
# provide sufficient timeout to initialize NCCL comm.
- pg = c10d.ProcessGroupNCCL(
- store, self.rank, self.world_size, timeout=timedelta(seconds=15)
- )
+ pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size, timeout=timedelta(seconds=15))
pg_gloo = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
pg.barrier().wait(timedelta(seconds=5))
# Simulate stuckness in rank 0.
@@ -1800,7 +1746,7 @@ class DistributedDataParallelTest(
# Input 2**15, so that the gradients will overflow with a
# world_size of 2, unless we normalize the gradient by the
# world_size before the reduction
- input = torch.tensor([[2**15]]).cuda(gpus[0]).half()
+ input = torch.tensor([[2 ** 15]]).cuda(gpus[0]).half()
# Step model
ddp_model.train()
@@ -2304,9 +2250,7 @@ class DistributedDataParallelTest(
c10d.destroy_process_group(process_group)
store = c10d.FileStore(recovery_filename, self.world_size)
- c10d.init_process_group(
- "nccl", store=store, rank=self.rank, world_size=self.world_size
- )
+ c10d.init_process_group("nccl", store=store, rank=self.rank, world_size=self.world_size)
process_group = c10d.distributed_c10d._get_default_group()
ddp = DistributedDataParallel(
model,
@@ -2385,7 +2329,9 @@ class DistributedDataParallelTest(
layer_formats, layer_dtypes, bucketsizes
):
with first_bucket_size(bucketsize):
- model_msg = f"rank = {self.rank} formats = {formats} dtypes = {dtypes} bucketsize = {bucketsize} "
+ model_msg = (
+ f"rank = {self.rank} formats = {formats} dtypes = {dtypes} bucketsize = {bucketsize} "
+ )
try:
m = ConvNet(layer_devs, formats, dtypes)
m_ddp = DistributedDataParallel(
@@ -2668,9 +2614,7 @@ class DistributedDataParallelTest(
# Get GPU model with the hook registered.
# Test the hook with different algorithmic configs.
for use_error_feedback, warm_start, batch_tensors_with_same_shape in product(
- [True, False],
- [True, False],
- [True, False],
+ [True, False], [True, False], [True, False],
):
state = powerSGD.PowerSGDState(
process_group=process_group,
@@ -2908,14 +2852,10 @@ class DistributedDataParallelTest(
seq_tensor = seq_tensor[permutation_idx]
embedded_seq_tensor = embed(seq_tensor)
packed_input = torch.nn.utils.rnn.pack_padded_sequence(
- embedded_seq_tensor,
- seq_lengths,
- batch_first=True,
+ embedded_seq_tensor, seq_lengths, batch_first=True,
)
packed_input_ddp = torch.nn.utils.rnn.pack_padded_sequence(
- embedded_seq_tensor.detach().clone(),
- seq_lengths,
- batch_first=True,
+ embedded_seq_tensor.detach().clone(), seq_lengths, batch_first=True,
)
# Move the input to GPU explicitly for the local model
packed_output, (ht, ct) = lstm(packed_input.to(self.rank))
@@ -2934,9 +2874,7 @@ class DistributedDataParallelTest(
def test_channels_last_contig(self):
process_group = self._get_process_group()
device = torch.device(f"cuda:{self.rank}")
- tensor = torch.ones((2, 16, 768, 1152), dtype=torch.float32, device=device).to(
- memory_format=torch.channels_last
- )
+ tensor = torch.ones((2, 16, 768, 1152), dtype=torch.float32, device=device).to(memory_format=torch.channels_last)
process_group.broadcast([tensor]).wait()
@requires_nccl()
@@ -2947,16 +2885,10 @@ class DistributedDataParallelTest(
super().__init__()
self.hin = hin
self.win = win
- self.weight = nn.Parameter(
- torch.ones(
- (n_features, n_features, hin, win // 2 + 1), dtype=torch.cfloat
- )
- )
+ self.weight = nn.Parameter(torch.ones((n_features, n_features, hin, win // 2 + 1), dtype=torch.cfloat))
def forward(self, x):
- xc = torch.fft.rfft2(
- x, s=(self.hin, self.win), dim=(-2, -1), norm="ortho"
- )
+ xc = torch.fft.rfft2(x, s=(self.hin, self.win), dim=(-2, -1), norm="ortho")
xcw = torch.einsum("nchw,cohw->nohw", xc, self.weight)
x = torch.fft.irfft2(xcw, dim=(-2, -1), norm="ortho")
return x
@@ -2983,6 +2915,7 @@ class DistributedDataParallelTest(
class WorkHookTest(MultiProcessTestCase):
+
@property
def world_size(self):
return 2
@@ -3007,9 +2940,7 @@ class WorkHookTest(MultiProcessTestCase):
def _get_process_group(self):
store = self._get_store()
- c10d.init_process_group(
- "nccl", store=store, rank=self.rank, world_size=self.world_size
- )
+ c10d.init_process_group("nccl", store=store, rank=self.rank, world_size=self.world_size)
return c10d.distributed_c10d._get_default_group()
@requires_nccl()
@@ -3076,10 +3007,7 @@ class WorkHookTest(MultiProcessTestCase):
self.assertEqual(
tensor_list,
- [
- torch.ones([2, 3]).cuda(self.rank) * self.world_size
- for _ in range(self.world_size)
- ],
+ [torch.ones([2, 3]).cuda(self.rank) * self.world_size for _ in range(self.world_size)],
)
@requires_nccl()
@@ -3118,11 +3046,7 @@ class WorkHookTest(MultiProcessTestCase):
# from rank0 to other ranks. However, this is DDP's internal implementation,
# which is subject to change in future versions.
self.assertTrue(num_hook_fired[OpType.BROADCAST] > 0)
- ctor_allreduce = (
- num_hook_fired[OpType.ALLREDUCE]
- if OpType.ALLREDUCE in num_hook_fired
- else 0
- )
+ ctor_allreduce = num_hook_fired[OpType.ALLREDUCE] if OpType.ALLREDUCE in num_hook_fired else 0
x = torch.zeros(2, 1000).cuda(self.rank)
ddp(x).sum().backward()
@@ -3204,7 +3128,6 @@ class WorkHookTest(MultiProcessTestCase):
self.assertEqual(num_hook_fired, work_count)
self.assertEqual(work, seq)
-
class NcclErrorHandlingTest(MultiProcessTestCase):
def setUp(self):
super().setUp()
@@ -3275,9 +3198,7 @@ class NcclErrorHandlingTest(MultiProcessTestCase):
self.assertTrue(t.is_alive())
if prev_nccl_async_error_handling is not None:
- os.environ[
- "TORCH_NCCL_ASYNC_ERROR_HANDLING"
- ] = prev_nccl_async_error_handling
+ os.environ["TORCH_NCCL_ASYNC_ERROR_HANDLING"] = prev_nccl_async_error_handling
def _test_nccl_errors_blocking(self, func):
store = c10d.FileStore(self.file_name, self.world_size)
@@ -3365,9 +3286,7 @@ class NcclErrorHandlingTest(MultiProcessTestCase):
# It seems the error message would be different depending on
# whether the test is run on CI machine and devGPU. Skipping
# the error message check to make both sides happy.
- process_group.barrier().wait(
- timeout=timedelta(seconds=self.op_timeout_sec)
- )
+ process_group.barrier().wait(timeout=timedelta(seconds=self.op_timeout_sec))
def _run_invalid_nccl_blocking_wait_env(self, val):
os.environ["TORCH_NCCL_BLOCKING_WAIT"] = val
@@ -3398,19 +3317,13 @@ class NcclErrorHandlingTest(MultiProcessTestCase):
# to coordinate btwn ranks.
pg_gloo = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
failed_collective_timeout = timedelta(milliseconds=100)
- process_group.allreduce(torch.rand(10).cuda(self.rank)).wait(
- timeout=timedelta(seconds=5)
- )
+ process_group.allreduce(torch.rand(10).cuda(self.rank)).wait(timeout=timedelta(seconds=5))
if self.rank == 0:
# This should timeout in about 1 second.
# Watchdog may abort timed out work resulting in NCCL error instead of operation timed out.
- with self.assertRaisesRegex(
- dist.DistBackendError, self.blocking_wait_error_msg
- ):
- process_group.allreduce(torch.rand(10).cuda(self.rank)).wait(
- timeout=failed_collective_timeout
- )
+ with self.assertRaisesRegex(dist.DistBackendError, self.blocking_wait_error_msg):
+ process_group.allreduce(torch.rand(10).cuda(self.rank)).wait(timeout=failed_collective_timeout)
# Now do a barrier to tell other rank to go ahead.
pg_gloo.barrier().wait()
else:
@@ -3418,9 +3331,8 @@ class NcclErrorHandlingTest(MultiProcessTestCase):
try:
pg_gloo.barrier().wait()
except Exception as e:
- raise ValueError(
- f"Rank {self.rank} barrier timed out waiting for rank 0 with error: {str(e)}"
- ) from e
+ raise ValueError(f"Rank {self.rank} barrier timed out waiting for rank 0 with error: {str(e)}") from e
+
class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
@@ -3428,6 +3340,7 @@ class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
def device(self):
return f"cuda:{self.rank}"
+
def setUp(self):
super().setUp()
# TORCH_NCCL_BLOCKING_WAIT overrides TORCH_NCCL_ASYNC_ERROR_HANDLING hence tests
@@ -3477,9 +3390,7 @@ class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
@skip_if_lt_x_gpu(2)
def test_broadcast_coalesced_nccl(self):
store = c10d.FileStore(self.file_name, self.world_size)
- c10d.init_process_group(
- backend="nccl", store=store, rank=self.rank, world_size=self.world_size
- )
+ c10d.init_process_group(backend="nccl", store=store, rank=self.rank, world_size=self.world_size)
process_group = c10d.distributed_c10d._get_default_group()
device = torch.device("cuda:%d" % self.rank)
ranks = [0, 1]
@@ -3490,51 +3401,29 @@ class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
@skip_if_lt_x_gpu(2)
def test_all_reduce_coalesced_nccl(self):
store = c10d.FileStore(self.file_name, self.world_size)
- c10d.init_process_group(
- backend="nccl", store=store, rank=self.rank, world_size=self.world_size
- )
+ c10d.init_process_group(backend="nccl", store=store, rank=self.rank, world_size=self.world_size)
process_group = c10d.distributed_c10d._get_default_group()
device = torch.device("cuda:%d" % self.rank)
- tensors = [
- torch.full((60 + i,), self.rank + 1 + i, device=device, dtype=torch.float)
- for i in range(5)
- ]
+ tensors = [torch.full((60 + i,), self.rank + 1 + i, device=device, dtype=torch.float) for i in range(5)]
torch.distributed.all_reduce_coalesced(tensors, group=process_group)
for i, t in enumerate(tensors):
- self.assertEqual(
- t,
- torch.full_like(
- t, self.world_size * (i + (self.world_size + 1.0) / 2.0)
- ),
- )
+ self.assertEqual(t, torch.full_like(t, self.world_size * (i + (self.world_size + 1.) / 2.)))
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_all_reduce_coalesced_manager_nccl(self):
store = c10d.FileStore(self.file_name, self.world_size)
- c10d.init_process_group(
- backend="nccl", store=store, rank=self.rank, world_size=self.world_size
- )
+ c10d.init_process_group(backend="nccl", store=store, rank=self.rank, world_size=self.world_size)
process_group = c10d.distributed_c10d._get_default_group()
device = torch.device("cuda:%d" % self.rank)
- tensors = [
- torch.full((60 + i,), self.rank + 1 + i, device=device, dtype=torch.float)
- for i in range(5)
- ]
- with torch.distributed._coalescing_manager(
- group=process_group, device=device, async_ops=True
- ) as cm:
+ tensors = [torch.full((60 + i,), self.rank + 1 + i, device=device, dtype=torch.float) for i in range(5)]
+ with torch.distributed._coalescing_manager(group=process_group, device=device, async_ops=True) as cm:
for tensor in tensors:
torch.distributed.all_reduce(tensor)
self.assertEqual(len(cm.works), 1)
cm.wait()
for i, t in enumerate(tensors):
- self.assertEqual(
- t,
- torch.full_like(
- t, self.world_size * (i + (self.world_size + 1.0) / 2.0)
- ),
- )
+ self.assertEqual(t, torch.full_like(t, self.world_size * (i + (self.world_size + 1.) / 2.)))
@requires_nccl()
@skip_if_lt_x_gpu(2)
@@ -3542,7 +3431,6 @@ class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
def test_intra_node_comm_all_reduce(self):
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
-
for peer in range(self.world_size):
if peer == self.rank:
continue
@@ -3583,15 +3471,13 @@ class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
self.assertTrue(t.eq(expect).all())
self.assertEqual(_get_intra_node_comm_usage_counter(), 2)
- t = torch.full((10 * 1024**2 // 2,), self.rank, dtype=torch.bfloat16).cuda()
+ t = torch.full((10 * 1024 ** 2 // 2,), self.rank, dtype=torch.bfloat16).cuda()
c10d.all_reduce(t, c10d.ReduceOp.SUM)
self.assertTrue(t.eq(expect).all())
self.assertEqual(_get_intra_node_comm_usage_counter(), 3)
# Verify that IntraNodeComm is not used beyond 10MB
- t = torch.full(
- (10 * 1024**2 // 2 + 1,), self.rank, dtype=torch.bfloat16
- ).cuda()
+ t = torch.full((10 * 1024 ** 2 // 2 + 1,), self.rank, dtype=torch.bfloat16).cuda()
c10d.all_reduce(t, c10d.ReduceOp.SUM)
self.assertTrue(t.eq(expect).all())
self.assertEqual(_get_intra_node_comm_usage_counter(), 3)
@@ -3649,9 +3535,7 @@ class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
self._test_pass_nccl_options(pg_opts)
@requires_nccl()
- @requires_nccl_version(
- (2, 17), "Need NCCL 2.17+ for configuring NCCL communicators"
- )
+ @requires_nccl_version((2, 17), "Need NCCL 2.17+ for configuring NCCL communicators")
@skip_if_lt_x_gpu(2)
def test_pass_nccl_options_config(self):
pg_opts = c10d.ProcessGroupNCCL.Options()
@@ -3668,14 +3552,10 @@ class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
# Tests if comms were configured
nccl_debug_file_content = nccl_debug_file.read()
- max_ctas = re.search(rb"Max CTAs.*(\d+)|$", nccl_debug_file_content).group(1)
- min_ctas = re.search(rb"Min CTAs.*(\d+)|$", nccl_debug_file_content).group(1)
- cga_cluster_size = re.search(
- rb"CGA cluster.*(\d+)|$", nccl_debug_file_content
- ).group(1)
- net_name = re.search(
- rb"Using network.([a-zA-z]+)|$", nccl_debug_file_content
- ).group(1)
+ max_ctas = re.search(rb'Max CTAs.*(\d+)|$', nccl_debug_file_content).group(1)
+ min_ctas = re.search(rb'Min CTAs.*(\d+)|$', nccl_debug_file_content).group(1)
+ cga_cluster_size = re.search(rb'CGA cluster.*(\d+)|$', nccl_debug_file_content).group(1)
+ net_name = re.search(rb'Using network.([a-zA-z]+)|$', nccl_debug_file_content).group(1)
self.assertEqual(pg_opts.config.max_ctas, int(max_ctas))
self.assertEqual(pg_opts.config.min_ctas, int(min_ctas))
self.assertEqual(pg_opts.config.cga_cluster_size, int(cga_cluster_size))
@@ -3768,8 +3648,8 @@ class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
def test_tensor_dtype_complex(self):
self._test_tensor_dtype_complex(backend="nccl")
-
class CompilerTest(test_c10d_common.CompilerTest):
+
@property
def world_size(self):
return 2
@@ -3792,7 +3672,9 @@ class CompilerTest(test_c10d_common.CompilerTest):
@skip_if_lt_x_gpu(2)
def test_allgather_work_wait_gpu(self):
- self._test_allgather_work_wait(torch.ones(2, 2, device=self.rank) * self.rank)
+ self._test_allgather_work_wait(
+ torch.ones(2, 2, device=self.rank) * self.rank
+ )
@skip_if_lt_x_gpu(2)
def test_allgather_into_tensor_work_wait_gpu(self):
@@ -3814,15 +3696,21 @@ class CompilerTest(test_c10d_common.CompilerTest):
@skip_if_lt_x_gpu(2)
def test_broadcast_work_wait_gpu(self):
- self._test_broadcast_work_wait(torch.ones(2, 2, device=self.rank) * self.rank)
+ self._test_broadcast_work_wait(
+ torch.ones(2, 2, device=self.rank) * self.rank
+ )
@skip_if_lt_x_gpu(2)
def test_scatter_work_wait_gpu(self):
- self._test_scatter_work_wait(torch.ones(2, 2, device=self.rank) * self.rank)
+ self._test_scatter_work_wait(
+ torch.ones(2, 2, device=self.rank) * self.rank
+ )
@skip_if_lt_x_gpu(2)
def test_alltoall_work_wait_gpu(self):
- self._test_alltoall_work_wait(torch.ones(2, 2, device=self.rank) * self.rank)
+ self._test_alltoall_work_wait(
+ torch.ones(2, 2, device=self.rank) * self.rank
+ )
@skip_if_lt_x_gpu(2)
def test_nested_comm_tensor_wrapping(self):
@@ -3847,9 +3735,7 @@ class CompilerTest(test_c10d_common.CompilerTest):
store=store,
)
output_tensor = torch.zeros(2, dtype=torch.int64).to(self.rank)
- input_tensors = torch.arange(self.world_size * 2, dtype=torch.int64).to(
- self.rank
- )
+ input_tensors = torch.arange(self.world_size * 2, dtype=torch.int64).to(self.rank)
input_tensors = torch.reshape(input_tensors, (self.world_size, 2))
dist.reduce_scatter_tensor(output_tensor, input_tensors)
self.assertEqual(output_tensor, input_tensors[self.rank] * self.world_size)
@@ -3871,15 +3757,11 @@ class CompilerTest(test_c10d_common.CompilerTest):
dist.reduce_scatter_tensor(output_tensors[i], input_tensors[i])
self.assertEqual(output_tensors, input_tensors[self.rank] * self.world_size)
-
class SetDeviceMethod(Enum):
TORCH_CUDA_SET = auto() # torch.cuda.set_device
COLLECTIVE_ARGUMENT = auto() # broadcast_object_list(device=)
-
-class NcclProcessGroupWithDispatchedCollectivesTests(
- test_c10d_common.ProcessGroupWithDispatchedCollectivesTests
-):
+class NcclProcessGroupWithDispatchedCollectivesTests(test_c10d_common.ProcessGroupWithDispatchedCollectivesTests):
@requires_nccl()
@skip_if_lt_x_gpu(1)
def test_collectives(self):
@@ -3911,7 +3793,6 @@ class NcclProcessGroupWithDispatchedCollectivesTests(
dist.all_gather_into_tensor(output_tensor, tensor)
self.assertEqual(output_tensor, tensor)
-
class LargeCommTest(test_c10d_common.AbstractLargeCommTest, MultiProcessTestCase):
def setUp(self):
super().setUp()
@@ -3952,9 +3833,7 @@ class LargeCommTest(test_c10d_common.AbstractLargeCommTest, MultiProcessTestCase
f"need world size of 4 to get 2 subgroup PGs, but got world size of {world_size}"
)
store = c10d.FileStore(self.file_name, world_size)
- c10d.init_process_group(
- backend="nccl", store=store, rank=self.rank, world_size=world_size
- )
+ c10d.init_process_group(backend="nccl", store=store, rank=self.rank, world_size=world_size)
# every rank creates the same sub groups
# including unused sub groups in the current rank
a_group = c10d.new_group([0, 1])
@@ -3974,24 +3853,12 @@ class LargeCommTest(test_c10d_common.AbstractLargeCommTest, MultiProcessTestCase
input = torch.ones((10,), device=device) * self.rank
if self.rank == 0 or self.rank == 2:
gather_list = [torch.empty_like(input) for _ in range(subgroup.size())]
- torch.distributed.gather(
- input,
- gather_list=gather_list,
- dst=self.rank,
- group=subgroup,
- async_op=False,
- )
+ torch.distributed.gather(input, gather_list=gather_list, dst=self.rank, group=subgroup, async_op=False)
for src in range(len(gather_list)):
expected = (torch.ones_like(input) * self.rank) + src
self.assertEqual(gather_list[src], expected)
else:
- torch.distributed.gather(
- input,
- gather_list=None,
- dst=self.rank - 1,
- group=subgroup,
- async_op=False,
- )
+ torch.distributed.gather(input, gather_list=None, dst=self.rank - 1, group=subgroup, async_op=False)
@requires_nccl()
@skip_if_lt_x_gpu(4)
@@ -4013,15 +3880,11 @@ class LargeCommTest(test_c10d_common.AbstractLargeCommTest, MultiProcessTestCase
# another weird thing- what's the point of making me specify some empty objects in my list?
# empty list should be valid imo. (but it throws an error)
gather_list = [{}, {}]
- torch.distributed.gather_object(
- input, object_gather_list=gather_list, dst=self.rank, group=subgroup
- )
+ torch.distributed.gather_object(input, object_gather_list=gather_list, dst=self.rank, group=subgroup)
for src in range(len(gather_list)):
self.assertEqual(gather_list[src]["rank"], self.rank + src)
else:
- torch.distributed.gather_object(
- input, object_gather_list=None, dst=self.rank - 1, group=subgroup
- )
+ torch.distributed.gather_object(input, object_gather_list=None, dst=self.rank - 1, group=subgroup)
@requires_nccl()
@skip_if_lt_x_gpu(4)
@@ -4082,10 +3945,7 @@ class LargeCommTest(test_c10d_common.AbstractLargeCommTest, MultiProcessTestCase
@requires_nccl()
@skip_if_lt_x_gpu(4)
- @parametrize(
- "set_device",
- [SetDeviceMethod.TORCH_CUDA_SET, SetDeviceMethod.COLLECTIVE_ARGUMENT],
- )
+ @parametrize("set_device", [SetDeviceMethod.TORCH_CUDA_SET, SetDeviceMethod.COLLECTIVE_ARGUMENT])
def test_broadcast_object_list_subgroup(self, set_device: SetDeviceMethod):
world_size = 4
if self.rank >= world_size:
@@ -4098,9 +3958,7 @@ class LargeCommTest(test_c10d_common.AbstractLargeCommTest, MultiProcessTestCase
device = torch.device("cuda:%d" % self.rank)
if self.rank == 0 or self.rank == 2:
x = [{}]
- c10d.broadcast_object_list(
- x, src=self.rank + 1, group=subgroup, device=device
- )
+ c10d.broadcast_object_list(x, src=self.rank + 1, group=subgroup, device=device)
expected = [{"rank": self.rank + 1}]
self.assertEqual(x, expected)
else:
@@ -4127,6 +3985,7 @@ class LargeCommTest(test_c10d_common.AbstractLargeCommTest, MultiProcessTestCase
c10d.scatter(x, scatter_list=scatter_list, src=self.rank, group=subgroup)
self.assertEqual(x, expected)
+
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_scatter_object_list_subgroup(self):
@@ -4158,10 +4017,8 @@ class LargeCommTest(test_c10d_common.AbstractLargeCommTest, MultiProcessTestCase
)
self.assertEqual(scatter_object_output_list, expected)
-
instantiate_parametrized_tests(LargeCommTest)
-
class SparseCollective(MultiProcessTestCase):
@property
def world_size(self):
@@ -4185,9 +4042,7 @@ class SparseCollective(MultiProcessTestCase):
class ToyModel(nn.Module):
def __init__(self, rank, vocab_size, embedding_dim):
super().__init__()
- self.embedding = nn.Embedding(vocab_size, embedding_dim, sparse=True).to(
- rank
- )
+ self.embedding = nn.Embedding(vocab_size, embedding_dim, sparse=True).to(rank)
self.linear = nn.Linear(embedding_dim, 1).to(rank)
def forward(self, inputs):
@@ -4212,14 +4067,12 @@ class SparseCollective(MultiProcessTestCase):
vocab_size = 5
- model = SparseCollective.ToyModel(
- self.rank, vocab_size=vocab_size, embedding_dim=10
- )
+ model = SparseCollective.ToyModel(self.rank, vocab_size=vocab_size, embedding_dim=10)
ddp_model = DistributedDataParallel(model)
inputs = torch.tensor([[1, 0, 0], [0, 0, 0], [0, 0, 0]]).to(self.rank)
# set sparse metadata on the DDP model
indices = torch.Tensor(list(range(vocab_size)))
- ddp_model._set_sparse_metadata({"embedding.weight": indices})
+ ddp_model._set_sparse_metadata({"embedding.weight" : indices})
# forward pass
try:
output = ddp_model(inputs)
@@ -4239,26 +4092,22 @@ class SparseCollective(MultiProcessTestCase):
class NCCLTraceTestBase(MultiProcessTestCase):
def setUp(self):
super().setUp()
- os.environ[
- "TORCH_NCCL_ENABLE_TIMING"
- ] = "0" # see 'timing_enabled' parametrized tests
- os.environ["TORCH_NCCL_TRACE_BUFFER_SIZE"] = "1000"
- os.environ["TORCH_NCCL_DUMP_ON_TIMEOUT"] = "1"
+ os.environ["TORCH_NCCL_ENABLE_TIMING"] = '0' # see 'timing_enabled' parametrized tests
+ os.environ["TORCH_NCCL_TRACE_BUFFER_SIZE"] = '1000'
+ os.environ["TORCH_NCCL_DUMP_ON_TIMEOUT"] = '1'
self.tempdir = tempfile.TemporaryDirectory()
os.environ["TORCH_NCCL_DEBUG_INFO_TEMP_FILE"] = self._trace_basename()
os.environ["TORCH_NCCL_DEBUG_INFO_PIPE_FILE"] = self._trace_basename()
self._spawn_processes()
@classmethod
- def _run(
- cls, parent_conn, rank: int, test_name: str, file_name: str, parent_pipe
- ) -> None:
+ def _run(cls, parent_conn, rank: int, test_name: str, file_name: str, parent_pipe) -> None:
cls.parent = parent_conn
super()._run(rank, test_name, file_name, parent_pipe)
@property
def local_device(self):
- return torch.device("cuda", self.rank_to_GPU[self.rank][0])
+ return torch.device('cuda', self.rank_to_GPU[self.rank][0])
def _join_processes(self, fn):
fn()
@@ -4277,14 +4126,15 @@ class NCCLTraceTestBase(MultiProcessTestCase):
def wrap(*positional, args, **kwargs):
args = (next(piter), *args)
return proc(*positional, args=args, **kwargs)
-
self._start_processes(wrap)
def _create_process_group_nccl(self):
store = dist.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
- "nccl", world_size=self.world_size, rank=self.rank, store=store
- )
+ "nccl",
+ world_size=self.world_size,
+ rank=self.rank,
+ store=store)
pg = c10d.distributed_c10d._get_default_group()
return pg
@@ -4314,8 +4164,8 @@ class NCCLTraceTestBase(MultiProcessTestCase):
def started_or_scheduled(self, timing_enabled):
return "started" if timing_enabled else "scheduled"
-
class NCCLTraceTest(NCCLTraceTestBase):
+
@requires_nccl()
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
@parametrize("timing_enabled", [True, False])
@@ -4336,41 +4186,39 @@ class NCCLTraceTest(NCCLTraceTestBase):
time.sleep(1)
t = pickle.loads(torch._C._distributed_c10d._dump_nccl_trace())
- ver = t["version"]
+ ver = t['version']
self.assertEqual(ver, "1.5")
- pg_config = t["pg_config"]
+ pg_config = t['pg_config']
self.assertEqual(len(pg_config), 1)
- default_pg_info = pg_config["0"]
- self.assertIn("name", default_pg_info)
- self.assertIn("desc", default_pg_info)
- self.assertIn("ranks", default_pg_info)
- global_ranks = pg_config["0"]["ranks"]
+ default_pg_info = pg_config['0']
+ self.assertIn('name', default_pg_info)
+ self.assertIn('desc', default_pg_info)
+ self.assertIn('ranks', default_pg_info)
+ global_ranks = pg_config['0']['ranks']
self.assertEqual(len(json.loads(global_ranks)), self.world_size)
- t = t["entries"]
+ t = t['entries']
self.assertEqual(len(t), 2)
last = t[-1]
- self.assertEqual(last["process_group"], ("0", "default_pg"))
- self.assertEqual(last["state"], "completed")
- s = last["time_discovered_started_ns"]
- f = last["time_discovered_completed_ns"]
- self.assertEqual(last["record_id"], 1)
+ self.assertEqual(last['process_group'], ('0', 'default_pg'))
+ self.assertEqual(last['state'], 'completed')
+ s = last['time_discovered_started_ns']
+ f = last['time_discovered_completed_ns']
+ self.assertEqual(last['record_id'], 1)
self.assertIsNotNone(f)
if timing_enabled:
self.assertIsNotNone(s)
self.assertTrue(s <= f)
- self.assertIn("test_c10d_nccl.py", str(last["frames"]))
- self.assertEqual(last["input_sizes"], ((3, 4),))
- self.assertEqual(last["output_sizes"], ((3, 4),))
- self.assertEqual(last["seq_id"], 2)
+ self.assertIn('test_c10d_nccl.py', str(last['frames']))
+ self.assertEqual(last['input_sizes'], ((3, 4),))
+ self.assertEqual(last['output_sizes'], ((3, 4),))
+ self.assertEqual(last['seq_id'], 2)
now = datetime.now()
- event_created_time = datetime.fromtimestamp(
- last["time_created_ns"] / 1000000000
- )
+ event_created_time = datetime.fromtimestamp(last['time_created_ns'] / 1000000000)
before_test = now - timedelta(minutes=1)
self.assertTrue(before_test < event_created_time < now)
if timing_enabled:
# very loose bounds, measured 0.036 ms on devgpu
- self.assertTrue(0 < last["duration_ms"] < 100)
+ self.assertTrue(0 < last['duration_ms'] < 100)
else:
self.assertTrue("duration_ms" not in last)
@@ -4382,22 +4230,22 @@ class NCCLTraceTest(NCCLTraceTestBase):
while time.time() - start_time < timeout:
if os.path.exists(file_path):
return open(file_path, mode)
- time.sleep(0.1)
+ time.sleep(.1)
raise FileNotFoundError
if self.rank == self.MAIN_PROCESS_RANK:
for c in self.children_pipes:
- self.assertEqual(c.recv(), "next")
+ self.assertEqual(c.recv(), 'next')
dump_file = self._trace_name(rank=0)
pipe_file = dump_file + ".pipe"
- with open_file_with_timeout(pipe_file, "w") as f:
- f.write("1\n")
- with open_file_with_timeout(dump_file, "rb", timeout=10.0) as f:
- self.assertTrue("all_reduce" in str(pickle.load(f)))
+ with open_file_with_timeout(pipe_file, 'w') as f:
+ f.write('1\n')
+ with open_file_with_timeout(dump_file, 'rb', timeout=10.0) as f:
+ self.assertTrue('all_reduce' in str(pickle.load(f)))
for c in self.children_pipes:
- c.send("next")
+ c.send('next')
return
pg = self._create_process_group_nccl()
@@ -4407,13 +4255,13 @@ class NCCLTraceTest(NCCLTraceTestBase):
f = pg.allreduce(a)
f.wait()
torch.cuda.synchronize(device=device)
- self.parent.send("next")
+ self.parent.send('next')
self.parent.recv()
@requires_nccl()
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
def test_long(self):
- os.environ["TORCH_NCCL_TRACE_BUFFER_SIZE"] = "10"
+ os.environ["TORCH_NCCL_TRACE_BUFFER_SIZE"] = '10'
if self.rank == self.MAIN_PROCESS_RANK:
return
pg = self._create_process_group_nccl()
@@ -4433,16 +4281,16 @@ class NCCLTraceTest(NCCLTraceTestBase):
f.wait()
torch.cuda.synchronize(device=device)
t = pickle.loads(torch._C._distributed_c10d._dump_nccl_trace())
- t = t["entries"]
+ t = t['entries']
self.assertEqual(len(t), 10)
first = t[0]
last = t[-1]
- self.assertEqual(last["profiling_name"], "nccl:all_reduce")
- self.assertEqual(last["state"], "completed")
- self.assertIn("test_c10d_nccl.py", str(last["frames"]))
- self.assertEqual(last["input_sizes"], ((3, 4),))
- self.assertEqual(last["output_sizes"], ((3, 4),))
- self.assertEqual(last["seq_id"] - first["seq_id"], 9)
+ self.assertEqual(last['profiling_name'], 'nccl:all_reduce')
+ self.assertEqual(last['state'], 'completed')
+ self.assertIn('test_c10d_nccl.py', str(last['frames']))
+ self.assertEqual(last['input_sizes'], ((3, 4),))
+ self.assertEqual(last['output_sizes'], ((3, 4),))
+ self.assertEqual(last['seq_id'] - first['seq_id'], 9)
@requires_nccl()
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
@@ -4450,9 +4298,9 @@ class NCCLTraceTest(NCCLTraceTestBase):
def test_trace_while_active(self, timing_enabled):
if self.rank == self.MAIN_PROCESS_RANK:
for c in self.children_pipes:
- self.assertEqual(c.recv(), "next")
+ self.assertEqual(c.recv(), 'next')
for c in self.children_pipes:
- c.send("next")
+ c.send('next')
return
pg = self._create_process_group_nccl()
@@ -4469,19 +4317,17 @@ class NCCLTraceTest(NCCLTraceTestBase):
pg.allreduce(a).wait()
e.synchronize()
t = pickle.loads(torch._C._distributed_c10d._dump_nccl_trace())
- t = t["entries"]
- self.assertEqual(t[-1]["profiling_name"], "nccl:all_reduce")
+ t = t['entries']
+ self.assertEqual(t[-1]['profiling_name'], 'nccl:all_reduce')
if self.rank == 0:
- self.assertEqual(t[-1]["seq_id"], 1)
- self.assertEqual(t[-1]["state"], "completed")
+ self.assertEqual(t[-1]['seq_id'], 1)
+ self.assertEqual(t[-1]['state'], 'completed')
else:
- self.assertEqual(t[-1]["seq_id"], 2)
- self.assertEqual(
- t[-1]["state"], self.started_or_scheduled(timing_enabled)
- )
+ self.assertEqual(t[-1]['seq_id'], 2)
+ self.assertEqual(t[-1]['state'], self.started_or_scheduled(timing_enabled))
- self.parent.send("next")
- self.assertEqual("next", self.parent.recv())
+ self.parent.send('next')
+ self.assertEqual('next', self.parent.recv())
if self.rank == 0:
pg.allreduce(a).wait()
torch.cuda.synchronize(device=device)
@@ -4492,9 +4338,9 @@ class NCCLTraceTest(NCCLTraceTestBase):
def test_trace_while_stuck(self, timing_enabled):
if self.rank == self.MAIN_PROCESS_RANK:
for c in self.children_pipes:
- self.assertEqual(c.recv(), "next")
+ self.assertEqual(c.recv(), 'next')
for c in self.children_pipes:
- c.send("next")
+ c.send('next')
return
pg = self._create_process_group_nccl()
@@ -4514,20 +4360,18 @@ class NCCLTraceTest(NCCLTraceTestBase):
# give the other thread some time to fill the cuda buffer
time.sleep(5)
t = pickle.loads(torch._C._distributed_c10d._dump_nccl_trace())
- t = t["entries"]
- self.assertEqual(t[-1]["profiling_name"], "nccl:all_reduce")
+ t = t['entries']
+ self.assertEqual(t[-1]['profiling_name'], 'nccl:all_reduce')
if self.rank == 0:
- self.assertEqual(t[-1]["seq_id"], 1)
- self.assertEqual(t[-1]["state"], "completed")
+ self.assertEqual(t[-1]['seq_id'], 1)
+ self.assertEqual(t[-1]['state'], 'completed')
else:
- self.assertEqual(t[-1]["seq_id"], 2)
- self.assertEqual(
- t[-1]["state"], self.started_or_scheduled(timing_enabled)
- )
- self.assertIsNone(t[-1]["time_discovered_completed_ns"])
+ self.assertEqual(t[-1]['seq_id'], 2)
+ self.assertEqual(t[-1]['state'], self.started_or_scheduled(timing_enabled))
+ self.assertIsNone(t[-1]['time_discovered_completed_ns'])
# this will eventually cause the missing rank 0
         # to continue, which will unblock the non-zero ranks
- self.parent.send("next")
+ self.parent.send('next')
if self.rank != 0:
pg.allreduce(a).wait()
@@ -4541,20 +4385,17 @@ class NCCLTraceTest(NCCLTraceTestBase):
else:
gather_trace()
- self.assertEqual("next", self.parent.recv())
+ self.assertEqual('next', self.parent.recv())
if self.rank == 0:
pg.allreduce(a).wait()
torch.cuda.synchronize(device=device)
@requires_nccl()
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
- @parametrize(
- "op_sizes_per_coalesce",
- [
- [(2, 3)],
- [(2, 3), (5, 5), (1,)],
- ],
- )
+ @parametrize("op_sizes_per_coalesce", [
+ [(2, 3)],
+ [(2, 3), (5, 5), (1,)],
+ ])
@parametrize("timing_enabled", [True, False])
def test_batched_send_recv(self, op_sizes_per_coalesce, timing_enabled):
"""
@@ -4589,7 +4430,7 @@ class NCCLTraceTest(NCCLTraceTestBase):
time.sleep(1)
t = pickle.loads(torch._C._distributed_c10d._dump_nccl_trace())
- self.assertEqual(len(t["entries"]), num_coalesced_ops * (ops_per_coalesce + 1))
+ self.assertEqual(len(t['entries']), num_coalesced_ops * (ops_per_coalesce + 1))
expected_record_id = 0
expected_seq = 1
@@ -4597,61 +4438,44 @@ class NCCLTraceTest(NCCLTraceTestBase):
for seq in range(num_coalesced_ops):
first_op = seq * (ops_per_coalesce + 1)
coalesced_op = first_op + ops_per_coalesce
- for p2p_op_idx, input_sizes in zip(
- range(first_op, coalesced_op, 1), op_sizes_per_coalesce
- ):
+ for p2p_op_idx, input_sizes in zip(range(first_op, coalesced_op, 1), op_sizes_per_coalesce):
                 # the individual ops inside the coalescing group have their individual op metadata,
# but not the timing info coming from the actual coalesced kernel
- profiling_name = (
- "nccl:recv 0<-1" if self.rank == 0 else "nccl:send 1->0"
- )
- self.assertEqual(
- t["entries"][p2p_op_idx]["record_id"], expected_record_id
- )
+ profiling_name = 'nccl:recv 0<-1' if self.rank == 0 else 'nccl:send 1->0'
+ self.assertEqual(t['entries'][p2p_op_idx]['record_id'], expected_record_id)
expected_record_id += 1
- self.assertEqual(
- t["entries"][p2p_op_idx]["profiling_name"], profiling_name
- )
- self.assertEqual(t["entries"][p2p_op_idx]["seq_id"], expected_seq)
- self.assertEqual(t["entries"][p2p_op_idx]["op_id"], expected_op_id)
+ self.assertEqual(t['entries'][p2p_op_idx]['profiling_name'], profiling_name)
+ self.assertEqual(t['entries'][p2p_op_idx]['seq_id'], expected_seq)
+ self.assertEqual(t['entries'][p2p_op_idx]['op_id'], expected_op_id)
expected_op_id += 1
- self.assertEqual(t["entries"][p2p_op_idx]["input_sizes"], [input_sizes])
- self.assertEqual(
- t["entries"][p2p_op_idx]["output_sizes"], [input_sizes]
- )
+ self.assertEqual(t['entries'][p2p_op_idx]['input_sizes'], [input_sizes])
+ self.assertEqual(t['entries'][p2p_op_idx]['output_sizes'], [input_sizes])
# duration doesn't get tagged onto individual ops yet, nor is their state updated
- self.assertEqual(t["entries"][p2p_op_idx]["state"], "scheduled")
- self.assertTrue("duration_ms" not in t["entries"][p2p_op_idx])
+ self.assertEqual(t['entries'][p2p_op_idx]['state'], 'scheduled')
+ self.assertTrue('duration_ms' not in t['entries'][p2p_op_idx])
# the coalesced op has no metadata but indicates that coalescing was used,
# and accurately reflects the timing and state info for the whole group
- self.assertEqual(
- t["entries"][coalesced_op]["record_id"], expected_record_id
- )
+ self.assertEqual(t['entries'][coalesced_op]['record_id'], expected_record_id)
expected_record_id += 1
- self.assertEqual(
- t["entries"][coalesced_op]["profiling_name"], "nccl:coalesced"
- )
- self.assertEqual(t["entries"][coalesced_op]["seq_id"], expected_seq)
+ self.assertEqual(t['entries'][coalesced_op]['profiling_name'], 'nccl:coalesced')
+ self.assertEqual(t['entries'][coalesced_op]['seq_id'], expected_seq)
expected_seq += 1
- self.assertEqual(t["entries"][coalesced_op]["state"], "completed")
- self.assertEqual(t["entries"][coalesced_op]["input_sizes"], [])
- self.assertEqual(t["entries"][coalesced_op]["output_sizes"], [])
+ self.assertEqual(t['entries'][coalesced_op]['state'], 'completed')
+ self.assertEqual(t['entries'][coalesced_op]['input_sizes'], [])
+ self.assertEqual(t['entries'][coalesced_op]['output_sizes'], [])
if timing_enabled:
- duration = t["entries"][coalesced_op]["duration_ms"]
+ duration = t['entries'][coalesced_op]['duration_ms']
self.assertTrue(0.001 < duration < 10000, duration)
else:
- self.assertTrue("duration_ms" not in t["entries"][coalesced_op])
+ self.assertTrue('duration_ms' not in t['entries'][coalesced_op])
@requires_nccl()
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
- @parametrize(
- "op_sizes",
- [
- [(2, 3)],
- [(2, 3), (5, 5), (1,)],
- ],
- )
+ @parametrize("op_sizes", [
+ [(2, 3)],
+ [(2, 3), (5, 5), (1,)],
+ ])
@parametrize("timing_enabled", [True, False])
def test_individual_send_recv(self, op_sizes, timing_enabled):
"""
@@ -4681,26 +4505,26 @@ class NCCLTraceTest(NCCLTraceTestBase):
time.sleep(1)
t = pickle.loads(torch._C._distributed_c10d._dump_nccl_trace())
- self.assertEqual(len(t["entries"]), num_repeats * (ops_per_repeat))
+ self.assertEqual(len(t['entries']), num_repeats * (ops_per_repeat))
expected_seq = 1
expected_op_id = 1
for seq in range(num_repeats * ops_per_repeat):
input_sizes = op_sizes[seq % ops_per_repeat]
- profiling_name = "nccl:recv 0<-1" if self.rank == 0 else "nccl:send 1->0"
- self.assertEqual(t["entries"][seq]["profiling_name"], profiling_name)
- self.assertEqual(t["entries"][seq]["seq_id"], expected_seq)
+ profiling_name = 'nccl:recv 0<-1' if self.rank == 0 else 'nccl:send 1->0'
+ self.assertEqual(t['entries'][seq]['profiling_name'], profiling_name)
+ self.assertEqual(t['entries'][seq]['seq_id'], expected_seq)
expected_seq += 1
- self.assertEqual(t["entries"][seq]["op_id"], expected_op_id)
+ self.assertEqual(t['entries'][seq]['op_id'], expected_op_id)
expected_op_id += 1
- self.assertEqual(t["entries"][seq]["input_sizes"], [input_sizes])
- self.assertEqual(t["entries"][seq]["output_sizes"], [input_sizes])
- self.assertEqual(t["entries"][seq]["state"], "completed")
+ self.assertEqual(t['entries'][seq]['input_sizes'], [input_sizes])
+ self.assertEqual(t['entries'][seq]['output_sizes'], [input_sizes])
+ self.assertEqual(t['entries'][seq]['state'], 'completed')
if timing_enabled:
- duration = t["entries"][seq]["duration_ms"]
+ duration = t['entries'][seq]['duration_ms']
self.assertTrue(0.001 < duration < 10000, duration)
else:
- self.assertTrue("duration_ms" not in t["entries"][seq])
+ self.assertTrue('duration_ms' not in t['entries'][seq])
# TODO(whc) support and test coalesced collectives that use the c++ start/end group thingy instead of python
# coalescing manager
@@ -4742,32 +4566,17 @@ class NCCLTraceTest(NCCLTraceTestBase):
t = pickle.loads(torch._C._distributed_c10d._dump_nccl_trace())
- self.assertEqual(
- len(t["entries"]), 1
- ) # one for the reduce_scatter_tensor_coalesced, one for the endCoalescing
- self.assertEqual(
- t["entries"][0]["profiling_name"], "nccl:reduce_scatter_tensor_coalesced"
- )
- self.assertEqual(t["entries"][0]["seq_id"], 1)
- self.assertEqual(t["entries"][0]["input_sizes"], [[2, 2], [2, 2]])
- self.assertEqual(
- t["entries"][0]["output_sizes"],
- [
- [
- 2,
- ],
- [
- 2,
- ],
- ],
- )
- self.assertEqual(t["entries"][0]["state"], "completed")
+ self.assertEqual(len(t['entries']), 1) # one for the reduce_scatter_tensor_coalesced, one for the endCoalescing
+ self.assertEqual(t['entries'][0]['profiling_name'], "nccl:reduce_scatter_tensor_coalesced")
+ self.assertEqual(t['entries'][0]['seq_id'], 1)
+ self.assertEqual(t['entries'][0]['input_sizes'], [[2, 2], [2, 2]])
+ self.assertEqual(t['entries'][0]['output_sizes'], [[2,], [2,]])
+ self.assertEqual(t['entries'][0]['state'], 'completed')
if timing_enabled:
- duration = t["entries"][0]["duration_ms"]
+ duration = t['entries'][0]['duration_ms']
self.assertTrue(0.001 < duration < 10000, duration)
else:
- self.assertTrue("duration_ms" not in t["entries"][0])
-
+ self.assertTrue('duration_ms' not in t['entries'][0])
class NCCLTraceTestDumpOnTimeoutBase(NCCLTraceTestBase):
timeout_sec = 1
@@ -4779,8 +4588,7 @@ class NCCLTraceTestDumpOnTimeoutBase(NCCLTraceTestBase):
world_size=self.world_size,
rank=self.rank,
store=store,
- timeout=timedelta(seconds=NCCLTraceTestDumpOnTimeoutBase.timeout_sec),
- )
+ timeout=timedelta(seconds=NCCLTraceTestDumpOnTimeoutBase.timeout_sec))
pg = c10d.distributed_c10d._get_default_group()
return pg
@@ -4797,31 +4605,28 @@ class NCCLTraceTestDumpOnTimeoutBase(NCCLTraceTestBase):
except TimeoutError:
return None
-
class NCCLTraceTestDumpOnTimeout(NCCLTraceTestDumpOnTimeoutBase):
@requires_nccl()
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
@parametrize("timing_enabled", [True, False])
def test_timeout_dumps(self, timing_enabled):
# dump on heartbeatmonitor thread
- os.environ["TORCH_NCCL_COORD_CHECK_MILSEC"] = "1000"
+ os.environ['TORCH_NCCL_COORD_CHECK_MILSEC'] = '1000'
# need rank0 to crash before looking for its output file
- os.environ["TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC"] = "1"
+ os.environ['TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC'] = '1'
if self.rank == self.MAIN_PROCESS_RANK:
# wait for rank0 to crash before looking for its output file
# we rely on rank0 holding off its abort long enough to dump the debug info
self.assertEqual(self._wait_process(0, timeout=90), -6)
- with open(self._trace_name(rank=0), "rb") as f:
+ with open(self._trace_name(rank=0), 'rb') as f:
t = pickle.load(f)
- t = t["entries"]
+ t = t['entries']
self.assertEqual(len(t), 2)
- self.assertEqual(t[0]["seq_id"], 1)
- self.assertEqual(t[0]["state"], "completed")
- self.assertEqual(t[1]["seq_id"], 2)
- self.assertEqual(
- t[1]["state"], self.started_or_scheduled(timing_enabled)
- )
+ self.assertEqual(t[0]['seq_id'], 1)
+ self.assertEqual(t[0]['state'], 'completed')
+ self.assertEqual(t[1]['seq_id'], 2)
+ self.assertEqual(t[1]['state'], self.started_or_scheduled(timing_enabled))
self.assertFalse(os.path.exists(self._trace_name(rank=1)))
@@ -4843,12 +4648,10 @@ class NCCLTraceTestDumpOnTimeout(NCCLTraceTestDumpOnTimeoutBase):
# rank 0 will crash before it passes the sync, but rank1 will exit quickly and cleanly
torch.cuda.synchronize()
-
instantiate_parametrized_tests(ProcessGroupNCCLTest)
instantiate_parametrized_tests(NCCLTraceTestDumpOnTimeout)
instantiate_parametrized_tests(NCCLTraceTest)
-
class NCCLTraceTestTimeoutDumpOnStuckRanks(NCCLTraceTestDumpOnTimeoutBase):
def _check_return_codes(self, elapsed_time):
# the base test infra assumes processes exit with matching return codes,
@@ -4860,9 +4663,9 @@ class NCCLTraceTestTimeoutDumpOnStuckRanks(NCCLTraceTestDumpOnTimeoutBase):
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
def test_timeout_dumps_on_stuck_ranks(self):
# need rank0 to crash quicker after detecting timeout
- os.environ["TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC"] = "1"
+ os.environ['TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC'] = '1'
# restore this env var to its prior default in case another test changed it
- os.environ["TORCH_NCCL_COORD_CHECK_MILSEC"] = "1000"
+ os.environ['TORCH_NCCL_COORD_CHECK_MILSEC'] = '1000'
if self.rank == self.MAIN_PROCESS_RANK:
# wait for both rank0 and 1 to crash before looking for both ranks' output
@@ -4871,16 +4674,16 @@ class NCCLTraceTestTimeoutDumpOnStuckRanks(NCCLTraceTestDumpOnTimeoutBase):
self.assertEqual(self._wait_process(1, timeout=90), -6)
self.assertTrue(os.path.exists(self._trace_name(rank=1)))
self.assertTrue(os.path.exists(self._trace_name(rank=0)))
- with open(self._trace_name(rank=0), "rb") as f:
+ with open(self._trace_name(rank=0), 'rb') as f:
t = pickle.load(f)
- t = t["entries"]
+ t = t['entries']
self.assertEqual(len(t), 2)
- with open(self._trace_name(rank=1), "rb") as f:
+ with open(self._trace_name(rank=1), 'rb') as f:
t = pickle.load(f)
- t = t["entries"]
+ t = t['entries']
self.assertEqual(len(t), 1)
- self.assertEqual(t[0]["seq_id"], 1)
- self.assertEqual(t[0]["state"], "completed")
+ self.assertEqual(t[0]['seq_id'], 1)
+ self.assertEqual(t[0]['state'], 'completed')
return
pg = self._create_process_group_nccl()
@@ -4900,7 +4703,6 @@ class NCCLTraceTestTimeoutDumpOnStuckRanks(NCCLTraceTestDumpOnTimeoutBase):
# getting the global signal to dump the debugging info.
time.sleep(600)
-
class NcclErrorDumpTest(NCCLTraceTestBase):
def _wait_process(self, rank, timeout):
try:
@@ -4921,10 +4723,10 @@ class NcclErrorDumpTest(NCCLTraceTestBase):
@skip_if_rocm
def test_nccl_errors_dump(self):
os.environ["TORCH_NCCL_ASYNC_ERROR_HANDLING"] = "1"
- os.environ["TORCH_NCCL_TRACE_BUFFER_SIZE"] = "1000"
- os.environ["TORCH_NCCL_DUMP_ON_TIMEOUT"] = "1"
+ os.environ["TORCH_NCCL_TRACE_BUFFER_SIZE"] = '1000'
+ os.environ["TORCH_NCCL_DUMP_ON_TIMEOUT"] = '1'
# need rank0 to dump before abort
- os.environ["TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC"] = "5"
+ os.environ['TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC'] = '5'
if self.rank == self.MAIN_PROCESS_RANK:
# wait for both rank0 and 1 to crash before looking for dump
diff --git a/test/distributed/test_c10d_object_collectives.py b/test/distributed/test_c10d_object_collectives.py
index f8a4731f47..aadd3b2f5f 100644
--- a/test/distributed/test_c10d_object_collectives.py
+++ b/test/distributed/test_c10d_object_collectives.py
@@ -2,7 +2,7 @@
import os
import sys
-from functools import partial, wraps
+from functools import wraps, partial
import torch
import torch.distributed as dist
@@ -11,21 +11,23 @@ if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
-from torch.testing._internal.common_distributed import MultiProcessTestCase, TEST_SKIPS
+from torch.testing._internal.common_distributed import (
+ MultiProcessTestCase,
+ TEST_SKIPS
+)
-from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
+from torch.testing._internal.common_utils import (
+ run_tests,
+ TEST_WITH_DEV_DBG_ASAN,
+)
if TEST_WITH_DEV_DBG_ASAN:
- print(
- "Skip dev-asan as torch + multiprocessing spawn have known issues",
- file=sys.stderr,
- )
+ print("Skip dev-asan as torch + multiprocessing spawn have known issues", file=sys.stderr)
sys.exit(0)
BACKEND = dist.Backend.NCCL if torch.cuda.is_available() else dist.Backend.GLOO
WORLD_SIZE = min(4, max(2, torch.cuda.device_count()))
-
def with_comms(func=None):
if func is None:
return partial(
@@ -39,10 +41,8 @@ def with_comms(func=None):
self.dist_init()
func(self)
self.destroy_comms()
-
return wrapper
-
class TestObjectCollectives(MultiProcessTestCase):
def setUp(self):
super().setUp()
@@ -52,11 +52,8 @@ class TestObjectCollectives(MultiProcessTestCase):
@property
def device(self):
- return (
- torch.device(self.rank)
- if BACKEND == dist.Backend.NCCL
+ return torch.device(self.rank) if BACKEND == dist.Backend.NCCL \
else torch.device("cpu")
- )
@property
def world_size(self):
@@ -86,7 +83,9 @@ class TestObjectCollectives(MultiProcessTestCase):
@with_comms()
def test_all_gather_object(self):
output = [None] * dist.get_world_size()
- dist.all_gather_object(object_list=output, obj=self.rank)
+ dist.all_gather_object(
+ object_list=output,
+ obj=self.rank)
for i, v in enumerate(output):
self.assertEqual(i, v, f"rank: {self.rank}")
@@ -94,12 +93,15 @@ class TestObjectCollectives(MultiProcessTestCase):
@with_comms()
def test_gather_object(self):
output = [None] * dist.get_world_size() if self.rank == 0 else None
- dist.gather_object(obj=self.rank, object_gather_list=output)
+ dist.gather_object(
+ obj=self.rank,
+ object_gather_list=output)
if self.rank == 0:
for i, v in enumerate(output):
self.assertEqual(i, v, f"rank: {self.rank}")
+
@with_comms()
def test_broadcast_object_list(self):
val = 99 if self.rank == 0 else None
@@ -114,8 +116,8 @@ class TestObjectCollectives(MultiProcessTestCase):
input_list = list(range(dist.get_world_size())) if self.rank == 0 else None
output_list = [None]
dist.scatter_object_list(
- scatter_object_output_list=output_list, scatter_object_input_list=input_list
- )
+ scatter_object_output_list=output_list,
+ scatter_object_input_list=input_list)
self.assertEqual(self.rank, output_list[0])
@@ -159,6 +161,5 @@ class TestObjectCollectives(MultiProcessTestCase):
dist.broadcast_object_list(out_list, src=ranks[0], group=my_pg)
self.assertEqual(ranks[0], out_list[0])
-
if __name__ == "__main__":
run_tests()
diff --git a/test/distributed/test_c10d_pypg.py b/test/distributed/test_c10d_pypg.py
index 1d6743f038..32f3359185 100644
--- a/test/distributed/test_c10d_pypg.py
+++ b/test/distributed/test_c10d_pypg.py
@@ -1,26 +1,27 @@
# Owner(s): ["oncall: distributed"]
import os
-import weakref
-
-import test_c10d_common
import torch
import torch.distributed as dist
-import torch.nn as nn
-from torch._C._distributed_c10d import _create_work_from_future
+from torch.testing._internal.common_utils import (
+ run_tests,
+)
from torch.futures import Future
+import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel as DDP
-from torch.testing._internal.common_distributed import MultiProcessTestCase
-from torch.testing._internal.common_utils import run_tests
-
+import test_c10d_common
+import weakref
+from torch._C._distributed_c10d import _create_work_from_future
+from torch.testing._internal.common_distributed import (
+ MultiProcessTestCase,
+)
def create_work(result):
future = Future()
future.set_result(result)
return _create_work_from_future(future)
-
class MyWork(dist._Work):
def __init__(self, result, pg):
super().__init__()
@@ -37,12 +38,10 @@ class MyWork(dist._Work):
self.pg_().get_future_count += 1
return self.future_
-
class LonelyRankProcessGroup(dist.ProcessGroup):
"""
This PG only supports world_size of 1
"""
-
def __init__(self, rank, world, use_wrapper):
super().__init__(rank, world)
assert rank == 0
@@ -89,7 +88,6 @@ class LonelyRankProcessGroup(dist.ProcessGroup):
def __repr__(self):
return f"PLG w:{self._world} r:{self._rank}"
-
# We cannot use parametrize as some tests are defined on the base class and use _get_process_group
class AbstractDDPSingleRank(test_c10d_common.CommonDistributedDataParallelTest):
def setUp(self):
@@ -114,7 +112,10 @@ class AbstractDDPSingleRank(test_c10d_common.CommonDistributedDataParallelTest):
pg = self._get_process_group()
torch.manual_seed(123)
- model = nn.Sequential(nn.Linear(2, 2), nn.ReLU())
+ model = nn.Sequential(
+ nn.Linear(2, 2),
+ nn.ReLU()
+ )
wrapped_model = model
input_tensor = torch.rand(2)
model = DDP(model, process_group=pg)
@@ -137,22 +138,17 @@ class AbstractDDPSingleRank(test_c10d_common.CommonDistributedDataParallelTest):
def test_ddp_with_pypg_with_grad_views(self):
pg = self._get_process_group()
- self._test_ddp_with_process_group(
- pg, [torch.device("cpu")], device_ids=None, gradient_as_bucket_view=True
- )
-
+ self._test_ddp_with_process_group(pg, [torch.device("cpu")], device_ids=None, gradient_as_bucket_view=True)
class TestDDPWithWorkSubclass(AbstractDDPSingleRank, MultiProcessTestCase):
@property
def use_wrapper(self):
return False
-
class TestDDPWithWorkWrapper(AbstractDDPSingleRank, MultiProcessTestCase):
@property
def use_wrapper(self):
return True
-
-if __name__ == "__main__":
+if __name__ == '__main__':
run_tests()
diff --git a/test/distributed/test_c10d_spawn.py b/test/distributed/test_c10d_spawn.py
index 97dc628c8a..a6fb33d4ea 100644
--- a/test/distributed/test_c10d_spawn.py
+++ b/test/distributed/test_c10d_spawn.py
@@ -7,8 +7,10 @@ import tempfile
import torch
import torch.distributed as c10d
import torch.multiprocessing as mp
-from torch.testing._internal.common_distributed import MultiProcessTestCase
-from torch.testing._internal.common_utils import load_tests, NO_MULTIPROCESSING_SPAWN
+from torch.testing._internal.common_distributed import \
+ MultiProcessTestCase
+from torch.testing._internal.common_utils import load_tests, \
+ NO_MULTIPROCESSING_SPAWN
 # Torch distributed.nn is not available on Windows
# check #42095, it errors on import.
@@ -23,11 +25,11 @@ except ImportError:
load_tests = load_tests
if not c10d.is_available():
- print("c10d not available, skipping tests", file=sys.stderr)
+ print('c10d not available, skipping tests', file=sys.stderr)
sys.exit(0)
if NO_MULTIPROCESSING_SPAWN:
- print("spawn not available, skipping tests", file=sys.stderr)
+ print('spawn not available, skipping tests', file=sys.stderr)
sys.exit(0)
@@ -38,14 +40,14 @@ class AbstractProcessGroupShareTensorTest:
ws = self.world_size
# file store will delete the test file on destruction
file = tempfile.NamedTemporaryFile(delete=False)
- ctx = mp.get_context("spawn")
+ ctx = mp.get_context('spawn')
c2p = ctx.Queue(2)
p2c = ctx.Queue(2)
ps = []
for i in range(ws):
p = ctx.Process(
- target=f, args=(i, file.name, shared_tensors, ws, init_pg, c2p, p2c)
- )
+ target=f,
+ args=(i, file.name, shared_tensors, ws, init_pg, c2p, p2c))
p.start()
ps.append(p)
@@ -55,7 +57,7 @@ class AbstractProcessGroupShareTensorTest:
self.assertEqual(
expected,
result,
- msg=f"Expect rank {pid} to receive tensor {expected} but got {result}.",
+ msg=f"Expect rank {pid} to receive tensor {expected} but got {result}."
)
for _ in range(ws):
@@ -68,8 +70,7 @@ class AbstractProcessGroupShareTensorTest:
# spawn mode. See https://bugs.python.org/issue33884.
@classmethod
def _test_broadcast_process(
- cls, rank, filename, shared_tensors, world_size, init_pg, c2p, p2c
- ):
+ cls, rank, filename, shared_tensors, world_size, init_pg, c2p, p2c):
pg = init_pg(rank, filename, world_size)
xs = [shared_tensors[rank]]
pg.broadcast(xs).wait()
@@ -78,8 +79,7 @@ class AbstractProcessGroupShareTensorTest:
@classmethod
def _test_allreduce_process(
- cls, rank, filename, shared_tensors, world_size, init_pg, c2p, p2c
- ):
+ cls, rank, filename, shared_tensors, world_size, init_pg, c2p, p2c):
pg = init_pg(rank, filename, world_size)
xs = [shared_tensors[rank]]
pg.allreduce(xs, op=c10d.ReduceOp.SUM).wait()
@@ -88,8 +88,7 @@ class AbstractProcessGroupShareTensorTest:
@classmethod
def _test_allgather_process(
- cls, rank, filename, shared_tensors, world_size, init_pg, c2p, p2c
- ):
+ cls, rank, filename, shared_tensors, world_size, init_pg, c2p, p2c):
pg = init_pg(rank, filename, world_size)
xs = [shared_tensors[rank]]
ys = [[torch.zeros_like(xs[0]) for i in range(world_size)]]
diff --git a/test/distributed/test_c10d_spawn_gloo.py b/test/distributed/test_c10d_spawn_gloo.py
index 95897a2938..70009e9eb3 100644
--- a/test/distributed/test_c10d_spawn_gloo.py
+++ b/test/distributed/test_c10d_spawn_gloo.py
@@ -11,29 +11,19 @@ import torch.distributed as c10d
import torch.nn as nn
from test_c10d_spawn import _torch_dist_nn_available, TestDistributedNNFunctions
from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU
-from torch.testing._internal.common_distributed import (
- create_device,
- requires_gloo,
- skip_if_lt_x_gpu,
-)
-from torch.testing._internal.common_utils import (
- run_tests,
- skip_but_pass_in_sandcastle_if,
- TEST_WITH_DEV_DBG_ASAN,
- TestCase,
-)
+from torch.testing._internal.common_distributed import requires_gloo, \
+ create_device, skip_if_lt_x_gpu
+from torch.testing._internal.common_utils import TestCase, run_tests, skip_but_pass_in_sandcastle_if, TEST_WITH_DEV_DBG_ASAN
# Fails on Python-3.9, see https://github.com/pytorch/pytorch/issues/51619
if sys.version_info < (3, 9):
+ class ProcessGroupShareTensorTest(test_c10d_spawn.AbstractProcessGroupShareTensorTest, TestCase):
- class ProcessGroupShareTensorTest(
- test_c10d_spawn.AbstractProcessGroupShareTensorTest, TestCase
- ):
@classmethod
def opts(cls, threads=2):
opts = c10d.ProcessGroupGloo._Options()
opts._timeout = 5.0
- opts._devices = [create_device(interface="lo")]
+ opts._devices = [create_device(interface='lo')]
opts._threads = threads
return opts
@@ -41,59 +31,42 @@ if sys.version_info < (3, 9):
def _init_pg_gloo(cls, rank, filename, world_size):
store = c10d.FileStore(filename, world_size)
backend = c10d.ProcessGroupGloo(
- store, rank, world_size, ProcessGroupShareTensorTest.opts()
- )
+ store, rank, world_size, ProcessGroupShareTensorTest.opts())
# set process group backends manually
- c10d.init_process_group(
- backend="gloo", store=store, rank=rank, world_size=world_size
- )
+ c10d.init_process_group(backend="gloo", store=store, rank=rank, world_size=world_size)
pg = c10d.distributed_c10d._get_default_group()
- pg._register_backend(
- torch.device("cpu"), c10d.ProcessGroup.BackendType.GLOO, backend
- )
- pg._register_backend(
- torch.device("cuda"), c10d.ProcessGroup.BackendType.GLOO, backend
- )
+ pg._register_backend(torch.device("cpu"), c10d.ProcessGroup.BackendType.GLOO, backend)
+ pg._register_backend(torch.device("cuda"), c10d.ProcessGroup.BackendType.GLOO, backend)
return pg
- @skip_but_pass_in_sandcastle_if(
- not TEST_MULTIGPU, "At least 2 CUDA GPUS needed"
- )
+ @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
def test_shared_broadcast_gloo(self):
self._test_multiprocess(
ProcessGroupShareTensorTest._test_broadcast_process,
[torch.ones(2, 2).to(i) * i for i in range(self.world_size)],
ProcessGroupShareTensorTest._init_pg_gloo,
- 1,
- )
+ 1)
- @skip_but_pass_in_sandcastle_if(
- not TEST_MULTIGPU, "At least 2 CUDA GPUS needed"
- )
+ @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
def test_shared_allreduce_gloo(self):
self._test_multiprocess(
ProcessGroupShareTensorTest._test_allreduce_process,
[torch.ones(2, 2).to(i) for i in range(self.world_size)],
ProcessGroupShareTensorTest._init_pg_gloo,
- 1,
- )
+ 1)
- @skip_but_pass_in_sandcastle_if(
- not TEST_MULTIGPU, "At least 2 CUDA GPUS needed"
- )
+ @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
def test_shared_allgather_gloo(self):
self._test_multiprocess(
ProcessGroupShareTensorTest._test_allgather_process,
[torch.ones(2, 2).to(i) * i for i in range(self.world_size)],
ProcessGroupShareTensorTest._init_pg_gloo,
- self.world_size,
- )
+ self.world_size)
@classmethod
def _test_allgather_chunk_process(
- cls, rank, filename, shared_tensor, world_size, init_pg, c2p, p2c
- ):
+ cls, rank, filename, shared_tensor, world_size, init_pg, c2p, p2c):
pg = init_pg(rank, filename, world_size)
chunks = torch.chunk(shared_tensor, world_size, dim=0)
x = chunks[rank]
@@ -103,16 +76,13 @@ if sys.version_info < (3, 9):
c2p.put((rank, chunks[1].to("cpu"), ys[1].to("cpu")))
p2c.get()
- @skip_but_pass_in_sandcastle_if(
- not TEST_MULTIGPU, "At least 2 CUDA GPUS needed"
- )
+ @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
def test_shared_allgather_chunk_gloo(self):
self._test_multiprocess(
ProcessGroupShareTensorTest._test_allgather_chunk_process,
torch.tensor(range(4)).reshape(2, 2),
ProcessGroupShareTensorTest._init_pg_gloo,
- self.world_size,
- )
+ self.world_size)
class DistributedDataParallelSingleProcessTest(TestCase):
@@ -129,9 +99,7 @@ class DistributedDataParallelSingleProcessTest(TestCase):
def _test_base(self, net, inp, check_allclose=True):
store = c10d.FileStore(self.file.name, self.world_size)
- c10d.init_process_group(
- backend="gloo", store=store, rank=self.rank, world_size=self.world_size
- )
+ c10d.init_process_group(backend="gloo", store=store, rank=self.rank, world_size=self.world_size)
process_group = c10d.distributed_c10d._get_default_group()
if inp[0].is_cuda:
device_ids = [torch.cuda.current_device()]
@@ -139,7 +107,9 @@ class DistributedDataParallelSingleProcessTest(TestCase):
device_ids = None
ddp = nn.parallel.DistributedDataParallel(
- copy.deepcopy(net), device_ids=device_ids, process_group=process_group
+ copy.deepcopy(net),
+ device_ids=device_ids,
+ process_group=process_group
)
net_opt = torch.optim.Adam(net.parameters(), lr=0.001)
@@ -191,9 +161,7 @@ class DistributedDataParallelSingleProcessTest(TestCase):
self.output_dim = output_dim
self.hidden_layers = hidden_layers
- self.lstm = nn.LSTM(
- input_dim, hidden_dim, hidden_layers, batch_first=True
- )
+ self.lstm = nn.LSTM(input_dim, hidden_dim, hidden_layers, batch_first=True)
self.h2o = nn.Linear(hidden_dim, output_dim)
def forward(self, x, y):
@@ -206,7 +174,7 @@ class DistributedDataParallelSingleProcessTest(TestCase):
net = Net(INPUT_DIM, HIDDEN_DIM, OUTPUT_DIM, N_LAYERS).to(0)
inp = [
torch.randn((BATCH_SIZE, SEQ_LEN, INPUT_DIM)).to(0),
- torch.rand((BATCH_SIZE, SEQ_LEN, OUTPUT_DIM)).to(0),
+ torch.rand((BATCH_SIZE, SEQ_LEN, OUTPUT_DIM)).to(0)
]
# Not checking result allclose as the parameter inconsistency exist
@@ -216,70 +184,53 @@ class DistributedDataParallelSingleProcessTest(TestCase):
# Skip dev-asan as torch + multiprocessing spawn have known issues
if not TEST_WITH_DEV_DBG_ASAN:
-
class TestDistributedNNFunctionsGloo(TestDistributedNNFunctions):
# Test Common Ops First.
@requires_gloo()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(
- not _torch_dist_nn_available, "torch.distributed.nn is not available"
- )
+ @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
def test_broadcast(self):
self._test_broadcast("gloo")
@requires_gloo()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(
- not _torch_dist_nn_available, "torch.distributed.nn is not available"
- )
+ @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
def test_reduce(self):
self._test_reduce("gloo")
@requires_gloo()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(
- not _torch_dist_nn_available, "torch.distributed.nn is not available"
- )
+ @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
def test_allreduce(self):
self._test_allreduce("gloo")
@requires_gloo()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(
- not _torch_dist_nn_available, "torch.distributed.nn is not available"
- )
+ @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
def test_all_gather(self):
self._test_all_gather("gloo")
@requires_gloo()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(
- not _torch_dist_nn_available, "torch.distributed.nn is not available"
- )
+ @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
def test_all_to_all(self):
self._test_all_to_all("gloo")
@requires_gloo()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(
- not _torch_dist_nn_available, "torch.distributed.nn is not available"
- )
+ @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
def test_all_to_all_single(self):
self._test_all_to_all_single("gloo")
# Test Ops only supported in GLOO.
@requires_gloo()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(
- not _torch_dist_nn_available, "torch.distributed.nn is not available"
- )
+ @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
def test_gather(self):
store = c10d.FileStore(self.file_name, self.world_size)
             # This is required because these functions call directly into .dist and need
             # the world to be initialized
- c10d.init_process_group(
- store=store, rank=self.rank, world_size=self.world_size, backend="gloo"
- )
+ c10d.init_process_group(store=store, rank=self.rank, world_size=self.world_size, backend='gloo')
device = torch.device(f"cuda:{self.rank}")
x = torch.ones(5, 5, device=device) + self.rank
x.requires_grad = True
@@ -301,16 +252,12 @@ if not TEST_WITH_DEV_DBG_ASAN:
@requires_gloo()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(
- not _torch_dist_nn_available, "torch.distributed.nn is not available"
- )
+ @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
def test_scatter(self):
store = c10d.FileStore(self.file_name, self.world_size)
             # This is required because these functions call directly into .dist and need
             # the world to be initialized
- c10d.init_process_group(
- store=store, rank=self.rank, world_size=self.world_size, backend="gloo"
- )
+ c10d.init_process_group(store=store, rank=self.rank, world_size=self.world_size, backend='gloo')
device = torch.device(f"cuda:{self.rank}")
x0 = torch.ones(5, 5, device=device)
x1 = torch.ones(5, 5, device=device) + 1
@@ -335,5 +282,5 @@ if not TEST_WITH_DEV_DBG_ASAN:
self.assertEqual(x0.grad, torch.zeros(5, 5, device=device))
-if __name__ == "__main__":
+if __name__ == '__main__':
run_tests()
diff --git a/test/distributed/test_c10d_spawn_nccl.py b/test/distributed/test_c10d_spawn_nccl.py
index 67d6fae242..b543b80032 100644
--- a/test/distributed/test_c10d_spawn_nccl.py
+++ b/test/distributed/test_c10d_spawn_nccl.py
@@ -1,18 +1,20 @@
# Owner(s): ["oncall: distributed"]
import sys
-
import test_c10d_spawn
import torch
import torch.distributed as c10d
from test_c10d_spawn import _torch_dist_nn_available, TestDistributedNNFunctions
from torch.testing._internal.common_cuda import TEST_MULTIGPU
-from torch.testing._internal.common_distributed import requires_nccl, skip_if_lt_x_gpu
+from torch.testing._internal.common_distributed import (
+ requires_nccl,
+ skip_if_lt_x_gpu,
+)
from torch.testing._internal.common_utils import (
+ TestCase,
run_tests,
skip_but_pass_in_sandcastle_if,
TEST_WITH_DEV_DBG_ASAN,
- TestCase,
)
NO_NCCL = not hasattr(c10d, "ProcessGroupNCCL")
@@ -28,9 +30,7 @@ if sys.version_info < (3, 9):
store = c10d.FileStore(filename, world_size)
return c10d.ProcessGroupNCCL(store, rank, world_size)
- @skip_but_pass_in_sandcastle_if(
- not TEST_MULTIGPU, "At least 2 CUDA GPUS needed"
- )
+ @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
@skip_but_pass_in_sandcastle_if(NO_NCCL, "NCCL needed")
def test_shared_broadcast_nccl(self):
self._test_multiprocess(
@@ -40,9 +40,7 @@ if sys.version_info < (3, 9):
1,
)
- @skip_but_pass_in_sandcastle_if(
- not TEST_MULTIGPU, "At least 2 CUDA GPUS needed"
- )
+ @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
@skip_but_pass_in_sandcastle_if(NO_NCCL, "NCCL needed")
def test_shared_allreduce_nccl(self):
self._test_multiprocess(
@@ -65,9 +63,7 @@ if sys.version_info < (3, 9):
c2p.put((rank, torch.ones(2, 2), x.to("cpu")))
p2c.get()
- @skip_but_pass_in_sandcastle_if(
- not TEST_MULTIGPU, "At least 2 CUDA GPUS needed"
- )
+ @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
@skip_but_pass_in_sandcastle_if(NO_NCCL, "NCCL needed")
def test_shared_reduce_nccl(self):
self._test_multiprocess(
@@ -77,9 +73,7 @@ if sys.version_info < (3, 9):
1,
)
- @skip_but_pass_in_sandcastle_if(
- not TEST_MULTIGPU, "At least 2 CUDA GPUS needed"
- )
+ @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
@skip_but_pass_in_sandcastle_if(NO_NCCL, "NCCL needed")
def test_shared_allgather_nccl(self):
self._test_multiprocess(
@@ -105,66 +99,50 @@ if not TEST_WITH_DEV_DBG_ASAN:
@requires_nccl()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(
- not _torch_dist_nn_available, "torch.distributed.nn is not available"
- )
+ @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
def test_reduce(self):
self._test_reduce("nccl")
@requires_nccl()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(
- not _torch_dist_nn_available, "torch.distributed.nn is not available"
- )
+ @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
def test_allreduce(self):
self._test_allreduce("nccl")
@requires_nccl()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(
- not _torch_dist_nn_available, "torch.distributed.nn is not available"
- )
+ @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
def test_all_gather(self):
self._test_all_gather("nccl")
@requires_nccl()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(
- not _torch_dist_nn_available, "torch.distributed.nn is not available"
- )
+ @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
def test_all_to_all(self):
self._test_all_to_all("nccl")
@requires_nccl()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(
- not _torch_dist_nn_available, "torch.distributed.nn is not available"
- )
+ @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
def test_all_to_all_single(self):
self._test_all_to_all_single("nccl")
# Test Ops only supported in NCCL.
@requires_nccl()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(
- not _torch_dist_nn_available, "torch.distributed.nn is not available"
- )
+ @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
def test_reduce_scatter(self):
store = c10d.FileStore(self.file_name, self.world_size)
             # This is required because these functions call directly into .dist and need
             # the world to be initialized
- c10d.init_process_group(
- store=store, rank=self.rank, world_size=self.world_size, backend="nccl"
- )
+ c10d.init_process_group(store=store, rank=self.rank, world_size=self.world_size, backend='nccl')
device = torch.device(f"cuda:{self.rank}")
x0 = torch.ones(5, 5, device=device) + self.rank
x1 = torch.ones(5, 5, device=device) + self.rank + 1
x0.requires_grad = True
x1.requires_grad = True
y = torch.empty_like(x0)
- expected = (
- 1 + self.world_size
- ) * self.world_size / 2 + self.world_size * self.rank
+ expected = (1 + self.world_size) * self.world_size / 2 + self.world_size * self.rank
y = torch.distributed.nn.reduce_scatter(y, [x0, x1])
self.assertEqual(y, torch.ones(5, 5, device=device) * expected)
z = y.sin().sum()
@@ -178,19 +156,16 @@ if not TEST_WITH_DEV_DBG_ASAN:
@requires_nccl()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(
- not _torch_dist_nn_available, "torch.distributed.nn is not available"
- )
+ @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
def test_reduce_scatter_non_contiguous(self):
store = c10d.FileStore(self.file_name, self.world_size)
             # This is required because these functions call directly into .dist and need
             # the world to be initialized
- c10d.init_process_group(
- store=store, rank=self.rank, world_size=self.world_size, backend="nccl"
- )
+ c10d.init_process_group(store=store, rank=self.rank, world_size=self.world_size, backend='nccl')
device = torch.device(f"cuda:{self.rank}")
class NonContiguousGrad(torch.autograd.Function):
+
@staticmethod
def forward(ctx, input):
return input
@@ -209,14 +184,10 @@ if not TEST_WITH_DEV_DBG_ASAN:
@requires_nccl()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(
- not _torch_dist_nn_available, "torch.distributed.nn is not available"
- )
+ @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
def test_all_gather_base(self):
store = c10d.FileStore(self.file_name, self.world_size)
- c10d.init_process_group(
- store=store, rank=self.rank, world_size=self.world_size, backend="nccl"
- )
+ c10d.init_process_group(store=store, rank=self.rank, world_size=self.world_size, backend='nccl')
device = torch.device(f"cuda:{self.rank}")
x = torch.ones(5, 5, device=device) + self.rank
@@ -227,10 +198,7 @@ if not TEST_WITH_DEV_DBG_ASAN:
self.assertEqual(output.size(), torch.Size((5 * self.world_size, 5)))
for idx in range(self.world_size):
- self.assertEqual(
- output[5 * idx : 5 * (idx + 1)],
- torch.ones(5, 5, device=device) + idx,
- )
+ self.assertEqual(output[5 * idx : 5 * (idx + 1)], torch.ones(5, 5, device=device) + idx)
y = torch.sum(output.view(self.world_size, 5, 5), axis=0)
z = y.sin().sum()
diff --git a/test/distributed/test_c10d_spawn_ucc.py b/test/distributed/test_c10d_spawn_ucc.py
index 81f7ec6210..ecd4bc2230 100644
--- a/test/distributed/test_c10d_spawn_ucc.py
+++ b/test/distributed/test_c10d_spawn_ucc.py
@@ -1,19 +1,21 @@
# Owner(s): ["oncall: distributed"]
import sys
-
import test_c10d_spawn
import torch
import torch.distributed as c10d
from test_c10d_spawn import _torch_dist_nn_available, TestDistributedNNFunctions
from torch.testing._internal.common_cuda import TEST_MULTIGPU
-from torch.testing._internal.common_distributed import requires_ucc, skip_if_lt_x_gpu
+from torch.testing._internal.common_distributed import (
+ requires_ucc,
+ skip_if_lt_x_gpu,
+)
from torch.testing._internal.common_utils import (
+ TestCase,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_WITH_DEV_DBG_ASAN,
- TestCase,
)
NO_UCC = not hasattr(c10d, "ProcessGroupUCC")
@@ -27,14 +29,10 @@ if sys.version_info < (3, 9):
@classmethod
def _init_pg_ucc(cls, rank, filename, world_size):
store = c10d.FileStore(filename, world_size)
- c10d.init_process_group(
- backend="ucc", store=store, rank=rank, world_size=world_size
- )
+ c10d.init_process_group(backend="ucc", store=store, rank=rank, world_size=world_size)
return c10d.distributed_c10d._get_default_group()
- @skip_but_pass_in_sandcastle_if(
- not TEST_MULTIGPU, "At least 2 CUDA GPUS needed"
- )
+ @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
@skip_but_pass_in_sandcastle_if(NO_UCC, "UCC needed")
def test_shared_broadcast_ucc(self):
self._test_multiprocess(
@@ -44,9 +42,7 @@ if sys.version_info < (3, 9):
1,
)
- @skip_but_pass_in_sandcastle_if(
- not TEST_MULTIGPU, "At least 2 CUDA GPUS needed"
- )
+ @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
@skip_but_pass_in_sandcastle_if(NO_UCC, "UCC needed")
def test_shared_allreduce_ucc(self):
self._test_multiprocess(
@@ -56,9 +52,7 @@ if sys.version_info < (3, 9):
1,
)
- @skip_but_pass_in_sandcastle_if(
- not TEST_MULTIGPU, "At least 2 CUDA GPUS needed"
- )
+ @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
@skip_but_pass_in_sandcastle_if(NO_UCC, "UCC needed")
def test_shared_allgather_ucc(self):
self._test_multiprocess(
@@ -84,47 +78,34 @@ if not TEST_WITH_DEV_DBG_ASAN:
@requires_ucc()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(
- not _torch_dist_nn_available, "torch.distributed.nn is not available"
- )
+ @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
def test_reduce(self):
self._test_reduce("ucc")
@requires_ucc()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(
- not _torch_dist_nn_available, "torch.distributed.nn is not available"
- )
+ @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
def test_allreduce(self):
self._test_allreduce("ucc")
@requires_ucc()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(
- not _torch_dist_nn_available, "torch.distributed.nn is not available"
- )
- @skip_but_pass_in_sandcastle(
- "runs into illegal memory access on first assertEqual check when run locally"
- )
+ @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
+ @skip_but_pass_in_sandcastle("runs into illegal memory access on first assertEqual check when run locally")
def test_all_gather(self):
self._test_all_gather("ucc")
@requires_ucc()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(
- not _torch_dist_nn_available, "torch.distributed.nn is not available"
- )
+ @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
def test_all_to_all(self):
self._test_all_to_all("ucc")
@requires_ucc()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(
- not _torch_dist_nn_available, "torch.distributed.nn is not available"
- )
+ @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
def test_all_to_all_single(self):
self._test_all_to_all_single("ucc")
-
if __name__ == "__main__":
run_tests()
diff --git a/test/distributed/test_c10d_ucc.py b/test/distributed/test_c10d_ucc.py
index 75294c90b5..32557f8851 100644
--- a/test/distributed/test_c10d_ucc.py
+++ b/test/distributed/test_c10d_ucc.py
@@ -23,9 +23,9 @@ import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import (
gpus_for_rank,
+ Task,
ModuleForDdpCommHook,
SparseGradientModule,
- Task,
)
from torch import nn
from torch.nn.parallel import DistributedDataParallel
@@ -36,10 +36,10 @@ from torch.testing._internal.common_distributed import (
verify_ddp_error_logged,
)
from torch.testing._internal.common_utils import (
- retry_on_connect_failures,
+ TestCase,
run_tests,
+ retry_on_connect_failures,
skip_but_pass_in_sandcastle,
- TestCase,
)
@@ -207,7 +207,7 @@ class ProcessGroupUCCTest(MultiProcessTestCase):
# Single input tests
tests = simple_reduce_tests(self.rank, self.world_size)
- for op, input, expected in tests:
+ for (op, input, expected) in tests:
opts = c10d.AllreduceOptions()
opts.reduceOp = op
tensor = fn(input)
@@ -260,7 +260,7 @@ class ProcessGroupUCCTest(MultiProcessTestCase):
def _test_reduce_basics(self, fn):
pg = self._create_process_group_ucc()
- for op, input, output in simple_reduce_tests(self.rank, self.world_size):
+ for (op, input, output) in simple_reduce_tests(self.rank, self.world_size):
for root in range(self.world_size):
opts = c10d.ReduceOptions()
opts.reduceOp = op
@@ -346,9 +346,7 @@ class DistributedDataParallelTest(
def _get_process_group(self):
store = self._get_store()
- c10d.init_process_group(
- "ucc", store=store, rank=self.rank, world_size=self.world_size
- )
+ c10d.init_process_group("ucc", store=store, rank=self.rank, world_size=self.world_size)
return c10d.distributed_c10d._get_default_group()
def _test_ucc_backend(
@@ -385,9 +383,7 @@ class DistributedDataParallelTest(
# TODO: test_ucc_backend_2gpu_module and test_ucc_backend_4gpu_module
# require broadcast_coalesced which is not supported by ucc currently
- @skip_but_pass_in_sandcastle(
- "requires broadcast coalesced, which is not supported by ucc currently"
- )
+ @skip_but_pass_in_sandcastle("requires broadcast coalesced, which is not supported by ucc currently")
@requires_ucc()
@skip_if_lt_x_gpu(4)
def test_ucc_backend_2gpu_module(self):
@@ -395,9 +391,7 @@ class DistributedDataParallelTest(
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_ucc_backend(devices, None, multi_device=True)
- @skip_but_pass_in_sandcastle(
- "requires broadcast coalesced, which is not supported by ucc currently"
- )
+ @skip_but_pass_in_sandcastle("requires broadcast coalesced, which is not supported by ucc currently")
@requires_ucc()
@skip_if_lt_x_gpu(8)
def test_ucc_backend_4gpu_module(self):
@@ -652,9 +646,7 @@ class DistributedDataParallelTest(
# Check that the gradients are sparse and identical
vanilla_parameter = next(vanilla_model.parameters())
ddp_parameter = next(ddp_model.parameters())
- self.assertEqual(
- vanilla_parameter.grad.coalesce(), ddp_parameter.grad.coalesce()
- )
+ self.assertEqual(vanilla_parameter.grad.coalesce(), ddp_parameter.grad.coalesce())
@requires_ucc()
@skip_if_lt_x_gpu(2)
@@ -882,9 +874,7 @@ class DistributedDataParallelTest(
ModuleForDdpCommHook(), process_group=process_group
)
- expected_err = (
- "Communication hook: return annotation should be torch.futures.Future"
- )
+ expected_err = "Communication hook: return annotation should be torch.futures.Future"
with self.assertRaisesRegex(
ValueError,
expected_err,
@@ -1045,6 +1035,7 @@ class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
class CompilerTest(test_c10d_common.CompilerTest):
+
@property
def world_size(self):
return 2
@@ -1067,11 +1058,15 @@ class CompilerTest(test_c10d_common.CompilerTest):
@skip_if_lt_x_gpu(2)
def test_allgather_work_wait_gpu(self):
- self._test_allgather_work_wait(torch.ones(2, 2, device=self.rank) * self.rank)
+ self._test_allgather_work_wait(
+ torch.ones(2, 2, device=self.rank) * self.rank
+ )
@skip_if_lt_x_gpu(2)
def test_broadcast_work_wait_gpu(self):
- self._test_broadcast_work_wait(torch.ones(2, 2, device=self.rank) * self.rank)
+ self._test_broadcast_work_wait(
+ torch.ones(2, 2, device=self.rank) * self.rank
+ )
@skip_if_lt_x_gpu(2)
def test_nested_comm_tensor_wrapping_gpu(self):
@@ -1091,21 +1086,28 @@ class CompilerTest(test_c10d_common.CompilerTest):
)
def test_allgather_work_wait_cpu(self):
- self._test_allgather_work_wait(torch.ones(2, 2) * self.rank)
+ self._test_allgather_work_wait(
+ torch.ones(2, 2) * self.rank
+ )
def test_broadcast_work_wait_cpu(self):
- self._test_broadcast_work_wait(torch.ones(2, 2) * self.rank)
+ self._test_broadcast_work_wait(
+ torch.ones(2, 2) * self.rank
+ )
def test_nested_comm_tensor_wrapping_cpu(self):
- self._test_nested_comm_tensor_wrapping(torch.ones(2, 2) * self.rank)
+ self._test_nested_comm_tensor_wrapping(
+ torch.ones(2, 2) * self.rank
+ )
def test_consecutive_comm_work_wait_cpu(self):
- self._test_consecutive_comm_work_wait(torch.ones(2, 2) * self.rank)
+ self._test_consecutive_comm_work_wait(
+ torch.ones(2, 2) * self.rank
+ )
-class UccProcessGroupWithDispatchedCollectivesTests(
- test_c10d_common.ProcessGroupWithDispatchedCollectivesTests
-):
+class UccProcessGroupWithDispatchedCollectivesTests(test_c10d_common.ProcessGroupWithDispatchedCollectivesTests):
+
@skip_but_pass_in_sandcastle("Fails on M60")
@requires_ucc()
@skip_if_lt_x_gpu(1)
diff --git a/test/distributed/test_collective_utils.py b/test/distributed/test_collective_utils.py
index 727850680a..3b0c2e0199 100644
--- a/test/distributed/test_collective_utils.py
+++ b/test/distributed/test_collective_utils.py
@@ -6,8 +6,8 @@ import torch.distributed as c10d
from torch.distributed.collective_utils import all_gather, broadcast
from torch.testing._internal.common_distributed import MultiProcessTestCase
-
class TestCollectiveUtils(MultiProcessTestCase):
+
def setUp(self):
super().setUp()
self._spawn_processes()
@@ -26,9 +26,7 @@ class TestCollectiveUtils(MultiProcessTestCase):
Basic unit test for broadcast using a process group of default world size.
"""
store = c10d.FileStore(self.file_name, self.world_size)
- c10d.init_process_group(
- backend="gloo", store=store, rank=self.rank, world_size=self.world_size
- )
+ c10d.init_process_group(backend="gloo", store=store, rank=self.rank, world_size=self.world_size)
pg = c10d.new_group(pg_options=self.opts())
func = mock.MagicMock()
@@ -79,9 +77,7 @@ class TestCollectiveUtils(MultiProcessTestCase):
Basic unit test for all_gather using a process group of default world size.
"""
store = c10d.FileStore(self.file_name, self.world_size)
- c10d.init_process_group(
- backend="gloo", store=store, rank=self.rank, world_size=self.world_size
- )
+ c10d.init_process_group(backend="gloo", store=store, rank=self.rank, world_size=self.world_size)
pg = c10d.new_group(pg_options=self.opts())
func = mock.MagicMock()
@@ -89,9 +85,7 @@ class TestCollectiveUtils(MultiProcessTestCase):
res = all_gather(data_or_fn=func, pg=pg)
func.assert_called_once()
- assert res == list(
- range(self.world_size)
- ), f"Expect res to be list of 0 through {self.world_size} (got {res})"
+ assert res == list(range(self.world_size)), f"Expect res to be list of 0 through {self.world_size} (got {res})"
def test_all_gather_result_no_pg(self) -> None:
"""
diff --git a/test/distributed/test_data_parallel.py b/test/distributed/test_data_parallel.py
index 8e380ca9df..3d88fc3851 100644
--- a/test/distributed/test_data_parallel.py
+++ b/test/distributed/test_data_parallel.py
@@ -1,61 +1,48 @@
# Owner(s): ["oncall: distributed"]
import contextlib
-import functools
import io
-from collections import OrderedDict
from copy import deepcopy
+from collections import OrderedDict
from itertools import product
+import functools
import torch
-import torch.nn.functional as F
-import torch.nn.parallel as dp
from torch import nn
from torch.cuda.amp import autocast
-from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU
-from torch.testing._internal.common_device_type import (
- dtypes,
- instantiate_device_type_tests,
- onlyCUDA,
- skipMeta,
-)
-from torch.testing._internal.common_utils import (
- _assertGradAndGradgradChecks,
- dtype2prec_DONTUSE,
- gradcheck,
- run_tests,
- skip_but_pass_in_sandcastle_if,
- TestCase,
-)
+import torch.nn.parallel as dp
+from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
+from torch.testing._internal.common_device_type import instantiate_device_type_tests, dtypes, onlyCUDA, skipMeta
+from torch.testing._internal.common_utils import run_tests, TestCase
+from torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck
+from torch.testing._internal.common_utils import dtype2prec_DONTUSE
+from torch.testing._internal.common_utils import skip_but_pass_in_sandcastle_if
+import torch.nn.functional as F
NO_NCCL = not hasattr(torch.distributed, "ProcessGroupNCCL")
# batched grad doesn't support data parallel
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
-_assertGradAndGradgradChecks = functools.partial(
- _assertGradAndGradgradChecks, check_batched_grad=False
-)
-
+_assertGradAndGradgradChecks = functools.partial(_assertGradAndGradgradChecks, check_batched_grad=False)
class TestDataParallel(TestCase):
+
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_data_parallel_buffers_requiring_grad(self):
class TestModule(nn.Module):
def __init__(self, t):
super().__init__()
- self.register_buffer("t_rg", t)
- self.register_buffer("t_not_rg", t.clone().detach())
+ self.register_buffer('t_rg', t)
+ self.register_buffer('t_not_rg', t.clone().detach())
def forward(self, x):
return x * self.t_rg + self.t_not_rg
- m = TestModule(
- torch.randn(100, device="cuda", requires_grad=True, dtype=torch.double)
- )
+ m = TestModule(torch.randn(100, device='cuda', requires_grad=True, dtype=torch.double))
self.assertTrue(m.t_rg.requires_grad)
dpm = nn.DataParallel(m, [0, 1])
- inp = torch.randn(2, 100, device="cuda", dtype=torch.double)
+ inp = torch.randn(2, 100, device='cuda', dtype=torch.double)
def fn(t):
return dpm(inp)
@@ -64,12 +51,12 @@ class TestDataParallel(TestCase):
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_data_parallel_rnn(self):
+
class TestModule(torch.nn.Module):
+
def __init__(self):
super().__init__()
- self.rnn = torch.nn.LSTM(
- 300, 1024, 1, batch_first=True, bidirectional=True
- )
+ self.rnn = torch.nn.LSTM(300, 1024, 1, batch_first=True, bidirectional=True)
def forward(self, x):
self.rnn.flatten_parameters()
@@ -99,9 +86,8 @@ class TestDataParallel(TestCase):
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_data_parallel_lazy_linear(self):
- with self.assertRaisesRegex(
- ValueError, "Attempted to use an uninitialized parameter"
- ):
+
+ with self.assertRaisesRegex(ValueError, 'Attempted to use an uninitialized parameter'):
model_dp = torch.nn.DataParallel(torch.nn.LazyLinear(10).to(0))
model_dp(torch.rand(10, 10).to(0))
@@ -147,23 +133,23 @@ class TestDataParallel(TestCase):
def test_parallel_apply_passes_exception(self):
# we define and instantiate a module that will throw a KeyError
class TestModule(nn.Module):
+
def forward(self, *args):
- return {}["wonderful"]
+ return {}['wonderful']
l1 = TestModule().to("cuda", torch.float)
# and check that parallel_apply passes on the exception
# (we can use a single device twice for this test)
- with self.assertRaisesRegex(
- KeyError,
- "Caught KeyError in replica \\d "
- "on device 0.\nOriginal Traceback"
- "[\\s\\S]+wonderful",
- ):
+ with self.assertRaisesRegex(KeyError,
+ 'Caught KeyError in replica \\d '
+ 'on device 0.\nOriginal Traceback'
+ '[\\s\\S]+wonderful'):
dp.parallel_apply(modules=(l1, l1), inputs=(None, None))
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_data_parallel_multiple_input(self):
class TestModule(nn.Module):
+
def forward(self, var1, var2, float1, var3=None):
if var3 is None:
return float1 * (var1 * var2)
@@ -219,13 +205,13 @@ class TestDataParallel(TestCase):
out = dpm(var1, var2, float1, var3=var3)
local_test(out)
- kwarg_wrap = {"var3": var3}
+ kwarg_wrap = {'var3': var3}
out = dp.data_parallel(
- m, (var1, var2, float1), (0, 1), module_kwargs=kwarg_wrap
- )
+ m, (var1, var2, float1), (0, 1), module_kwargs=kwarg_wrap)
local_test(out)
- out = dp.data_parallel(m, (var1, var2, float1), (0,), module_kwargs=kwarg_wrap)
+ out = dp.data_parallel(
+ m, (var1, var2, float1), (0,), module_kwargs=kwarg_wrap)
local_test(out)
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "multi-GPU not supported")
@@ -237,7 +223,8 @@ class TestDataParallel(TestCase):
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_data_parallel_model_device(self):
- r"""Test device[0] check at forward time."""
+ r"""Test device[0] check at forward time.
+ """
l = nn.Linear(2, 2)
inp = torch.randn(2, 2)
inp_cuda0 = inp.cuda(0)
@@ -259,12 +246,8 @@ class TestDataParallel(TestCase):
expect_device = torch.device(f"cuda:{device_ids[0]}")
if should_fail:
-
def assert_correct():
- return self.assertRaisesRegex(
- RuntimeError, error_msg.format(expect_device)
- )
-
+ return self.assertRaisesRegex(RuntimeError, error_msg.format(expect_device))
else:
assert_correct = dummy_ctx_manager
@@ -280,14 +263,14 @@ class TestDataParallel(TestCase):
with assert_correct():
nn.parallel.data_parallel(inner_m.to(dp_device), inp, device_ids)
- test(l.to("cpu"), None, inp, None, should_fail=True)
+ test(l.to('cpu'), None, inp, None, should_fail=True)
test(l.cuda(1), None, inp_cuda0, None, should_fail=True)
test(l.cuda(), None, inp_cuda0, [1, 0], should_fail=True)
test(l.cuda(), None, inp_cuda0, None, should_fail=False)
- test(l.cpu(), "cuda", inp_cuda0, None, should_fail=False)
+ test(l.cpu(), 'cuda', inp_cuda0, None, should_fail=False)
test(l.cuda(1), None, inp_cuda1, [1, 0], should_fail=False)
- test(l.cpu(), "cuda:1", inp_cuda1, [1, 0], should_fail=False)
+ test(l.cpu(), 'cuda:1', inp_cuda1, [1, 0], should_fail=False)
s = nn.Sequential(l.cpu())
test(s, None, inp, None, should_fail=True)
@@ -408,10 +391,8 @@ class TestDataParallel(TestCase):
def test_data_parallel_nested_output(self):
def fn(input):
return [
- input,
- (input.sin(), input.cos(), [input.add(1)]),
- input,
- OrderedDict(a=input, b=[input.sin()]),
+ input, (input.sin(), input.cos(), [input.add(1)]), input,
+ OrderedDict(a=input, b=[input.sin()])
]
class Net(nn.Module):
@@ -431,11 +412,11 @@ class TestDataParallel(TestCase):
self.assertIsInstance(output[2], torch.Tensor)
self.assertIsInstance(output[3], dict)
self.assertEqual(len(output[3]), 2)
- self.assertIn("a", output[3])
- self.assertIn("b", output[3])
- self.assertIsInstance(output[3]["a"], torch.Tensor)
- self.assertIsInstance(output[3]["b"], list)
- self.assertIsInstance(output[3]["b"][0], torch.Tensor)
+ self.assertIn('a', output[3])
+ self.assertIn('b', output[3])
+ self.assertIsInstance(output[3]['a'], torch.Tensor)
+ self.assertIsInstance(output[3]['b'], list)
+ self.assertIsInstance(output[3]['b'][0], torch.Tensor)
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_data_parallel_nested_input(self):
@@ -456,14 +437,14 @@ class TestDataParallel(TestCase):
def test_data_parallel_module_zero_inputs(self):
class TestModule(nn.Module):
def forward(self):
- t = torch.eye(2, 3, device="cuda:0")
+ t = torch.eye(2, 3, device='cuda:0')
return t + (1 - t)
def test_helper(output, expected):
self.assertEqual(output.get_device(), 0)
self.assertEqual(output, expected)
- expected = torch.ones(2, 3, device="cuda:0")
+ expected = torch.ones(2, 3, device='cuda:0')
model = TestModule()
test_helper(nn.DataParallel(model, [0])(), expected)
@@ -473,8 +454,8 @@ class TestDataParallel(TestCase):
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_data_parallel_device_args(self):
- cuda0 = torch.device("cuda:0")
- cuda1 = torch.device("cuda:1")
+ cuda0 = torch.device('cuda:0')
+ cuda1 = torch.device('cuda:1')
# test output_device
l = nn.Linear(10, 5).to(cuda0, torch.float)
@@ -494,12 +475,9 @@ class TestDataParallel(TestCase):
def gradient_penalty(net, x):
output = net(x)
loss = torch.autograd.grad(
- outputs=output,
- inputs=x,
+ outputs=output, inputs=x,
grad_outputs=x.new_ones(output.size()),
- create_graph=True,
- retain_graph=True,
- )[0].mean()
+ create_graph=True, retain_graph=True)[0].mean()
return loss
net = nn.Linear(4, 1).cuda()
@@ -512,9 +490,9 @@ class TestDataParallel(TestCase):
grads = [p.grad for p in net.parameters()]
self.assertEqual(2, len(grads))
self.assertEqual(
- torch.tensor([[0.25, 0.25, 0.25, 0.25]], device="cuda:0"), grads[0]
- )
- self.assertEqual(torch.tensor([0.0], device="cuda:0"), grads[1])
+ torch.tensor([[0.25, 0.25, 0.25, 0.25]], device='cuda:0'),
+ grads[0])
+ self.assertEqual(torch.tensor([0.0], device='cuda:0'), grads[1])
def _test_scatter(self, tensor):
x = tensor.detach().requires_grad_()
@@ -545,9 +523,7 @@ class TestDataParallel(TestCase):
class Cplx(torch.nn.Module):
def __init__(self):
super().__init__()
- self.cplx = torch.nn.Parameter(
- torch.zeros(1, 10, dtype=torch.cfloat).cuda()
- )
+ self.cplx = torch.nn.Parameter(torch.zeros(1, 10, dtype=torch.cfloat).cuda())
def forward(self, x):
return x + self.cplx
@@ -561,8 +537,8 @@ class TestDataParallel(TestCase):
def _test_gather(self, output_device):
inputs = (
- torch.randn(2, 4, device="cuda:0", requires_grad=True, dtype=torch.double),
- torch.randn(2, 4, device="cuda:1", requires_grad=True, dtype=torch.double),
+ torch.randn(2, 4, device='cuda:0', requires_grad=True, dtype=torch.double),
+ torch.randn(2, 4, device='cuda:1', requires_grad=True, dtype=torch.double),
)
result = dp.gather(inputs, output_device)
self.assertEqual(result.size(), torch.Size([4, 4]))
@@ -578,14 +554,12 @@ class TestDataParallel(TestCase):
result.backward(grad)
self.assertEqual(inputs[0].grad, grad[:2])
self.assertEqual(inputs[1].grad, grad[2:])
- _assertGradAndGradgradChecks(
- self, lambda x, y: dp.gather((x, y), output_device), inputs
- )
+ _assertGradAndGradgradChecks(self, lambda x, y: dp.gather((x, y), output_device), inputs)
# test scalar inputs, should stack into a vector in this case
inputs = (
- torch.randn((), device="cuda:0", requires_grad=True, dtype=torch.double),
- torch.randn((), device="cuda:1", requires_grad=True, dtype=torch.double),
+ torch.randn((), device='cuda:0', requires_grad=True, dtype=torch.double),
+ torch.randn((), device='cuda:1', requires_grad=True, dtype=torch.double),
)
result = dp.gather(inputs, output_device)
self.assertEqual(result.size(), torch.Size([2]))
@@ -601,9 +575,7 @@ class TestDataParallel(TestCase):
result.backward(grad)
self.assertEqual(inputs[0].grad, grad[0])
self.assertEqual(inputs[1].grad, grad[1])
- _assertGradAndGradgradChecks(
- self, lambda x, y: dp.gather((x, y), output_device), inputs
- )
+ _assertGradAndGradgradChecks(self, lambda x, y: dp.gather((x, y), output_device), inputs)
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_gather_cpu(self):
@@ -616,11 +588,11 @@ class TestDataParallel(TestCase):
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_gather_different_len_dicts(self):
inputs = (
- {"a": torch.randn(1, 2, requires_grad=True, device="cuda:0")},
+ {'a': torch.randn(1, 2, requires_grad=True, device="cuda:0")},
{
- "b": torch.randn(1, 2, requires_grad=True, device="cuda:1"),
- "a": torch.randn(1, 2, requires_grad=True, device="cuda:1"),
- },
+ 'b': torch.randn(1, 2, requires_grad=True, device="cuda:1"),
+ 'a': torch.randn(1, 2, requires_grad=True, device="cuda:1"),
+ }
)
with self.assertRaises(ValueError):
_ = dp.gather(inputs, target_device=0)
@@ -646,19 +618,9 @@ class TestDataParallel(TestCase):
for devices in [(0, 1), [0, 1]]:
replicas = dp.replicate(net, devices)
for i, replica in enumerate(replicas):
- self.assertEqual(
- replica.bn.running_mean.get_device(),
- i,
- msg="buffer on wrong device",
- )
- self.assertEqual(
- replica.bn.running_var.get_device(), i, msg="buffer on wrong device"
- )
- self.assertEqual(
- replica.bn.num_batches_tracked.get_device(),
- i,
- msg="buffer on wrong device",
- )
+ self.assertEqual(replica.bn.running_mean.get_device(), i, msg='buffer on wrong device')
+ self.assertEqual(replica.bn.running_var.get_device(), i, msg='buffer on wrong device')
+ self.assertEqual(replica.bn.num_batches_tracked.get_device(), i, msg='buffer on wrong device')
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_zero_grad(self):
@@ -671,9 +633,8 @@ class TestDataParallel(TestCase):
def forward(self, x):
with self._testcase.assertWarnsRegex(
- UserWarning,
- r"Calling \.zero_grad\(\) from a module created with nn\.DataParallel\(\) has no effect.",
- ):
+ UserWarning,
+ r"Calling \.zero_grad\(\) from a module created with nn\.DataParallel\(\) has no effect."):
self.zero_grad()
return x
@@ -711,18 +672,10 @@ class TestDataParallel(TestCase):
def __init__(self, layouts, dtype_list):
super().__init__()
self.dtypes = dtype_list
- self.conv0 = torch.nn.Conv2d(8, 16, (2, 2)).to(
- memory_format=layouts[0], dtype=dtype_list[0]
- )
- self.conv1 = torch.nn.Conv2d(16, 32, (2, 2)).to(
- memory_format=layouts[1], dtype=dtype_list[1]
- )
- self.conv2 = torch.nn.Conv2d(32, 16, (2, 2)).to(
- memory_format=layouts[2], dtype=dtype_list[2]
- )
- self.conv3 = torch.nn.Conv2d(16, 8, (2, 2)).to(
- memory_format=layouts[3], dtype=dtype_list[3]
- )
+ self.conv0 = torch.nn.Conv2d(8, 16, (2, 2)).to(memory_format=layouts[0], dtype=dtype_list[0])
+ self.conv1 = torch.nn.Conv2d(16, 32, (2, 2)).to(memory_format=layouts[1], dtype=dtype_list[1])
+ self.conv2 = torch.nn.Conv2d(32, 16, (2, 2)).to(memory_format=layouts[2], dtype=dtype_list[2])
+ self.conv3 = torch.nn.Conv2d(16, 8, (2, 2)).to(memory_format=layouts[3], dtype=dtype_list[3])
def forward(self, x):
x = x.to(self.dtypes[0])
@@ -732,25 +685,19 @@ class TestDataParallel(TestCase):
x = self.conv3(x)
return x
- layer_formats = (
- [torch.contiguous_format] * 4,
- [torch.channels_last] * 2 + [torch.contiguous_format] * 2,
- [torch.channels_last] * 4,
- )
- layer_dtypes = (
- [torch.float] * 4,
- [torch.float] * 2 + [torch.half] * 2,
- [torch.half] * 4,
- )
+ layer_formats = ([torch.contiguous_format] * 4,
+ [torch.channels_last] * 2 + [torch.contiguous_format] * 2,
+ [torch.channels_last] * 4,)
+ layer_dtypes = ([torch.float] * 4,
+ [torch.float] * 2 + [torch.half] * 2,
+ [torch.half] * 4,)
ndevs = torch.cuda.device_count()
input = torch.randn(ndevs * 8, 8, 8, 8, device="cuda:0", dtype=torch.float)
target = torch.randn(ndevs * 8, 8, 4, 4, device="cuda:0", dtype=torch.float)
device_ids = list(range(ndevs))
- with torch.backends.cudnn.flags(
- enabled=True, deterministic=True, benchmark=False
- ):
+ with torch.backends.cudnn.flags(enabled=True, deterministic=True, benchmark=False):
for formats, dtype_list in product(layer_formats, layer_dtypes):
model_msg = f"formats = {formats} dtypes = {dtypes}"
try:
@@ -759,13 +706,10 @@ class TestDataParallel(TestCase):
opt = torch.optim.SGD(m.parameters(), lr=0.1)
opt_dp = torch.optim.SGD(m_dp.parameters(), lr=0.1)
has_half = any(p.dtype is torch.half for p in m.parameters())
- tol = 1.0e-3 if has_half else 1.0e-5
+ tol = 1.e-3 if has_half else 1.e-5
except BaseException:
# Prints case-specific debugging info to narrow down failing case.
- print(
- "Caught exception during model creation for " + model_msg,
- flush=True,
- )
+ print("Caught exception during model creation for " + model_msg, flush=True)
raise
# 2 iters: First iter creates grads, second iter tries zeroed grads.
for it in range(2):
@@ -774,28 +718,14 @@ class TestDataParallel(TestCase):
try:
F.mse_loss(m(input).float(), target).backward()
F.mse_loss(m_dp(input).float(), target).backward()
- for i, ((layer_name, m_child), m_dp_child) in enumerate(
- zip(m.named_children(), m_dp.module.children())
- ):
+ for i, ((layer_name, m_child), m_dp_child) in enumerate(zip(m.named_children(),
+ m_dp.module.children())):
named_msg = layer_name + ".weight " + iter_msg
- self.assertTrue(
- m_child.weight.grad.is_contiguous(
- memory_format=formats[i]
- ),
- named_msg,
- )
- self.assertTrue(
- m_dp_child.weight.grad.is_contiguous(
- memory_format=formats[i]
- ),
- named_msg,
- )
- for j, ((param_name, p), p_dp) in enumerate(
- zip(m_child.named_parameters(), m_dp_child.parameters())
- ):
- named_msg = (
- layer_name + "." + param_name + " " + iter_msg
- )
+ self.assertTrue(m_child.weight.grad.is_contiguous(memory_format=formats[i]), named_msg)
+ self.assertTrue(m_dp_child.weight.grad.is_contiguous(memory_format=formats[i]), named_msg)
+ for j, ((param_name, p), p_dp) in enumerate(zip(m_child.named_parameters(),
+ m_dp_child.parameters())):
+ named_msg = layer_name + "." + param_name + " " + iter_msg
self.assertEqual(p.grad, p_dp.grad, rtol=tol, atol=tol)
opt.step()
opt_dp.step()
@@ -803,10 +733,7 @@ class TestDataParallel(TestCase):
opt_dp.zero_grad()
except BaseException:
# Makes sure we still get info if an error occurred somewhere other than the asserts.
- print(
- "Caught exception during iterations at " + named_msg,
- flush=True,
- )
+ print("Caught exception during iterations at " + named_msg, flush=True)
raise
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "multi-GPU not supported")
@@ -852,6 +779,7 @@ class TestDataParallel(TestCase):
class TestDataParallelDeviceType(TestCase):
+
@onlyCUDA
@skipMeta
@dtypes(torch.float, torch.double, torch.half)
@@ -894,13 +822,13 @@ class TestDataParallelDeviceType(TestCase):
self.l = l
def forward(self, input):
- return self.l(input["data"])
+ return self.l(input['data'])
l = nn.Linear(10, 5).to(device, dtype)
i = torch.randn(20, 10, device=device, dtype=dtype)
expected_out = l(i)
n = nn.DataParallel(Net())
- out = n(input={"data": i, "unused": []})
+ out = n(input={'data': i, 'unused': []})
self.assertEqual(out.get_device(), 0)
self.assertEqual(out, expected_out, atol=dtype2prec_DONTUSE[dtype], rtol=0)
@@ -914,13 +842,13 @@ class TestDataParallelDeviceType(TestCase):
self.l = l
def forward(self, input):
- return self.l(input["data"])
+ return self.l(input['data'])
l = nn.Linear(10, 5).to(device, dtype)
i = torch.randn(20, 10, device=device, dtype=dtype)
expected_out = l(i)
n = nn.DataParallel(Net())
- out = n(input={"data": i, "unused": {}})
+ out = n(input={'data': i, 'unused': {}})
self.assertEqual(out.get_device(), 0)
self.assertEqual(out, expected_out, atol=dtype2prec_DONTUSE[dtype], rtol=0)
@@ -934,19 +862,19 @@ class TestDataParallelDeviceType(TestCase):
self.l = l
def forward(self, input):
- return self.l(input["data"])
+ return self.l(input['data'])
l = nn.Linear(10, 5).to(device, dtype)
i = torch.randn(20, 10, device=device, dtype=dtype)
expected_out = l(i)
n = nn.DataParallel(Net())
- out = n(input={"data": i, "unused": ()})
+ out = n(input={'data': i, 'unused': ()})
self.assertEqual(out.get_device(), 0)
self.assertEqual(out, expected_out, atol=dtype2prec_DONTUSE[dtype], rtol=0)
instantiate_device_type_tests(TestDataParallelDeviceType, globals())
-if __name__ == "__main__":
+if __name__ == '__main__':
TestCase._default_dtype_check_enabled = True
run_tests()
diff --git a/test/distributed/test_distributed_spawn.py b/test/distributed/test_distributed_spawn.py
index 22e3fb3b06..9867dcc373 100644
--- a/test/distributed/test_distributed_spawn.py
+++ b/test/distributed/test_distributed_spawn.py
@@ -2,10 +2,10 @@
import os
import sys
-from os import path
import torch
import torch.distributed as dist
+from os import path
torch.backends.cuda.matmul.allow_tf32 = False
@@ -13,21 +13,13 @@ if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
-from torch.testing._internal.common_utils import (
- NO_MULTIPROCESSING_SPAWN,
- run_tests,
- TEST_WITH_DEV_DBG_ASAN,
-)
+from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN, NO_MULTIPROCESSING_SPAWN
from torch.testing._internal.distributed.distributed_test import (
- DistributedTest,
- TestDistBackend,
+ DistributedTest, TestDistBackend
)
if TEST_WITH_DEV_DBG_ASAN:
- print(
- "Skip dev-asan as torch + multiprocessing spawn have known issues",
- file=sys.stderr,
- )
+ print("Skip dev-asan as torch + multiprocessing spawn have known issues", file=sys.stderr)
sys.exit(0)
if NO_MULTIPROCESSING_SPAWN:
@@ -53,13 +45,12 @@ if (
BACKEND = os.environ["BACKEND"]
if BACKEND in _allowed_backends:
-
class TestDistBackendWithSpawn(TestDistBackend, DistributedTest._DistTestBase):
+
def setUp(self):
super().setUp()
self._spawn_processes()
torch.backends.cudnn.flags(enabled=True, allow_tf32=False).__enter__()
-
else:
print(f"Invalid backend {BACKEND}. Tests will not be run!")
diff --git a/test/distributed/test_dynamo_distributed.py b/test/distributed/test_dynamo_distributed.py
index 22ce528590..9383598ac6 100644
--- a/test/distributed/test_dynamo_distributed.py
+++ b/test/distributed/test_dynamo_distributed.py
@@ -1,62 +1,54 @@
# Owner(s): ["module: dynamo"]
-import contextlib
import copy
import functools
-import random
-import unittest
-from contextlib import contextmanager
from io import StringIO
from typing import List
+import random
+import unittest
from unittest.mock import patch
-
+import contextlib
import numpy as np
import torch
+from torch._C import FileCheck
import torch._dynamo
-import torch._dynamo.logging
+from torch._dynamo.backends.distributed import DDPOptimizer
import torch._dynamo.test_case
+from contextlib import contextmanager
from torch import nn
-from torch._C import FileCheck
from torch._dynamo import config
-from torch._dynamo.backends.distributed import DDPOptimizer
-from torch._dynamo.comptime import comptime
-from torch._dynamo.testing import collect_results
from torch._dynamo.utils import same
+from torch._dynamo.testing import collect_results
+from torch.utils._triton import has_triton
+from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy, lambda_auto_wrap_policy
from torch._higher_order_ops.wrap import tag_activation_checkpoint
-from torch.distributed._functional_collectives import _maybe_wrap_tensor
-from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
-from torch.distributed.fsdp.wrap import (
- lambda_auto_wrap_policy,
- transformer_auto_wrap_policy,
-)
from torch.nn.parallel import DistributedDataParallel as DDP
-from torch.testing._internal.common_cuda import (
- PLATFORM_SUPPORTS_FLASH_ATTENTION,
- PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
-)
+from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.testing._internal.common_distributed import (
- _dynamo_dist_per_rank_init,
- DynamoDistributedMultiProcTestCase,
DynamoDistributedSingleProcTestCase,
+ DynamoDistributedMultiProcTestCase,
import_transformers_or_skip,
- requires_nccl,
skip_if_lt_x_gpu,
+ requires_nccl,
+ _dynamo_dist_per_rank_init,
)
from torch.testing._internal.common_utils import requires_cuda
-from torch.utils._triton import has_triton
-
+import torch._dynamo.logging
+from torch.testing._internal.common_cuda import (
+ PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION
+)
+from torch._dynamo.comptime import comptime
+from torch.distributed._functional_collectives import _maybe_wrap_tensor
def reset_rng_state():
torch.manual_seed(1337)
random.seed(1337)
np.random.seed(1337)
-
def init_weights(m):
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
m.bias.data.fill_(0.01)
-
class ToyModel(nn.Module):
def __init__(self, in_feat=10, hidden_feat=5000, out_feat=5, ctx_manager=None):
super().__init__()
@@ -75,22 +67,15 @@ class ToyModel(nn.Module):
else:
return self.net(inputs)
-
-def get_model(
- device, bsz=20, in_feat=10, hidden_feat=5000, out_feat=5, ctx_manager=None
-):
- m = ToyModel(
- in_feat=in_feat,
- hidden_feat=hidden_feat,
- out_feat=out_feat,
- ctx_manager=ctx_manager,
- ).to(device)
+def get_model(device, bsz=20, in_feat=10, hidden_feat=5000, out_feat=5, ctx_manager=None):
+ m = ToyModel(in_feat=in_feat, hidden_feat=hidden_feat, out_feat=out_feat, ctx_manager=ctx_manager).to(device)
m.apply(init_weights)
inputs = torch.rand(bsz, in_feat).to(device)
outputs = m(inputs)
return m, inputs, outputs
+
class ToyInnerModel(nn.Module):
def __init__(self):
super().__init__()
@@ -100,14 +85,11 @@ class ToyInnerModel(nn.Module):
def forward(self, inputs):
return self.layers(inputs)
-
class ToyOuterModel(nn.Module):
def __init__(self, device):
super().__init__()
self.layers = [ToyInnerModel().to(device) for _ in range(2)]
- self.layers = nn.Sequential(
- self.layers[0], nn.ReLU(), self.layers[1], nn.ReLU()
- )
+ self.layers = nn.Sequential(self.layers[0], nn.ReLU(), self.layers[1], nn.ReLU())
def forward(self, inputs):
return self.layers(inputs)
@@ -127,17 +109,16 @@ def find_first_node(gm, func):
return None
-def apply_fsdp_with_checkpointing(
- model, wrap_policy, checkpoint_policy, use_activation_checkpointing=True
-):
+def apply_fsdp_with_checkpointing(model, wrap_policy, checkpoint_policy, use_activation_checkpointing=True):
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
apply_activation_checkpointing,
checkpoint_wrapper,
CheckpointImpl,
)
-
model = FSDP(
- copy.deepcopy(model), auto_wrap_policy=wrap_policy, use_orig_params=True
+ copy.deepcopy(model),
+ auto_wrap_policy=wrap_policy,
+ use_orig_params=True
)
if use_activation_checkpointing:
checkpoint_wrapper_fn = functools.partial(
@@ -145,13 +126,12 @@ def apply_fsdp_with_checkpointing(
checkpoint_impl=CheckpointImpl.NO_REENTRANT,
)
apply_activation_checkpointing(
- model,
- checkpoint_wrapper_fn=checkpoint_wrapper_fn,
- check_fn=checkpoint_policy,
+ model, checkpoint_wrapper_fn=checkpoint_wrapper_fn, check_fn=checkpoint_policy,
)
return model
+
def get_custom_model(device):
class MyCustomLinear(torch.nn.Module):
def __init__(self):
@@ -197,26 +177,22 @@ def get_custom_model(device):
correct_outputs = m(*inputs)
return m, inputs, correct_outputs
-
def get_hf_bert(rank):
# Note: use @import_transformers_or_skip on your test case if you use this
# in a multiprocessing test
try:
- from transformers import AutoModelForMaskedLM, BertConfig
+ from transformers import BertConfig, AutoModelForMaskedLM
except ImportError as e:
raise unittest.SkipTest("Unable to import transformers") from e
batch_size, max_length, config, device = 4, 512, BertConfig(), f"cuda:{rank}"
model = AutoModelForMaskedLM.from_config(config).to(device)
input_ids = torch.randint(0, config.vocab_size, (batch_size, max_length)).to(device)
- decoder_ids = torch.randint(0, config.vocab_size, (batch_size, max_length)).to(
- device
- )
- inputs = {"input_ids": input_ids, "labels": decoder_ids}
+ decoder_ids = torch.randint(0, config.vocab_size, (batch_size, max_length)).to(device)
+ inputs = {'input_ids': input_ids, 'labels': decoder_ids}
model.train()
return model, inputs
-
class CheckSplitsCompiler:
def __init__(self):
self.compiler_called = 0
@@ -250,7 +226,6 @@ class FakeDDP(nn.Module):
with self._inside_ddp_forward():
return self.module.forward(*inputs, **kwargs)
-
def run_hf_bert_ddp(self, model, inputs, backend):
reset_rng_state()
correct_outputs = model(**inputs)
@@ -264,13 +239,10 @@ def run_hf_bert_ddp(self, model, inputs, backend):
opt_loss.backward()
inputs_flat = [inputs[k] for k in inputs]
- correct_results = collect_results(
- model, correct_outputs.logits, correct_loss, inputs_flat
- )
+ correct_results = collect_results(model, correct_outputs.logits, correct_loss, inputs_flat)
opt_results = collect_results(opt_model, opt_outputs.logits, opt_loss, inputs_flat)
self.assertTrue(same(correct_results, opt_results))
-
class TestFakeDistributedSingleProc(torch._dynamo.test_case.TestCase):
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@patch.object(config, "optimize_ddp", True)
@@ -298,6 +270,7 @@ class TestFakeDistributedSingleProc(torch._dynamo.test_case.TestCase):
opt_model = torch._dynamo.optimize("aot_eager")(model)
opt_model()
+
@patch.object(config, "optimize_ddp", True)
def test_symbol_splitting(self):
class Model(nn.Module):
@@ -318,23 +291,15 @@ class TestFakeDistributedSingleProc(torch._dynamo.test_case.TestCase):
opt_model = torch.compile(dynamic=True)(model)
opt_model(torch.randn(20, 512))
+
@patch.object(config, "optimize_ddp", True)
def test_call_method_forward(self):
class Model(nn.Module):
- def __init__(
- self,
- ):
+ def __init__(self,):
super().__init__()
layers = []
for l in range(2):
- layer = nn.ModuleList(
- [
- nn.LayerNorm(96),
- nn.MultiheadAttention(
- embed_dim=96, num_heads=4, batch_first=True
- ),
- ]
- )
+ layer = nn.ModuleList([nn.LayerNorm(96), nn.MultiheadAttention(embed_dim=96, num_heads=4, batch_first=True)])
layers.append(layer)
self.layers = nn.ModuleList(layers)
@@ -364,7 +329,6 @@ class TestMultiProc(DynamoDistributedMultiProcTestCase):
Prefer MultiThreadedTestCase for most tests. Perhaps use this one
sparingly for integration tests.
"""
-
@skip_if_lt_x_gpu(2)
@patch.object(config, "optimize_ddp", False)
def test_ddp_baseline_aot_eager_multiprocess(self):
@@ -420,10 +384,11 @@ class TestMultiProc(DynamoDistributedMultiProcTestCase):
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@patch.object(config, "optimize_ddp", False)
def test_ddp_activation_checkpointing(self):
+
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
+ CheckpointImpl,
apply_activation_checkpointing,
checkpoint_wrapper,
- CheckpointImpl,
)
class MyModel(torch.nn.Module):
@@ -445,12 +410,8 @@ class TestMultiProc(DynamoDistributedMultiProcTestCase):
checkpoint_wrapper,
checkpoint_impl=CheckpointImpl.NO_REENTRANT,
)
- check_fn = lambda submodule: isinstance( # noqa: E731
- submodule, torch.nn.Linear
- )
- apply_activation_checkpointing(
- model, checkpoint_wrapper_fn=non_reentrant_wrapper, check_fn=check_fn
- )
+ check_fn = lambda submodule: isinstance(submodule, torch.nn.Linear) # noqa: E731
+ apply_activation_checkpointing(model, checkpoint_wrapper_fn=non_reentrant_wrapper, check_fn=check_fn)
model = DDP(model)
x = torch.randn(10, 64).cuda()
@@ -475,9 +436,9 @@ class TestMultiProc(DynamoDistributedMultiProcTestCase):
fsdp_m = FSDP(
m,
auto_wrap_policy=functools.partial(
- transformer_auto_wrap_policy, transformer_layer_cls=(nn.Linear,)
+ transformer_auto_wrap_policy, transformer_layer_cls=(nn.Linear, )
),
- use_orig_params=True,
+ use_orig_params=True
)
fsdp_m = torch._dynamo.optimize("aot_eager")(fsdp_m)
outputs = fsdp_m(inputs)
@@ -499,21 +460,20 @@ class TestMultiProc(DynamoDistributedMultiProcTestCase):
fsdp_m = FSDP(
m,
auto_wrap_policy=functools.partial(
- transformer_auto_wrap_policy, transformer_layer_cls=(nn.Linear,)
+ transformer_auto_wrap_policy, transformer_layer_cls=(nn.Linear, )
),
- use_orig_params=True,
+ use_orig_params=True
)
fsdp_m = torch._dynamo.optimize("inductor")(fsdp_m)
outputs = fsdp_m(inputs)
self.assertTrue(same(correct_outputs, outputs))
+
@skip_if_lt_x_gpu(1)
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
def test_fsdp_activation_checkpointing(self):
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
- model, inputs = get_toy_model_for_activation_checkpointing(
- f"cuda:{self.rank}"
- )
+ model, inputs = get_toy_model_for_activation_checkpointing(f"cuda:{self.rank}")
is_inner = lambda module: isinstance(module, ToyInnerModel) # noqa: E731
wrap_policy = functools.partial(lambda_auto_wrap_policy, lambda_fn=is_inner)
model = apply_fsdp_with_checkpointing(model, wrap_policy, is_inner)
@@ -524,9 +484,9 @@ class TestMultiProc(DynamoDistributedMultiProcTestCase):
self.assertTrue(same(correct_outputs, outputs))
# Each FSDP module is a separate graph
self.assertEqual(cnt.frame_count, 2)
- self.assertTrue(
- find_first_node(cnt.graphs[0], tag_activation_checkpoint) is not None
- )
+ self.assertTrue(find_first_node(cnt.graphs[0], tag_activation_checkpoint) is not None)
+
+
@import_transformers_or_skip()
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@@ -535,18 +495,24 @@ class TestMultiProc(DynamoDistributedMultiProcTestCase):
@patch.object(torch._inductor.config, "fallback_random", True)
@unittest.skipIf(
PLATFORM_SUPPORTS_FLASH_ATTENTION or PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
- "Inaccurate results with fused SDPA kernels",
+ "Inaccurate results with fused SDPA kernels"
)
def test_hf_bert_fsdp(self):
+
def apply_fsdp(model, wrap_policy):
model = FSDP(
- copy.deepcopy(model), auto_wrap_policy=wrap_policy, use_orig_params=True
+ copy.deepcopy(model),
+ auto_wrap_policy=wrap_policy,
+ use_orig_params=True
)
return model
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
- for wrap_policy, test_instance in (
- (None, "FSDP without recursive wrapping"),
+ for (wrap_policy, test_instance) in (
+ (
+ None,
+ "FSDP without recursive wrapping"
+ ),
):
print(f"Running hf_bert test for {test_instance}")
model, inputs = get_hf_bert(self.rank)
@@ -564,14 +530,12 @@ class TestMultiProc(DynamoDistributedMultiProcTestCase):
opt_loss.backward()
inputs_flat = [inputs[k] for k in inputs]
- correct_results = collect_results(
- eager_model, correct_outputs.logits, correct_loss, inputs_flat
- )
- opt_results = collect_results(
- opt_model, opt_outputs.logits, opt_loss, inputs_flat
- )
+ correct_results = collect_results(eager_model, correct_outputs.logits, correct_loss, inputs_flat)
+ opt_results = collect_results(opt_model, opt_outputs.logits, opt_loss, inputs_flat)
self.assertTrue(same(correct_results, opt_results))
+
+
@import_transformers_or_skip()
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
# TODO(whc) Investigate why cudagraphs breaks inductor+fsdp for hf_bert
@@ -579,27 +543,20 @@ class TestMultiProc(DynamoDistributedMultiProcTestCase):
@patch.object(torch._inductor.config, "fallback_random", True)
def test_hf_bert_fsdp_activation_checkpointing(self):
from transformers.models.bert.modeling_bert import BertLayer
-
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
- for wrap_policy, test_instance in (
+ for (wrap_policy, test_instance) in (
(
functools.partial(
- transformer_auto_wrap_policy, transformer_layer_cls=(BertLayer,)
+ transformer_auto_wrap_policy, transformer_layer_cls=(BertLayer, )
),
- "FSDP with recursive wrapping BertLayer instances",
+ "FSDP with recursive wrapping BertLayer instances"
),
):
- print(
- f"Running hf_bert_activation_checkpointing test for {test_instance}"
- )
+ print(f"Running hf_bert_activation_checkpointing test for {test_instance}")
model, inputs = get_hf_bert(self.rank)
- check_fn = lambda submodule: isinstance( # noqa: E731
- submodule, BertLayer
- )
+ check_fn = lambda submodule: isinstance(submodule, BertLayer) # noqa: E731
reset_rng_state()
- eager_model = apply_fsdp_with_checkpointing(
- model, wrap_policy, check_fn
- )
+ eager_model = apply_fsdp_with_checkpointing(model, wrap_policy, check_fn)
correct_outputs = eager_model(**inputs)
correct_loss = correct_outputs.loss
correct_loss.backward()
@@ -612,12 +569,8 @@ class TestMultiProc(DynamoDistributedMultiProcTestCase):
opt_loss.backward()
inputs_flat = [inputs[k] for k in inputs]
- correct_results = collect_results(
- eager_model, correct_outputs.logits, correct_loss, inputs_flat
- )
- opt_results = collect_results(
- opt_model, opt_outputs.logits, opt_loss, inputs_flat
- )
+ correct_results = collect_results(eager_model, correct_outputs.logits, correct_loss, inputs_flat)
+ opt_results = collect_results(opt_model, opt_outputs.logits, opt_loss, inputs_flat)
self.assertTrue(same(correct_results, opt_results))
@@ -631,15 +584,8 @@ class TestSingleProc(DynamoDistributedSingleProcTestCase):
Use TestMultiProc for things that really need to run on multiple nodes
"""
- def get_model(
- self, bsz=20, in_feat=10, hidden_feat=5000, out_feat=5, ctx_manager=None
- ):
- m = ToyModel(
- in_feat=in_feat,
- hidden_feat=hidden_feat,
- out_feat=out_feat,
- ctx_manager=ctx_manager,
- ).to(self.device)
+ def get_model(self, bsz=20, in_feat=10, hidden_feat=5000, out_feat=5, ctx_manager=None):
+ m = ToyModel(in_feat=in_feat, hidden_feat=hidden_feat, out_feat=out_feat, ctx_manager=ctx_manager).to(self.device)
m.apply(init_weights)
inputs = torch.rand(bsz, in_feat).to(self.device)
outputs = m(inputs)
@@ -709,20 +655,19 @@ class TestSingleProc(DynamoDistributedSingleProcTestCase):
]:
for ctx_manager, output_test in [
(
- lambda: torch.autocast(
- torch.device(self.device).type, torch.float16
- ),
+ lambda: torch.autocast(torch.device(self.device).type, torch.float16),
lambda out: self.assertEqual(out.dtype, torch.float16),
),
- (torch.enable_grad, lambda out: self.assertTrue(out.requires_grad)),
- (torch.no_grad, lambda out: self.assertTrue(not out.requires_grad)),
+ (
+ torch.enable_grad,
+ lambda out: self.assertTrue(out.requires_grad)
+ ),
+ (
+ torch.no_grad,
+ lambda out: self.assertTrue(not out.requires_grad)
+ ),
]:
- m, inputs, correct_outputs = self.get_model(
- out_feat=1000,
- hidden_feat=1000,
- in_feat=1000,
- ctx_manager=ctx_manager,
- )
+ m, inputs, correct_outputs = self.get_model(out_feat=1000, hidden_feat=1000, in_feat=1000, ctx_manager=ctx_manager)
# inp - 1000 * 1000 matrix of float32 (4 bytes) = 4MB
# hidden - 1000 * 1000 matrix of float32 (4 bytes) = 4MB
bucket_cap_mb = 3.5 # 4MB
@@ -730,9 +675,7 @@ class TestSingleProc(DynamoDistributedSingleProcTestCase):
compiler = get_compiler()
- @torch._dynamo.optimize(
- compiler.compile_fn if compiler else "aot_eager"
- )
+ @torch._dynamo.optimize(compiler.compile_fn if compiler else "aot_eager")
def opt_fn(inputs):
return ddp_m(inputs)
@@ -768,9 +711,7 @@ class TestSingleProc(DynamoDistributedSingleProcTestCase):
opt_outputs = opt_fn(inputs)
self.assertTrue(same(correct_outputs, opt_outputs))
- @torch._inductor.config.patch(
- {"layout_optimization": True, "keep_output_stride": False}
- )
+ @torch._inductor.config.patch({"layout_optimization": True, "keep_output_stride": False})
@patch.object(config, "optimize_ddp", True)
def _test_graph_split_inductor_layout_optimizations_impl(self, context):
assert config.optimize_ddp
@@ -781,22 +722,10 @@ class TestSingleProc(DynamoDistributedSingleProcTestCase):
def __init__(self):
super().__init__()
self.net = nn.Sequential(
- *[
- nn.Conv2d(channel_dim, channel_dim, 1, stride=1, bias=False),
- nn.ReLU(),
- ]
- + [
- nn.Conv2d(channel_dim, channel_dim, 1, stride=1, bias=False),
- nn.ReLU(),
- ]
- + [
- nn.Conv2d(channel_dim, channel_dim, 1, stride=1, bias=False),
- nn.ReLU(),
- ]
- + [
- nn.Conv2d(channel_dim, channel_dim, 1, stride=1, bias=False),
- nn.ReLU(),
- ]
+ *[nn.Conv2d(channel_dim, channel_dim, 1, stride=1, bias=False), nn.ReLU()]
+ + [nn.Conv2d(channel_dim, channel_dim, 1, stride=1, bias=False), nn.ReLU()]
+ + [nn.Conv2d(channel_dim, channel_dim, 1, stride=1, bias=False), nn.ReLU()]
+ + [nn.Conv2d(channel_dim, channel_dim, 1, stride=1, bias=False), nn.ReLU()]
)
def forward(self, inputs):
@@ -822,9 +751,7 @@ class TestSingleProc(DynamoDistributedSingleProcTestCase):
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
def test_graph_split_inductor_layout_optimizations_training(self):
- self._test_graph_split_inductor_layout_optimizations_impl(
- contextlib.nullcontext
- )
+ self._test_graph_split_inductor_layout_optimizations_impl(contextlib.nullcontext)
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
def test_graph_split_inductor_layout_optimizations_inference(self):
@@ -947,13 +874,14 @@ class TestSingleProc(DynamoDistributedSingleProcTestCase):
DDP._set_params_and_buffers_to_ignore_for_model(m, parameters_to_ignore)
ddp_m = DDP(m, device_ids=self.device_ids, bucket_cap_mb=25)
parameter_ids_to_ignore = [
- id(ddp_m.module.get_parameter(p)) for p in ddp_m.parameters_to_ignore
+ id(ddp_m.module.get_parameter(p))
+ for p in ddp_m.parameters_to_ignore
]
check_splits_compiler = CheckSplitsCompiler()
ddp_optimizer = DDPOptimizer(
bucket_bytes_cap=ddp_m.bucket_bytes_cap,
- backend_compile_fn=check_splits_compiler.compile_fn,
+ backend_compile_fn=check_splits_compiler.compile_fn
)
@torch._dynamo.optimize(ddp_optimizer.compile_fn)
@@ -1007,21 +935,17 @@ class TestSingleProc(DynamoDistributedSingleProcTestCase):
with self.assertRaisesRegex(
torch._dynamo.exc.BackendCompilerFailed,
- "DDPOptimizer backend: Found a higher order op in the graph",
+ "DDPOptimizer backend: Found a higher order op in the graph"
):
torch.compile(mod, backend=cnt)(*args)
+
def test_fsdp_orig_params_assert(self):
# Test with basic FSDP wrapping (outer wrap around whole model)
m, inputs, correct_outputs = get_model(f"cuda:{self.rank}")
fsdp_m = FSDP(m, use_orig_params=False)
fsdp_m = torch._dynamo.optimize()(fsdp_m)
- self.assertRaisesRegex(
- AssertionError,
- "Dynamo only supports FSDP with use_orig_params=True",
- fsdp_m,
- inputs,
- )
+ self.assertRaisesRegex(AssertionError, "Dynamo only supports FSDP with use_orig_params=True", fsdp_m, inputs)
def test_fsdp_skip_guards(self):
"""
@@ -1041,7 +965,7 @@ class TestSingleProc(DynamoDistributedSingleProcTestCase):
for skip_guards, expected_guard_source in (
(True, "local_fsdp_module"),
- (False, "local"),
+ (False, "local")
):
torch._dynamo.reset()
@@ -1063,13 +987,8 @@ class TestSingleProc(DynamoDistributedSingleProcTestCase):
ctx.print_guards(file=GUARDS_FILE)
return out
-
device = f"cuda:{self.rank}"
- m = ToyModel(
- in_feat=10,
- hidden_feat=5000,
- out_feat=5,
- ).to(device)
+ m = ToyModel(in_feat=10, hidden_feat=5000, out_feat=5,).to(device)
inputs = torch.rand(20, 10).to(device)
m.apply(init_weights)
correct_outputs = m(inputs)
@@ -1080,17 +999,14 @@ class TestSingleProc(DynamoDistributedSingleProcTestCase):
outputs = opt_m(inputs)
# far from an exhaustive check of all the expected guards, just check a couple of them.
- FileCheck().check("""local "L['self']" TYPE_MATCH""").check(
- """local "L['self']" ID_MATCH"""
- ).check(f"""{expected_guard_source} "L['self'].net" TYPE_MATCH""").check(
- f"""{expected_guard_source} "L['self'].net" ID_MATCH"""
- ).check(
- f"""{expected_guard_source} "L['self'].net[0]" TYPE_MATCH"""
- ).check(
- f"""{expected_guard_source} "L['self'].net[0]" ID_MATCH"""
- ).run(
- GUARDS_FILE.getvalue()
- )
+ FileCheck() \
+ .check("""local "L['self']" TYPE_MATCH""") \
+ .check("""local "L['self']" ID_MATCH""") \
+ .check(f"""{expected_guard_source} "L['self'].net" TYPE_MATCH""") \
+ .check(f"""{expected_guard_source} "L['self'].net" ID_MATCH""") \
+ .check(f"""{expected_guard_source} "L['self'].net[0]" TYPE_MATCH""") \
+ .check(f"""{expected_guard_source} "L['self'].net[0]" ID_MATCH""") \
+ .run(GUARDS_FILE.getvalue())
self.assertTrue(same(correct_outputs, outputs))
def test_fsdp_skip_register_attr_or_module(self):
@@ -1101,7 +1017,6 @@ class TestSingleProc(DynamoDistributedSingleProcTestCase):
before calling `register_attr_or_module`
in variables/builder.py
"""
-
class ToyModel(nn.Module):
def __init__(self, in_feat=10, hidden_feat=5000, out_feat=5):
super().__init__()
@@ -1117,11 +1032,7 @@ class TestSingleProc(DynamoDistributedSingleProcTestCase):
torch._dynamo.reset()
device = f"cuda:{self.rank}"
- m = ToyModel(
- in_feat=10,
- hidden_feat=5000,
- out_feat=5,
- ).to(device)
+ m = ToyModel(in_feat=10, hidden_feat=5000, out_feat=5,).to(device)
inputs = torch.rand(20, 10).to(device)
m.apply(init_weights)
correct_outputs = m(inputs)
@@ -1134,12 +1045,9 @@ class TestSingleProc(DynamoDistributedSingleProcTestCase):
"l__self___net_0_weight",
"l__self___net_0_bias",
"l__self___net_2_weight",
- "l__self___net_2_bias",
+ "l__self___net_2_bias"
]:
- self.assertFalse(
- name in node.name,
- f"FSDP module {name} should not be registered as attributes",
- )
+ self.assertFalse(name in node.name, f"FSDP module {name} should not be registered as attributes")
return gm
opt_m = torch._dynamo.optimize(backend=debug_compiler)(fsdp_m)
@@ -1153,7 +1061,6 @@ class TestSingleProc(DynamoDistributedSingleProcTestCase):
source are de-duplicated, meaning that they are each only passed once
as a graph input.
"""
-
class DuplicateModule(nn.Module):
def __init__(self) -> None:
super().__init__()
@@ -1184,7 +1091,6 @@ class TestSingleProc(DynamoDistributedSingleProcTestCase):
``a is b``, where ``a`` and ``b`` are certainly not the same. We check
this by checking for per-invocation recompiles.
"""
-
class BufModule(nn.Module):
def __init__(self) -> None:
super().__init__()
@@ -1228,7 +1134,6 @@ class TestSingleProc(DynamoDistributedSingleProcTestCase):
correctly both when the staticmethod is invoked from the class and from
the object itself.
"""
-
class ModuleWithStaticMethod(nn.Module):
def __init__(self, use_self: bool):
super().__init__()
@@ -1282,5 +1187,4 @@ class TestSingleProc(DynamoDistributedSingleProcTestCase):
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
-
run_tests()
diff --git a/test/distributed/test_fake_pg.py b/test/distributed/test_fake_pg.py
index 238b866601..195034e06e 100644
--- a/test/distributed/test_fake_pg.py
+++ b/test/distributed/test_fake_pg.py
@@ -1,24 +1,28 @@
# Owner(s): ["oncall: distributed"]
import sys
-import unittest
-
import torch
import torch.distributed as dist
-import torch.distributed._functional_collectives as funcol
import torch.nn as nn
-from torch.distributed._tensor import DeviceMesh, init_device_mesh, Shard
+import unittest
+import torch.distributed._functional_collectives as funcol
+from torch.fx.experimental.proxy_tensor import make_fx
+from torch.testing._internal.distributed.fake_pg import FakeStore
+from torch.testing import FileCheck
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
+from torch.distributed._tensor import DeviceMesh, init_device_mesh, Shard
+from torch.testing._internal.common_utils import (
+ TestCase,
+ run_tests,
+)
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
-from torch.fx.experimental.proxy_tensor import make_fx
-from torch.testing import FileCheck
-from torch.testing._internal.common_utils import run_tests, TestCase
-from torch.testing._internal.distributed._tensor.common_dtensor import MLPModule
-from torch.testing._internal.distributed.fake_pg import FakeStore
+from torch.testing._internal.distributed._tensor.common_dtensor import (
+ MLPModule,
+)
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
@@ -26,7 +30,6 @@ if not dist.is_available():
HAS_CUDA = torch.cuda.is_available()
-
class TestFakePG(TestCase):
def tearDown(self):
super().tearDown()
@@ -34,7 +37,9 @@ class TestFakePG(TestCase):
def test_all_reduce(self):
store = FakeStore()
- dist.init_process_group(backend="fake", rank=1, world_size=2, store=store)
+ dist.init_process_group(
+ backend="fake", rank=1, world_size=2, store=store
+ )
output = torch.ones(3, 3) * dist.get_rank()
dist.all_reduce(output)
@@ -42,7 +47,9 @@ class TestFakePG(TestCase):
def test_allgather(self):
store = FakeStore()
- dist.init_process_group(backend="fake", rank=1, world_size=2, store=store)
+ dist.init_process_group(
+ backend="fake", rank=1, world_size=2, store=store
+ )
input_tensor = torch.ones(3, 3) * dist.get_rank()
output_tensors = [torch.empty_like(input_tensor) for _ in range(2)]
@@ -52,7 +59,9 @@ class TestFakePG(TestCase):
def test_reduce_scatter(self):
store = FakeStore()
- dist.init_process_group(backend="fake", rank=1, world_size=2, store=store)
+ dist.init_process_group(
+ backend="fake", rank=1, world_size=2, store=store
+ )
to_reduce_scatter = [torch.ones(3, 3) * rank for rank in range(2)]
output_tensor = torch.empty(3, 3)
@@ -60,20 +69,24 @@ class TestFakePG(TestCase):
dist.reduce_scatter(output_tensor, to_reduce_scatter)
self.assertEqual(tuple(output_tensor.shape), (3, 3))
- @unittest.skipIf(not HAS_CUDA, "No CUDA")
+ @unittest.skipIf(not HAS_CUDA, 'No CUDA')
def test_construct_fsdp(self):
store = FakeStore()
- dist.init_process_group(backend="fake", rank=0, world_size=2, store=store)
- FSDP(nn.Linear(2, 3, device="cuda"))
+ dist.init_process_group(
+ backend="fake", rank=0, world_size=2, store=store
+ )
+ FSDP(nn.Linear(2, 3, device='cuda'))
- @unittest.skipIf(not HAS_CUDA, "No CUDA")
+ @unittest.skipIf(not HAS_CUDA, 'No CUDA')
def test_fsdp_fake_e2e(self):
store = dist.HashStore()
- dist.init_process_group(backend="fake", rank=0, world_size=2, store=store)
+ dist.init_process_group(
+ backend="fake", rank=0, world_size=2, store=store
+ )
my_module = nn.Sequential(
- nn.Linear(2, 3, device="cuda"),
+ nn.Linear(2, 3, device='cuda'),
nn.ReLU(),
- nn.Linear(3, 2, device="cuda"),
+ nn.Linear(3, 2, device='cuda'),
)
sharded_module = FSDP(my_module, use_orig_params=True)
optim = torch.optim.Adam(sharded_module.parameters(), lr=0.0001)
@@ -83,17 +96,19 @@ class TestFakePG(TestCase):
loss.backward()
optim.step()
- @unittest.skipIf(not HAS_CUDA, "No CUDA")
+ @unittest.skipIf(not HAS_CUDA, 'No CUDA')
def test_fake_pg_tracing(self):
store = dist.HashStore()
- dist.init_process_group(backend="fake", rank=0, world_size=2, store=store)
+ dist.init_process_group(
+ backend="fake", rank=0, world_size=2, store=store
+ )
default_pg = dist.distributed_c10d._get_default_group()
def allgather_fn(tensor):
return funcol.all_gather_tensor(tensor, 0, default_pg)
- gm = make_fx(allgather_fn)(torch.randn(2, 2, device="cuda"))
+ gm = make_fx(allgather_fn)(torch.randn(2, 2, device='cuda'))
FileCheck().check("all_gather").check("wait_tensor").run(str(gm.graph))
def test_broadcast(self):
@@ -169,13 +184,15 @@ class TestFakePG(TestCase):
tp_size = 2
store = dist.HashStore()
- dist.init_process_group(
- backend="fake", rank=0, world_size=world_size, store=store
- )
+ dist.init_process_group(backend="fake", rank=0, world_size=world_size, store=store)
- device_mesh = DeviceMesh("cuda", torch.arange(0, world_size).view(-1, tp_size))
+ device_mesh = DeviceMesh(
+ "cuda", torch.arange(0, world_size).view(-1, tp_size)
+ )
device_mesh = init_device_mesh(
- "cuda", (world_size // tp_size, tp_size), mesh_dim_names=["dp", "tp"]
+ "cuda",
+ (world_size // tp_size, tp_size),
+ mesh_dim_names=["dp", "tp"]
)
sequence_parallelize_plan = {
@@ -187,6 +204,7 @@ class TestFakePG(TestCase):
"net2": RowwiseParallel(),
}
for parallel_plan in [sequence_parallelize_plan, pairwise_parallelize_plan]:
+
my_module = parallelize_module(
MLPModule(device="cuda"),
device_mesh["tp"],
@@ -194,7 +212,9 @@ class TestFakePG(TestCase):
)
sharded_module = FSDP(
- my_module, use_orig_params=True, device_mesh=device_mesh["dp"]
+ my_module,
+ use_orig_params=True,
+ device_mesh=device_mesh["dp"]
)
optim = torch.optim.Adam(sharded_module.parameters(), lr=0.0001)
diff --git a/test/distributed/test_functional_api.py b/test/distributed/test_functional_api.py
index 90f750d400..491da6551f 100644
--- a/test/distributed/test_functional_api.py
+++ b/test/distributed/test_functional_api.py
@@ -12,10 +12,10 @@ import torch.distributed._tensor as dt
import torch.distributed.distributed_c10d as c10d
from functorch import make_fx
-from torch._inductor.utils import run_and_get_code
from torch.testing import FileCheck
from torch.testing._internal.distributed.fake_pg import FakeStore
from torch.utils._triton import has_triton
+from torch._inductor.utils import run_and_get_code
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
@@ -24,8 +24,8 @@ if not dist.is_available():
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
MultiThreadedTestCase,
- requires_nccl,
TEST_SKIPS,
+ requires_nccl,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
@@ -401,6 +401,7 @@ class TestMakeFx(MultiThreadedTestCase):
self.assertFalse(torch.fx._symbolic_trace.is_fx_tracing())
def test_all_reduce_tracing(self):
+
def allred(input):
return ft_c.all_reduce(input, "sum", group=dist.group.WORLD) + 1
@@ -584,6 +585,7 @@ class TestCollectivesWithNCCL(MultiProcessTestCase):
class TestNCCLCollectivesWithWorldSize4(TestCollectivesWithNCCL):
+
@property
def world_size(self):
return 4
@@ -606,17 +608,23 @@ class TestNCCLCollectivesWithWorldSize4(TestCollectivesWithNCCL):
# rank0: [0., 1.], rank1: [2., 3.]
send_tensor = torch.arange(2, dtype=torch.float32, device=device) + 2 * rank
- recvd_tensor = ft_c.permute_tensor(send_tensor, [1, 0], group=mesh)
+ recvd_tensor = ft_c.permute_tensor(
+ send_tensor,
+ [1, 0],
+ group=mesh
+ )
# rank0: [2., 3.], rank1: [0., 1.]
- expected = torch.arange(2, dtype=torch.float32, device=device) + 2 * (
- (rank - 1 + 2) % 2
- )
+ expected = torch.arange(
+ 2,
+ dtype=torch.float32,
+ device=device
+ ) + 2 * ((rank - 1 + 2) % 2)
self.assertEqual(
recvd_tensor,
expected,
msg=f"Expected {expected} on {self.rank=} (local_rank={rank}), "
- f"but received {recvd_tensor} instead.",
+ f"but received {recvd_tensor} instead."
)
@@ -640,7 +648,12 @@ class TestFunctionalAutograd(MultiThreadedTestCase):
sizes = [1] * world_size
t = t * 10
assert t.requires_grad
- out = ft_c.all_to_all_single_autograd(t, sizes, sizes, group)
+ out = ft_c.all_to_all_single_autograd(
+ t,
+ sizes,
+ sizes,
+ group
+ )
out = out + 2
return out
@@ -665,7 +678,12 @@ class TestFunctionalAutograd(MultiThreadedTestCase):
sizes = [1] * world_size
t = t * 10
assert t.requires_grad
- out = ft_c.all_to_all_single_autograd(t, sizes, sizes, group)
+ out = ft_c.all_to_all_single_autograd(
+ t,
+ sizes,
+ sizes,
+ group
+ )
out = out + 2
return out.sum()
@@ -679,9 +697,9 @@ class TestFunctionalAutograd(MultiThreadedTestCase):
for code in codes:
FileCheck().check_count(
"_c10d_functional.all_to_all_single.default", 1, exactly=True
- ).check_count("_c10d_functional.wait_tensor.default", 1, exactly=True).run(
- code
- )
+ ).check_count(
+ "_c10d_functional.wait_tensor.default", 1, exactly=True
+ ).run(code)
self.assertIsNotNone(t.grad)
diff --git a/test/distributed/test_inductor_collectives.py b/test/distributed/test_inductor_collectives.py
index 84db55d055..5662ae964c 100644
--- a/test/distributed/test_inductor_collectives.py
+++ b/test/distributed/test_inductor_collectives.py
@@ -2,25 +2,20 @@
import functools
import unittest
from unittest.mock import patch
-
import torch
-import torch._dynamo
-import torch._dynamo.logging
-import torch._dynamo.test_case
-
+from torch._C import FileCheck
# for some reason importing functional collectives after dynamo breaks collectives handling!
import torch.distributed._functional_collectives as _functional_collectives
-from torch._C import FileCheck
-from torch._dynamo.testing import CompileCounter
+import torch._dynamo
+import torch._dynamo.test_case
from torch._dynamo.utils import same
-from torch._inductor.compile_fx import compile_fx as inductor_compile_fx
-from torch._inductor.utils import run_and_get_triton_code
+from torch._dynamo.testing import CompileCounter
from torch.distributed.distributed_c10d import GroupMember
from torch.fx.experimental.proxy_tensor import make_fx
from torch.testing._internal.common_distributed import (
- _dynamo_dist_per_rank_init,
- DynamoDistributedMultiProcTestCase,
DynamoDistributedSingleProcTestCase,
+ DynamoDistributedMultiProcTestCase,
+ _dynamo_dist_per_rank_init,
requires_nccl,
skip_if_lt_x_gpu,
)
@@ -29,8 +24,10 @@ from torch.testing._internal.common_utils import (
parametrize,
requires_cuda,
)
+from torch._inductor.compile_fx import compile_fx as inductor_compile_fx
from torch.utils._triton import has_triton
-
+from torch._inductor.utils import run_and_get_triton_code
+import torch._dynamo.logging
def _tolist_with_constrain_as_size(tensor):
lst = tensor.tolist()
@@ -38,13 +35,11 @@ def _tolist_with_constrain_as_size(tensor):
torch._constrain_as_size(elem)
return lst
-
@requires_nccl()
class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
"""
Run correctness checks in multi-proc runner, mark with minimum # GPUs to run under
"""
-
def get_world_trs(self):
return {
"tag": "",
@@ -68,9 +63,7 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
"""
def example(tensor, src, *, tag, ranks, group_size):
- res = torch.ops.c10d_functional.broadcast(
- tensor, src, tag, ranks, group_size
- )
+ res = torch.ops.c10d_functional.broadcast(tensor, src, tag, ranks, group_size)
res = torch.ops.c10d_functional.wait_tensor(res)
return res
@@ -79,12 +72,16 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
return inductor_compile_fx(graph, example_inputs)
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
+
example = functools.partial(
example,
**self.get_world_trs(),
)
t = torch.randn(4, 4, device="cuda")
- inputs = (t if self.rank == 0 else torch.zeros(4, 4, device="cuda"), 0)
+ inputs = (
+ t if self.rank == 0 else torch.zeros(4, 4, device="cuda"),
+ 0
+ )
eager_out = example(*inputs)
self.assertTrue(same(t, eager_out))
@@ -109,13 +106,14 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
g = torch.matmul(e, f)
ar = torch.ops.c10d_functional.wait_tensor(ar)
out = torch.add(ar, g.repeat(2, 1))
- return (out,)
+ return (out, )
def compile(func, example_inputs):
graph = make_fx(func)(*example_inputs)
return inductor_compile_fx(graph, example_inputs)
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
+
matmul_cat_col = functools.partial(
matmul_cat_col,
**self.get_world_trs(),
@@ -138,6 +136,7 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
# TODO: somehow inductor bg compile threads are causing hangs at exit with distributed work dtor
@patch.object(torch._inductor.config, "compile_threads", 1)
def test_eager_allreduce_inductor_wait(self):
+
def eager_func(a, b, c, d, *, tag, ranks, group_size):
x = torch.matmul(a, b)
y = torch.matmul(c, d)
@@ -149,13 +148,14 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
g = torch.matmul(e, f)
ar = torch.ops.c10d_functional.wait_tensor(ar)
out = torch.add(ar, g.repeat(2, 1))
- return (out,)
+ return (out, )
def compile(func, example_inputs):
graph = make_fx(func)(*example_inputs)
return inductor_compile_fx(graph, example_inputs)
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
+
eager_func = functools.partial(
eager_func,
**self.get_world_trs(),
@@ -164,12 +164,8 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
inductor_inputs = (torch.ones(4, 4, device="cuda") + self.rank,) * 2
eager_out = inductor_func(eager_func(*eager_inputs), *inductor_inputs)
- compiled_inductor_func = compile(
- inductor_func, [eager_func(*eager_inputs)] + list(inductor_inputs)
- )
- inductor_out = compiled_inductor_func(
- eager_func(*eager_inputs), *inductor_inputs
- )
+ compiled_inductor_func = compile(inductor_func, [eager_func(*eager_inputs)] + list(inductor_inputs))
+ inductor_out = compiled_inductor_func(eager_func(*eager_inputs), *inductor_inputs)
print(f"eager_out, {eager_out}")
print(f"inductor_out, {inductor_out}")
self.assertTrue(same(eager_out, inductor_out, tol=0.001))
@@ -179,6 +175,7 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
# TODO: somehow inductor bg compile threads are causing hangs at exit with distributed work dtor
@patch.object(torch._inductor.config, "compile_threads", 1)
def test_inductor_allreduce_eager_wait(self):
+
def inductor_func(a, b, c, d, *, tag, ranks, group_size):
x = torch.matmul(a, b)
y = torch.matmul(c, d)
@@ -190,13 +187,14 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
g = torch.matmul(e, f)
ar = torch.ops.c10d_functional.wait_tensor(ar)
out = torch.add(ar, g.repeat(2, 1))
- return (out,)
+ return (out, )
def compile(func, example_inputs):
graph = make_fx(func)(*example_inputs)
return inductor_compile_fx(graph, example_inputs)
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
+
inductor_func = functools.partial(
inductor_func,
**self.get_world_trs(),
@@ -206,9 +204,7 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
eager_out = eager_func(inductor_func(*inductor_inputs), *eager_inputs)
compiled_inductor_func = compile(inductor_func, inductor_inputs)
- inductor_out = eager_func(
- compiled_inductor_func(*inductor_inputs), *eager_inputs
- )
+ inductor_out = eager_func(compiled_inductor_func(*inductor_inputs), *eager_inputs)
self.assertTrue(same(eager_out, inductor_out, tol=0.001))
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@@ -237,9 +233,7 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
@patch.object(torch._inductor.config, "compile_threads", 1)
def test_permute_tensor(self):
def func(tensor, src_dst_pairs, *, tag, ranks, group_size):
- return _functional_collectives.permute_tensor(
- tensor, src_dst_pairs, ranks, tag
- )
+ return _functional_collectives.permute_tensor(tensor, src_dst_pairs, ranks, tag)
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
inputs = (
@@ -253,9 +247,11 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
self.assertTrue(same(out, correct))
# rank0: [2., 3.], rank1: [0., 1.]
- expected = torch.arange(2, dtype=torch.float32, device="cuda") + 2 * (
- (self.rank - 1 + self.world_size) % self.world_size
- )
+ expected = torch.arange(
+ 2,
+ dtype=torch.float32,
+ device="cuda"
+ ) + 2 * ((self.rank - 1 + self.world_size) % self.world_size)
self.assertEqual(out, expected)
self.assertEqual(correct, expected)
@@ -322,17 +318,16 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
def example(a, b, *, tag, ranks, group_size):
c = torch.matmul(a, b)
- ag = torch.ops.c10d_functional.all_gather_into_tensor(
- c, tag, ranks, group_size
- )
+ ag = torch.ops.c10d_functional.all_gather_into_tensor(c, tag, ranks, group_size)
ag = torch.ops.c10d_functional.wait_tensor(ag)
- return (ag,)
+ return (ag, )
def compile(func, example_inputs):
graph = make_fx(func)(*example_inputs)
return inductor_compile_fx(graph, example_inputs)
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
+
example = functools.partial(
example,
**self.get_world_trs(),
@@ -379,19 +374,9 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
# TODO: somehow inductor bg compile threads are causing hangs at exit with distributed work dtor
@patch.object(torch._inductor.config, "compile_threads", 1)
def test_all_to_all_single_inductor(self):
- def example(
- inp,
- input_split_sizes_tensor,
- output_split_sizes_tensor,
- *,
- tag,
- ranks,
- group_size,
- ):
+ def example(inp, input_split_sizes_tensor, output_split_sizes_tensor, *, tag, ranks, group_size):
input_split_sizes = _tolist_with_constrain_as_size(input_split_sizes_tensor)
- output_split_sizes = _tolist_with_constrain_as_size(
- output_split_sizes_tensor
- )
+ output_split_sizes = _tolist_with_constrain_as_size(output_split_sizes_tensor)
a2a = torch.ops.c10d_functional.all_to_all_single(
inp,
output_split_sizes,
@@ -404,22 +389,14 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
out = a2a / a2a.sum(dim=0)
return out
- with _dynamo_dist_per_rank_init(
- self.rank, self.world_size
- ), torch._dynamo.config.patch(
+ with _dynamo_dist_per_rank_init(self.rank, self.world_size), torch._dynamo.config.patch(
dynamic_shapes=True,
capture_dynamic_output_shape_ops=True,
capture_scalar_outputs=True,
):
row = self.world_size * (self.rank + 1) * (self.world_size + 1) / 2
- input_split_sizes_tensor = torch.tensor(
- [(i + 1) * (self.rank + 1) for i in range(self.world_size)],
- dtype=torch.int64,
- )
- output_split_sizes_tensor = torch.tensor(
- [(i + 1) * (self.rank + 1) for i in range(self.world_size)],
- dtype=torch.int64,
- )
+ input_split_sizes_tensor = torch.tensor([(i + 1) * (self.rank + 1) for i in range(self.world_size)], dtype=torch.int64)
+ output_split_sizes_tensor = torch.tensor([(i + 1) * (self.rank + 1) for i in range(self.world_size)], dtype=torch.int64)
inputs = (
torch.ones(int(row), 5, device="cuda") * (self.rank + 1),
input_split_sizes_tensor,
@@ -430,9 +407,9 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
compiled_fn = torch.compile(example, fullgraph=True, dynamic=True)
code = run_and_get_triton_code(compiled_fn, *inputs, **trs)
- FileCheck().check_regex(
- "all_to_all_single\\(buf\\d+\\[0\\], buf\\d+_inputs\\[0\\], output_split_sizes=\\[u\\d+, u\\d+\\], input_split_sizes=\\[u\\d+, u\\d+\\]" # noqa: B950
- ).run(code)
+ FileCheck() \
+ .check_regex("all_to_all_single\\(buf\\d+\\[0\\], buf\\d+_inputs\\[0\\], output_split_sizes=\\[u\\d+, u\\d+\\], input_split_sizes=\\[u\\d+, u\\d+\\]") \
+ .run(code) # noqa: B950
eager_out = example(*inputs, **trs)
inductor_out = compiled_fn(*inputs, **trs)
@@ -459,21 +436,18 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
return out
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
- input_split_sizes_tensor = torch.tensor(
- [1] * self.world_size, dtype=torch.int64
- )
+ input_split_sizes_tensor = torch.tensor([1] * self.world_size, dtype=torch.int64)
inputs = (
- torch.ones(self.world_size, self.world_size, device="cuda")
- * (self.rank + 1),
+ torch.ones(self.world_size, self.world_size, device="cuda") * (self.rank + 1),
input_split_sizes_tensor,
)
trs = self.get_world_trs()
compiled_fn = torch.compile(example, fullgraph=True, dynamic=True)
code = run_and_get_triton_code(compiled_fn, *inputs, **trs)
- FileCheck().check_regex(
- "all_to_all_single\\(buf\\d+\\[0\\], buf\\d+_inputs\\[0\\], output_split_sizes=None, input_split_sizes=\\[u\\d+, u\\d+\\]" # noqa: B950
- ).run(code)
+ FileCheck() \
+ .check_regex("all_to_all_single\\(buf\\d+\\[0\\], buf\\d+_inputs\\[0\\], output_split_sizes=None, input_split_sizes=\\[u\\d+, u\\d+\\]") \
+ .run(code) # noqa: B950
eager_out = example(*inputs, **trs)
inductor_out = compiled_fn(*inputs, **trs)
@@ -486,9 +460,7 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
@patch.object(torch._inductor.config, "compile_threads", 1)
def test_all_to_all_single_inductor_input_split_sizes_none(self):
def example(inp, output_split_sizes_tensor, *, tag, ranks, group_size):
- output_split_sizes = _tolist_with_constrain_as_size(
- output_split_sizes_tensor
- )
+ output_split_sizes = _tolist_with_constrain_as_size(output_split_sizes_tensor)
a2a = torch.ops.c10d_functional.all_to_all_single(
inp,
output_split_sizes,
@@ -501,28 +473,23 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
out = a2a / a2a.sum(dim=0)
return out
- with _dynamo_dist_per_rank_init(
- self.rank, self.world_size
- ), torch._dynamo.config.patch(
+ with _dynamo_dist_per_rank_init(self.rank, self.world_size), torch._dynamo.config.patch(
dynamic_shapes=True,
capture_dynamic_output_shape_ops=True,
capture_scalar_outputs=True,
):
- output_split_sizes_tensor = torch.tensor(
- [1] * self.world_size, dtype=torch.int64
- )
+ output_split_sizes_tensor = torch.tensor([1] * self.world_size, dtype=torch.int64)
inputs = (
- torch.ones(self.world_size, self.world_size, device="cuda")
- * (self.rank + 1),
+ torch.ones(self.world_size, self.world_size, device="cuda") * (self.rank + 1),
output_split_sizes_tensor,
)
trs = self.get_world_trs()
compiled_fn = torch.compile(example, fullgraph=True, dynamic=True)
code = run_and_get_triton_code(compiled_fn, *inputs, **trs)
- FileCheck().check_regex(
- "all_to_all_single\\(buf\\d+\\[0\\], buf\\d+_inputs\\[0\\], output_split_sizes=\\[u\\d+, u\\d+\\], input_split_sizes=None" # noqa: B950
- ).run(code)
+ FileCheck() \
+ .check_regex("all_to_all_single\\(buf\\d+\\[0\\], buf\\d+_inputs\\[0\\], output_split_sizes=\\[u\\d+, u\\d+\\], input_split_sizes=None") \
+ .run(code) # noqa: B950
eager_out = example(*inputs, **trs)
inductor_out = compiled_fn(*inputs, **trs)
@@ -547,19 +514,14 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
return out
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
- inputs = (
- torch.ones(self.world_size, self.world_size, device="cuda")
- * (self.rank + 1),
- )
+ inputs = (torch.ones(self.world_size, self.world_size, device="cuda") * (self.rank + 1),)
trs = self.get_world_trs()
compiled_fn = torch.compile(example, fullgraph=True, dynamic=True)
code = run_and_get_triton_code(compiled_fn, *inputs, **trs)
- FileCheck().check_regex(
- "all_to_all_single\\(buf\\d+\\[0\\], buf\\d+_inputs\\[0\\], output_split_sizes=None, input_split_sizes=None"
- ).run(
- code
- ) # noqa: B950
+ FileCheck() \
+ .check_regex("all_to_all_single\\(buf\\d+\\[0\\], buf\\d+_inputs\\[0\\], output_split_sizes=None, input_split_sizes=None") \
+ .run(code) # noqa: B950
eager_out = example(*inputs, **trs)
inductor_out = compiled_fn(*inputs, **trs)
@@ -573,7 +535,6 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
"""
Prefer single-proc test runner for basic tests as it is easier to work with.
"""
-
def get_world_trs(self, world_size=1):
return {
"tag": "",
@@ -584,10 +545,9 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@torch._inductor.config.patch(debug=True)
def test_inductor_single_op(self):
+
def func(inp, *, tag, ranks, group_size):
- ar = torch.ops.c10d_functional.all_reduce(
- inp, "sum", tag, ranks, group_size
- )
+ ar = torch.ops.c10d_functional.all_reduce(inp, "sum", tag, ranks, group_size)
ar = torch.ops.c10d_functional.wait_tensor(ar)
return ar
@@ -598,17 +558,15 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
code = run_and_get_triton_code(compiled, inputs, **self.get_world_trs())
# NOTE: Make sure we are not unneccessarily copying the outputs of
# wait_tensors before they are returned from the graph.
- FileCheck().check("buf0 = empty").check("buf0.copy_(arg0_1)").check(
- "buf1 = buf0"
- ).check("buf1_work = dist.all_reduce(buf1").check(
- "fun_col_impl._register_tensor_work(buf1, buf1_work)"
- ).check(
- "buf0 = _wait_tensor(buf0)"
- ).check(
- "return (buf0, )"
- ).run(
- code
- )
+ FileCheck() \
+ .check("buf0 = empty") \
+ .check("buf0.copy_(arg0_1)") \
+ .check("buf1 = buf0") \
+ .check("buf1_work = dist.all_reduce(buf1") \
+ .check("fun_col_impl._register_tensor_work(buf1, buf1_work)") \
+ .check("buf0 = _wait_tensor(buf0)") \
+ .check("return (buf0, )") \
+ .run(code)
correct = func(inputs, **self.get_world_trs())
self.assertTrue(same(out, correct))
@@ -634,21 +592,17 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
code = run_and_get_triton_code(compiled, inputs, **self.get_world_trs())
# NOTE: Make sure we are not unneccessarily copying the outputs of
# wait_tensors before they are returned from the graph.
- FileCheck().check("buf1 = buf0; del buf0 # reuse").check_not(
- "buf1.copy_("
- ).check("buf2 = buf1").check("buf2_work = dist.all_reduce(buf2").check(
- "fun_col_impl._register_tensor_work(buf2, buf2_work)"
- ).check(
- "buf1 = _wait_tensor(buf1)"
- ).check(
- "buf4 = buf1"
- ).check(
- "buf5 = empty"
- ).check(
- "return (buf1, buf5"
- ).run(
- code
- )
+ FileCheck() \
+ .check("buf1 = buf0; del buf0 # reuse") \
+ .check_not("buf1.copy_(") \
+ .check("buf2 = buf1") \
+ .check("buf2_work = dist.all_reduce(buf2") \
+ .check("fun_col_impl._register_tensor_work(buf2, buf2_work)") \
+ .check("buf1 = _wait_tensor(buf1)") \
+ .check("buf4 = buf1") \
+ .check("buf5 = empty") \
+ .check("return (buf1, buf5") \
+ .run(code)
out = compiled(inputs, **self.get_world_trs())
correct = func(inputs, **self.get_world_trs())
self.assertTrue(same(out, correct))
@@ -675,28 +629,25 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
code = run_and_get_triton_code(compiled, inputs, **self.get_world_trs())
# NOTE: Make sure we are not unneccessarily copying the outputs of
# wait_tensors before they are returned from the graph.
- FileCheck().check("buf0 = empty").check("buf5 = empty").check(
- "triton_poi__0.run(arg0_1, buf0, buf5"
- ).check_not("copy_(").check("buf1 = buf0; del buf0 # reuse").check(
- "buf2 = buf1"
- ).check(
- "buf2_work = dist.all_reduce(buf2"
- ).check(
- "fun_col_impl._register_tensor_work(buf2, buf2_work)"
- ).check(
- "buf1 = _wait_tensor(buf1)"
- ).check(
- "buf4 = buf1"
- ).check(
- "return (buf1, buf5, buf6"
- ).run(
- code
- )
+ FileCheck() \
+ .check("buf0 = empty") \
+ .check("buf5 = empty") \
+ .check("triton_poi__0.run(arg0_1, buf0, buf5") \
+ .check_not("copy_(") \
+ .check("buf1 = buf0; del buf0 # reuse") \
+ .check("buf2 = buf1") \
+ .check("buf2_work = dist.all_reduce(buf2") \
+ .check("fun_col_impl._register_tensor_work(buf2, buf2_work)") \
+ .check("buf1 = _wait_tensor(buf1)") \
+ .check("buf4 = buf1") \
+ .check("return (buf1, buf5, buf6") \
+ .run(code)
out = compiled(inputs, **self.get_world_trs())
correct = func(inputs, **self.get_world_trs())
self.assertTrue(same(out, correct))
def test_dynamo_trace_allreduce(self):
+
def func(inp):
ar = _functional_collectives.all_reduce(inp, "sum", "0")
return ar
@@ -713,6 +664,7 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
self.assertTrue(same(out, correct))
def test_dynamo_trace_all_gather_tensor(self):
+
def func(inp):
ar = _functional_collectives.all_gather_tensor(inp, 0, "0")
return ar
@@ -729,6 +681,7 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
self.assertTrue(same(out, correct))
def test_dynamo_trace_all_gather_tensor_pg(self):
+
def func(inp, *, pg):
ar = _functional_collectives.all_gather_tensor(inp, 0, pg)
return ar
@@ -745,13 +698,13 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
self.assertTrue(same(out, correct))
def test_dynamo_rewrite_dist_all_gather(self):
+
def func(inp, out, *, pg):
torch.distributed.all_gather_into_tensor(
out,
inp,
pg,
)
-
local_size = [4, 4]
# single-proc test
global_size = local_size
@@ -770,13 +723,13 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
assert same(outputs, correct_outputs)
def test_dynamo_rewrite_dist_all_gather_list(self):
+
def func(inp, out, *, pg):
torch.distributed.all_gather(
out,
inp,
pg,
)
-
local_size = [4, 4]
# single-proc test
global_size = local_size
@@ -801,7 +754,6 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
group=pg,
async_op=False,
)
-
local_size = [4, 4]
# single-proc test
global_size = local_size
@@ -820,13 +772,13 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
assert same(outputs, correct_outputs)
def test_dynamo_rewrite_dist_reduce_scatter(self):
+
def func(inp, out, *, pg):
torch.distributed.reduce_scatter_tensor(
out,
inp,
group=pg,
)
-
local_size = [4, 4]
# single-proc test
global_size = local_size
@@ -852,9 +804,10 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
"kwargs",
"kwargs_none",
"unspecified",
- ],
+ ]
)
def test_dynamo_rewrite_dist_allreduce(self, pg_mode):
+
def func(tensor, *args, **kwargs):
torch.distributed.all_reduce(
tensor,
@@ -893,8 +846,13 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
assert same(inputs_compiled, inputs_eager)
def test_dynamo_rewrite_dist_all_to_all_single(self):
+
def func(output, input, pg):
- torch.distributed.all_to_all_single(output, input, group=pg)
+ torch.distributed.all_to_all_single(
+ output,
+ input,
+ group=pg
+ )
counter = CompileCounter()
compiled = torch.compile(func, backend=counter, fullgraph=True)
@@ -918,7 +876,7 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
torch.distributed.ReduceOp.PRODUCT,
torch.distributed.ReduceOp.MIN,
torch.distributed.ReduceOp.MAX,
- ],
+ ]
)
def test_dynamo_rewrite_dist_allreduce_reduce_op(self, reduce_op):
from torch.distributed._functional_collectives import REDUCE_OP_TO_STR
@@ -927,9 +885,8 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
ar_nodes = []
for node in gm.graph.nodes:
if node.target in [
- torch.ops.c10d_functional.all_reduce,
- torch.ops._c10d_functional.all_reduce,
- ]:
+ torch.ops.c10d_functional.all_reduce,
+ torch.ops._c10d_functional.all_reduce]:
ar_nodes.append(node)
self.assertEqual(len(ar_nodes), 1)
reduce_op_str = ar_nodes[0].args[1]
@@ -949,14 +906,14 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
compiled(*inputs)
@parametrize(
- "source",
- [
+ "source", [
"GroupMember.WORLD",
"group.WORLD",
"_get_default_group",
- ],
+ ]
)
def test_dynamo_get_world_group(self, source):
+
def func(tensor):
if source == "GroupMember.WORLD":
group = torch.distributed.GroupMember.WORLD
@@ -975,9 +932,8 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
ar_nodes = []
for node in gm.graph.nodes:
if node.target in [
- torch.ops.c10d_functional.all_reduce,
- torch.ops._c10d_functional.all_reduce,
- ]:
+ torch.ops.c10d_functional.all_reduce,
+ torch.ops._c10d_functional.all_reduce]:
ar_nodes.append(node)
self.assertEqual(len(ar_nodes), 1)
return gm
@@ -986,12 +942,18 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
input = torch.ones(2, device=self.device)
compiled(input)
+
def test_dynamo_support_collective_op_with_async_op_False(self):
+
def func(inp, out, *, pg):
# user explicitly set the attribute `async_op` to False,
# there should be no graph break
- torch.distributed.reduce_scatter_tensor(out, inp, group=pg, async_op=False)
-
+ torch.distributed.reduce_scatter_tensor(
+ out,
+ inp,
+ group=pg,
+ async_op=False
+ )
local_size = [4, 4]
# single-proc test
global_size = local_size
@@ -1008,12 +970,15 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
assert same(outputs, correct_outputs)
def test_dynamo_graphbreaks_unsupported_async_op(self):
+
def func(inp, out, *, pg):
work = torch.distributed.reduce_scatter_tensor(
- out, inp, group=pg, async_op=True
+ out,
+ inp,
+ group=pg,
+ async_op=True
)
work.wait()
-
local_size = [4, 4]
# single-proc test
global_size = local_size
@@ -1046,6 +1011,7 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
assert same(outputs, correct_outputs)
def test_dynamo_trace_reduce_scatter_tensor(self):
+
def func(inp):
ar = _functional_collectives.reduce_scatter_tensor(inp, "sum", 0, "0")
return ar
@@ -1063,9 +1029,7 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
def test_dynamo_trace_allgather_coalesced(self):
def func(inp, *, tag, ranks, group_size):
- ar = torch.ops.c10d_functional.all_gather_into_tensor_coalesced(
- inp, tag, ranks, group_size
- )
+ ar = torch.ops.c10d_functional.all_gather_into_tensor_coalesced(inp, tag, ranks, group_size)
return ar
inputs = [torch.ones(4, 4, device="cuda"), torch.ones(6, 6, device="cuda")]
@@ -1077,26 +1041,21 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
assert counter.op_count == 3 # It generates 2 getattr to unpack the array
assert same(out, correct)
+
def test_backwards(self):
"""
It's probably not that common to need backwards support for collectives.
However, I wanted to at least see if it was possible to support it as a design goal.
"""
-
def func(inp):
ar = _functional_collectives.all_reduce(inp, "sum", "0")
return ar
input = torch.ones(4, 4, device="cuda", requires_grad=True)
# TODO implement backwards
- with self.assertRaisesRegex(
- RuntimeError,
- "element 0 of tensors does not require grad and does not have a grad_fn",
- ):
- compiled = torch.compile(
- func, backend="aot_eager"
- ) # inductor bug with single-op allreduce graph
+ with self.assertRaisesRegex(RuntimeError, "element 0 of tensors does not require grad and does not have a grad_fn"):
+ compiled = torch.compile(func, backend="aot_eager") # inductor bug with single-op allreduce graph
out = compiled(input)
out.sum().backward()
@@ -1120,9 +1079,7 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
def func(inp, *, tag, ranks, group_size):
x = inp + 1
- tensor_list = torch.ops.c10d_functional.all_gather_into_tensor_coalesced(
- [x, inp], tag, ranks, group_size
- )
+ tensor_list = torch.ops.c10d_functional.all_gather_into_tensor_coalesced([x, inp], tag, ranks, group_size)
y = x + 2
ar0 = torch.ops.c10d_functional.wait_tensor(tensor_list[0])
ar1 = torch.ops.c10d_functional.wait_tensor(tensor_list[1])
@@ -1136,32 +1093,25 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
code = run_and_get_triton_code(compiled, inputs, **self.get_world_trs())
# NOTE: Make sure we are not unneccessarily copying the outputs of
# wait_tensors before they are returned from the graph.
- FileCheck().check("buf0 = empty").check("buf5 = empty").check(
- "triton_poi__0.run(arg0_1, buf0, buf5"
- ).check("buf1 = empty").check("buf2 = empty").check_not("copy_(").check(
- "buf3_inputs = [buf0,arg0_1]"
- ).check(
- "buf3 = [buf1,buf2]"
- ).check(
- "buf3_work = fun_col_impl._all_gather_into_tensor_coalesced_fallback("
- "output_tensors=buf3, input_tensors=buf3_inputs"
- ).check(
- "fun_col_impl._register_tensor_work(buf3, buf3_work)"
- ).check(
- "buf1 = _wait_tensor(buf1)"
- ).check(
- "buf4 = buf1"
- ).check(
- "buf6 = buf0; del buf0 # reuse"
- ).check(
- "buf2 = _wait_tensor(buf2)"
- ).check(
- "buf7 = buf2"
- ).check(
- "return (buf1, buf5, buf6, buf2"
- ).run(
- code
- )
+ FileCheck() \
+ .check("buf0 = empty") \
+ .check("buf5 = empty") \
+ .check("triton_poi__0.run(arg0_1, buf0, buf5") \
+ .check("buf1 = empty") \
+ .check("buf2 = empty") \
+ .check_not("copy_(") \
+ .check("buf3_inputs = [buf0,arg0_1]") \
+ .check("buf3 = [buf1,buf2]") \
+ .check("buf3_work = fun_col_impl._all_gather_into_tensor_coalesced_fallback("
+ "output_tensors=buf3, input_tensors=buf3_inputs") \
+ .check("fun_col_impl._register_tensor_work(buf3, buf3_work)") \
+ .check("buf1 = _wait_tensor(buf1)") \
+ .check("buf4 = buf1") \
+ .check("buf6 = buf0; del buf0 # reuse") \
+ .check("buf2 = _wait_tensor(buf2)") \
+ .check("buf7 = buf2") \
+ .check("return (buf1, buf5, buf6, buf2") \
+ .run(code)
out = compiled(inputs, **self.get_world_trs())
correct = func(inputs, **self.get_world_trs())
assert same(out, correct), f"{out} va {correct}"
@@ -1175,9 +1125,7 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
def func(inp, *, tag, ranks, group_size):
x = inp + 1
- tensor_list = torch.ops.c10d_functional.reduce_scatter_tensor_coalesced(
- [x, inp], "sum", tag, ranks, group_size
- )
+ tensor_list = torch.ops.c10d_functional.reduce_scatter_tensor_coalesced([x, inp], "sum", tag, ranks, group_size)
y = x + 2
ar0 = torch.ops.c10d_functional.wait_tensor(tensor_list[0])
ar1 = torch.ops.c10d_functional.wait_tensor(tensor_list[1])
@@ -1191,30 +1139,24 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
code = run_and_get_triton_code(compiled, inputs, **self.get_world_trs())
# NOTE: The first return value should be the output of the first wait_tensor.
# We want to make sure no unneccessary copy is made.
- FileCheck().check("buf0 = empty").check("buf5 = empty").check(
- "triton_poi__0.run(arg0_1, buf0, buf5"
- ).check("buf1 = empty").check("buf2 = empty").check_not("copy_(").check(
- "buf3 = [buf1,buf2]"
- ).check(
- "buf3_work = fun_col_impl._reduce_scatter_tensor_coalesced_fallback("
- "output_tensors=buf3, input_tensors=buf3_inputs"
- ).check(
- "fun_col_impl._register_tensor_work(buf3, buf3_work)"
- ).check(
- "buf1 = _wait_tensor(buf1)"
- ).check(
- "buf4 = buf1"
- ).check(
- "buf6 = buf0; del buf0 # reuse"
- ).check(
- "buf2 = _wait_tensor(buf2)"
- ).check(
- "buf7 = buf2"
- ).check(
- "return (buf1, buf5, buf6, buf2"
- ).run(
- code
- )
+ FileCheck() \
+ .check("buf0 = empty") \
+ .check("buf5 = empty") \
+ .check("triton_poi__0.run(arg0_1, buf0, buf5") \
+ .check("buf1 = empty") \
+ .check("buf2 = empty") \
+ .check_not("copy_(") \
+ .check("buf3 = [buf1,buf2]") \
+ .check("buf3_work = fun_col_impl._reduce_scatter_tensor_coalesced_fallback("
+ "output_tensors=buf3, input_tensors=buf3_inputs") \
+ .check("fun_col_impl._register_tensor_work(buf3, buf3_work)") \
+ .check("buf1 = _wait_tensor(buf1)") \
+ .check("buf4 = buf1") \
+ .check("buf6 = buf0; del buf0 # reuse") \
+ .check("buf2 = _wait_tensor(buf2)") \
+ .check("buf7 = buf2") \
+ .check("return (buf1, buf5, buf6, buf2") \
+ .run(code)
out = compiled(inputs, **self.get_world_trs())
correct = func(inputs, **self.get_world_trs())
assert same(out, correct), f"{out} va {correct}"
@@ -1222,5 +1164,4 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
-
run_tests()
diff --git a/test/distributed/test_launcher.py b/test/distributed/test_launcher.py
index 58c5de168e..178d98ffdc 100644
--- a/test/distributed/test_launcher.py
+++ b/test/distributed/test_launcher.py
@@ -13,9 +13,9 @@ if not dist.is_available():
sys.exit(0)
from torch.testing._internal.common_utils import (
- run_tests,
TEST_WITH_DEV_DBG_ASAN,
TestCase,
+ run_tests,
)
diff --git a/test/distributed/test_multi_threaded_pg.py b/test/distributed/test_multi_threaded_pg.py
index 5bc9700e24..1109c731b5 100644
--- a/test/distributed/test_multi_threaded_pg.py
+++ b/test/distributed/test_multi_threaded_pg.py
@@ -1,32 +1,34 @@
# Owner(s): ["oncall: distributed"]
-import operator
import os
import sys
-import threading
-from functools import reduce
-from unittest import skip, SkipTest
-
import torch
-import torch.autograd
import torch.distributed as dist
from torch._C._distributed_c10d import ReduceOp
+from unittest import skip, SkipTest
+import operator
+from functools import reduce
+import threading
+import torch.autograd
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
from torch.testing._internal.common_distributed import (
+ spawn_threads_and_init_comms,
MultiThreadedTestCase,
skip_if_lt_x_gpu,
- spawn_threads_and_init_comms,
)
-from torch.testing._internal.common_utils import IS_SANDCASTLE, run_tests, TestCase
+from torch.testing._internal.common_utils import (
+ TestCase,
+ run_tests,
+ IS_SANDCASTLE,
+)
DEFAULT_WORLD_SIZE = 4
-
class TestCollectivesWithWrapper(TestCase):
@spawn_threads_and_init_comms(world_size=4)
def test_broadcast_object_list(self):
@@ -40,9 +42,7 @@ class TestCollectivesWithWrapper(TestCase):
@spawn_threads_and_init_comms(world_size=4)
def _test_method(self):
input_tensor = torch.ones(3, 3) * dist.get_rank() # perform 1st all gather
- output_tensors = [
- torch.empty_like(input_tensor) for _ in range(dist.get_world_size())
- ]
+ output_tensors = [torch.empty_like(input_tensor) for _ in range(dist.get_world_size())]
dist.all_gather(output_tensors, input_tensor)
if dist.get_rank() == 0:
@@ -57,9 +57,7 @@ class TestCollectivesWithWrapper(TestCase):
@spawn_threads_and_init_comms(world_size=4)
def _test_method(self):
input_tensor = torch.ones(3, 3) * dist.get_rank() # perform 1st all gather
- output_tensors = [
- torch.empty_like(input_tensor) for _ in range(dist.get_world_size())
- ]
+ output_tensors = [torch.empty_like(input_tensor) for _ in range(dist.get_world_size())]
dist.all_gather(output_tensors, input_tensor)
if dist.get_rank() == 1:
@@ -74,15 +72,11 @@ class TestCollectivesWithWrapper(TestCase):
@spawn_threads_and_init_comms(world_size=4)
def _test_method(self):
input_tensor = torch.ones(3, 3) * dist.get_rank() # perform 1st all gather
- output_tensors = [
- torch.empty_like(input_tensor) for _ in range(dist.get_world_size())
- ]
+ output_tensors = [torch.empty_like(input_tensor) for _ in range(dist.get_world_size())]
dist.all_gather(output_tensors, input_tensor)
if dist.get_rank() > 0:
- raise AssertionError(
- "Mimic real test failure."
- ) # fail on all non-zero rank
+ raise AssertionError("Mimic real test failure.") # fail on all non-zero rank
dist.all_gather(output_tensors, input_tensor) # perform 2nd all gather
@@ -131,7 +125,6 @@ class TestCollectivesWithWrapper(TestCase):
dist.all_to_all_single(out, send)
self.assertEqual(out.tolist(), list(zip(range(world_size), range(world_size))))
-
class TestCollectivesWithBaseClass(MultiThreadedTestCase):
@property
def world_size(self):
@@ -148,9 +141,7 @@ class TestCollectivesWithBaseClass(MultiThreadedTestCase):
def test_allgather(self):
input_tensor = torch.ones(3, 3) * dist.get_rank()
- output_tensors = [
- torch.empty_like(input_tensor) for _ in range(self.world_size)
- ]
+ output_tensors = [torch.empty_like(input_tensor) for _ in range(self.world_size)]
dist.all_gather(output_tensors, input_tensor)
for rank, out_tensor in enumerate(output_tensors):
self.assertEqual(out_tensor, torch.ones(3, 3) * rank)
@@ -316,11 +307,7 @@ class TestCollectivesWithBaseClass(MultiThreadedTestCase):
result, rank = ctx.saved_tensors
bwd_tid = threading.current_thread().ident
- self.assertEqual(
- fwd_tid,
- bwd_tid,
- f"bwd not running in the same thread a fwd for rank {rank.item()}",
- )
+ self.assertEqual(fwd_tid, bwd_tid, f"bwd not running in the same thread a fwd for rank {rank.item()}")
self.assertTrue(dist.is_initialized())
self.assertEqual(int(rank.item()), dist.get_rank())
dist.all_reduce(result)
@@ -328,12 +315,9 @@ class TestCollectivesWithBaseClass(MultiThreadedTestCase):
return grad_output * result
- x = torch.tensor(
- [dist.get_rank()], dtype=torch.float, device="cuda", requires_grad=True
- )
+ x = torch.tensor([dist.get_rank()], dtype=torch.float, device="cuda", requires_grad=True)
x = MyFunc.apply(x)
x.sum().backward()
-
if __name__ == "__main__":
run_tests()
diff --git a/test/distributed/test_nccl.py b/test/distributed/test_nccl.py
index 7da2c9af9a..27997256ea 100644
--- a/test/distributed/test_nccl.py
+++ b/test/distributed/test_nccl.py
@@ -1,27 +1,26 @@
# Owner(s): ["oncall: distributed"]
-import re
import sys
-
import torch
-import torch.cuda
import torch.cuda.nccl as nccl
+import torch.cuda
import torch.distributed as c10d
-from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU
-from torch.testing._internal.common_device_type import (
- dtypes,
- instantiate_device_type_tests,
-)
from torch.testing._internal.common_utils import (
+ TestCase,
+ run_tests,
IS_WINDOWS,
load_tests,
- NoTest,
- run_tests,
- skip_but_pass_in_sandcastle_if,
TEST_WITH_ROCM,
- TestCase,
+ skip_but_pass_in_sandcastle_if,
+ NoTest,
)
+from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU
+from torch.testing._internal.common_device_type import (
+ instantiate_device_type_tests,
+ dtypes,
+)
+import re
HIP_VERSION = (
0.0
diff --git a/test/distributed/test_store.py b/test/distributed/test_store.py
index 8383101d20..da76d7b6a1 100644
--- a/test/distributed/test_store.py
+++ b/test/distributed/test_store.py
@@ -5,8 +5,8 @@ import os
import socket
import sys
import tempfile
-import threading
import time
+import threading
from datetime import timedelta
from sys import platform
@@ -14,12 +14,9 @@ import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as c10d
import torch.distributed.rpc as rpc
-from torch.distributed import DistError, DistNetworkError, DistStoreError
+from torch.distributed import DistNetworkError, DistError, DistStoreError
from torch.testing._internal.common_distributed import MultiThreadedTestCase
-from torch.testing._internal.common_utils import (
- instantiate_parametrized_tests,
- parametrize,
-)
+from torch.testing._internal.common_utils import instantiate_parametrized_tests, parametrize
if not dist.is_available():
print("torch.distributed not available, skipping tests", file=sys.stderr)
@@ -27,17 +24,17 @@ if not dist.is_available():
import torch.testing._internal.common_utils as common
from torch.testing._internal.common_distributed import (
- create_tcp_store,
skip_if_win32,
- tp_transports,
+ create_tcp_store,
+ tp_transports
)
from torch.testing._internal.common_utils import (
- ADDRESS_IN_USE,
- CONNECT_TIMEOUT,
+ TestCase,
load_tests,
- retry_on_connect_failures,
run_tests,
- TestCase,
+ retry_on_connect_failures,
+ ADDRESS_IN_USE,
+ CONNECT_TIMEOUT,
)
# load_tests from common_utils is used to automatically filter tests for
@@ -65,7 +62,7 @@ def gpus_for_rank(world_size):
gpus_for_rank = []
for rank in range(world_size):
gpus_for_rank.append(
- visible_devices[rank * gpus_per_process : (rank + 1) * gpus_per_process]
+ visible_devices[rank * gpus_per_process: (rank + 1) * gpus_per_process]
)
return gpus_for_rank
@@ -105,9 +102,7 @@ class StoreTestBase:
self._test_set_get_check(self._create_store())
def _test_compare_set(self, store):
- missing_key_result = store.compare_set(
- "cs_key0", "wrong_old_value", "new_value0"
- )
+ missing_key_result = store.compare_set("cs_key0", "wrong_old_value", "new_value0")
self.assertEqual(b"wrong_old_value", missing_key_result)
store.set("cs_key0", "value0")
@@ -194,14 +189,10 @@ class FileStoreTest(TestCase, StoreTestBase):
rpc_backend_options = rpc.TensorPipeRpcBackendOptions()
rpc_backend_options.init_method = f"file://{file.name}"
rpc_backend_options._transports = tp_transports()
- rpc.init_rpc(
- "worker", rank=0, world_size=1, rpc_backend_options=rpc_backend_options
- )
+ rpc.init_rpc("worker", rank=0, world_size=1, rpc_backend_options=rpc_backend_options)
# Init PG using file
- dist.init_process_group(
- "gloo", rank=0, world_size=1, init_method=f"file://{file.name}"
- )
+ dist.init_process_group("gloo", rank=0, world_size=1, init_method=f"file://{file.name}")
dist.destroy_process_group()
assert os.path.exists(file.name)
@@ -237,9 +228,7 @@ class PrefixStoreTest(TestCase):
self.file = tempfile.NamedTemporaryFile(delete=False)
def test_get_underlying_store(self):
- tcp_store = dist.TCPStore(
- host_name=DEFAULT_HOSTNAME, port=0, world_size=1, is_master=True
- )
+ tcp_store = dist.TCPStore(host_name=DEFAULT_HOSTNAME, port=0, world_size=1, is_master=True)
hash_store = dist.HashStore()
file_store = dist.FileStore(self.file.name, world_size=1)
for store in [tcp_store, hash_store, file_store]:
@@ -316,7 +305,8 @@ class TCPStoreTest(TestCase, StoreTestBase):
)
backend_opts = rpc.TensorPipeRpcBackendOptions(
- init_method=f"tcp://{addr}:{port}", _transports=tp_transports()
+ init_method=f"tcp://{addr}:{port}",
+ _transports=tp_transports()
)
rpc.init_rpc(
name="worker0",
@@ -372,17 +362,11 @@ class TCPStoreTest(TestCase, StoreTestBase):
self._test_numkeys_delkeys(self._create_store())
def _create_client(self, index, addr, port, world_size):
- client_store = dist.TCPStore(
- addr, port, world_size=world_size, timeout=timedelta(seconds=10)
- )
+ client_store = dist.TCPStore(addr, port, world_size=world_size, timeout=timedelta(seconds=10))
self.assertEqual(b"value", client_store.get("key"))
client_store.set(f"new_key{index}", f"new_value{index}")
- self.assertEqual(
- f"next_value{index}".encode(),
- client_store.compare_set(
- f"new_key{index}", f"new_value{index}", f"next_value{index}"
- ),
- )
+ self.assertEqual(f"next_value{index}".encode(),
+ client_store.compare_set(f"new_key{index}", f"new_value{index}", f"next_value{index}"))
def _multi_worker_helper(self, world_size):
addr = DEFAULT_HOSTNAME
@@ -424,34 +408,22 @@ class TCPStoreTest(TestCase, StoreTestBase):
self.assertEqual(b"tato", v1)
def test_store_timeout_on_missing_clients(self):
- with self.assertRaisesRegex(
- DistStoreError,
- r"Timed out after \d+ seconds waiting for clients. \d+/\d+ clients joined.",
- ):
+ with self.assertRaisesRegex(DistStoreError, r"Timed out after \d+ seconds waiting for clients. \d+/\d+ clients joined."):
# world_size is 2 so it should timeout
dist.TCPStore("localhost", 0, 2, True, timeout=timedelta(seconds=2))
# when wait_for_workers is not set, then there should be no exception raised
- dist.TCPStore(
- "localhost",
- 0,
- 2,
- True,
- timeout=timedelta(seconds=2),
- wait_for_workers=False,
- )
-
+ dist.TCPStore("localhost", 0, 2, True, timeout=timedelta(seconds=2), wait_for_workers=False)
class LibUvTCPStoreTest(TCPStoreTest):
+
def _create_store(self):
store = create_tcp_store(use_libuv=True)
store.set_timeout(timedelta(seconds=300))
return store
def _create_store_with_ws(self, addr, world_size):
- return create_tcp_store(
- addr, world_size, wait_for_workers=False, use_libuv=True
- )
+ return create_tcp_store(addr, world_size, wait_for_workers=False, use_libuv=True)
class PrefixTCPStoreTest(TestCase, StoreTestBase):
@@ -473,13 +445,10 @@ class PrefixTCPStoreTest(TestCase, StoreTestBase):
def test_underlying_non_prefix_store(self):
store = self._create_store()
- wrapped_store = dist.PrefixStore(
- self.prefix, dist.PrefixStore(self.prefix, store)
- )
+ wrapped_store = dist.PrefixStore(self.prefix, dist.PrefixStore(self.prefix, store))
self.assertEqual(self.tcpstore, store._underlying_non_prefix_store)
self.assertEqual(self.tcpstore, wrapped_store._underlying_non_prefix_store)
-
class MyPythonStore(dist.Store):
def __init__(self):
super().__init__()
@@ -514,7 +483,6 @@ class MyPythonStore(dist.Store):
val = self.store[key] = newValue
return val
-
class PythonStoreTest(TestCase):
def test_set_get(self):
# If we were to inherit from StoreTestBase and try to use
@@ -610,9 +578,7 @@ class RendezvousTCPTest(TestCase):
next(gen)
def test_dns_timeout(self):
- with self.assertRaisesRegex(
- DistNetworkError, "client socket has timed out after.*dnsnotexist"
- ) as manager:
+ with self.assertRaisesRegex(DistNetworkError, "client socket has timed out after.*dnsnotexist") as manager:
gen = dist.rendezvous(
"tcp://dnsnotexist:23456?world_size=2&rank=0",
timeout=timedelta(seconds=1),
@@ -675,7 +641,6 @@ class RendezvousTCPTest(TestCase):
store0, rank0, size0 = next(gen0)
self.assertTrue(store0.libuvBackend)
-
class DummyStore(dist.Store):
def __init__(self):
self.appends = []
@@ -697,12 +662,10 @@ class DummyStore(dist.Store):
def has_extended_api(self):
return True
-
class TestPythonStore(TestCase):
def test_optional_methods_fail(self):
class TestStore(dist.Store):
pass
-
store = TestStore()
self.assertFalse(store.has_extended_api())
with self.assertRaisesRegex(RuntimeError, "Not implemented."):
@@ -715,7 +678,6 @@ class TestPythonStore(TestCase):
def test_has_extended_api_passthrough(self):
class TestStore(dist.Store):
pass
-
test_store = TestStore()
store = dist.PrefixStore("p", test_store)
self.assertFalse(store.has_extended_api())
@@ -750,10 +712,10 @@ class TestPythonStore(TestCase):
def test_multi_set_roundtrip(self):
store = DummyStore()
prefix = dist.PrefixStore("p", store)
- prefix.multi_set(["foo", "bar"], [b"x", b"y"])
+ prefix.multi_set(["foo", "bar"], [b'x', b'y'])
self.assertEqual(1, len(store.multi_sets))
self.assertEqual(["p/foo", "p/bar"], store.multi_sets[0][0])
- self.assertEqual([b"x", b"y"], store.multi_sets[0][1])
+ self.assertEqual([b'x', b'y'], store.multi_sets[0][1])
def test_extended_methods_fallbacks(self):
test_store = MyPythonStore()
@@ -773,9 +735,7 @@ class TestMultiThreadedWait(MultiThreadedTestCase):
stores = [
dist.FileStore(tempfile.NamedTemporaryFile(delete=False).name, 1),
dist.HashStore(),
- dist.PrefixStore(
- "pre", dist.FileStore(tempfile.NamedTemporaryFile(delete=False).name, 1)
- ),
+ dist.PrefixStore("pre", dist.FileStore(tempfile.NamedTemporaryFile(delete=False).name, 1)),
create_tcp_store(),
create_tcp_store(use_libuv=True),
dist.PrefixStore("pre", create_tcp_store()),
@@ -804,18 +764,15 @@ class TestMultiThreadedWait(MultiThreadedTestCase):
instantiate_parametrized_tests(TestMultiThreadedWait)
-
@skip_if_win32()
class TimeoutTest(TestCase):
def tearDown(self):
import signal
-
super().tearDown()
signal.signal(signal.SIGUSR1, signal.SIG_IGN)
def test_interrupt_doesnt_break_wait(self):
import signal
-
rank_res = [None, None]
def run(rank, my_store):
@@ -832,29 +789,13 @@ class TimeoutTest(TestCase):
time.sleep(1)
rank0_store = dist.TCPStore(
- host_name=DEFAULT_HOSTNAME,
- port=0,
- world_size=2,
- is_master=True,
- wait_for_workers=False,
- )
+ host_name=DEFAULT_HOSTNAME, port=0, world_size=2, is_master=True, wait_for_workers=False)
rank1_store = dist.TCPStore(
- host_name=DEFAULT_HOSTNAME,
- port=rank0_store.port,
- world_size=2,
- is_master=False,
- wait_for_workers=False,
- )
+ host_name=DEFAULT_HOSTNAME, port=rank0_store.port, world_size=2, is_master=False, wait_for_workers=False)
ths = []
for i in range(2):
- t = threading.Thread(
- target=run,
- args=(
- i,
- [rank0_store, rank1_store][i],
- ),
- )
+ t = threading.Thread(target=run, args=(i, [rank0_store, rank1_store][i],))
t.start()
ths.append(t)
@@ -880,12 +821,7 @@ class InitPgWithUvStore(TestCase):
def test_with_url_param(self):
port = common.find_free_port()
- dist.init_process_group(
- "gloo",
- rank=0,
- world_size=1,
- init_method=f"tcp://{DEFAULT_HOSTNAME}:{port}?use_libuv=1",
- )
+ dist.init_process_group("gloo", rank=0, world_size=1, init_method=f"tcp://{DEFAULT_HOSTNAME}:{port}?use_libuv=1")
self._run_test()
def test_with_env_var(self):
@@ -907,7 +843,6 @@ class InitPgWithUvStore(TestCase):
self.assertTrue(store.libuvBackend)
dist.destroy_process_group()
-
if __name__ == "__main__":
assert (
not torch.cuda._initialized
|
2.41.0
|
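Editorial note (not part of the patch data): the test hunks above rely on FileCheck chains to assert on Inductor's generated wrapper/Triton code. Below is a minimal, hedged sketch of that pattern, using only the API surface visible in these diffs (check, check_not, check_count, check_regex, run); the `generated` string is an invented stand-in for run_and_get_triton_code output, not real Inductor output.

# Sketch only: illustrates the FileCheck assertion style used in the tests above.
from torch.testing import FileCheck

generated = """
buf0 = empty((4, 4))
buf0.copy_(arg0_1)
buf1_work = dist.all_reduce(buf1
return (buf0, )
"""

# Each .check(...) must match after the previous match, in order;
# .check_not(...) asserts the pattern is absent between neighbouring checks.
FileCheck() \
    .check("buf0 = empty") \
    .check("buf0.copy_(arg0_1)") \
    .check_not("buf0 = _wait_tensor") \
    .check("return (buf0, )") \
    .run(generated)

# Count-based variant, as in the all_to_all_single tests above.
FileCheck().check_count("dist.all_reduce", 1, exactly=True).run(generated)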
b0c768c5b4d6c76c77f287ed5e4b6c7be43d1a9
|
Mon, 15 Apr 2024 15:39:05 -0700
|
[PATCH 0199/1000] [dynamo][refactor] Move LazyGraphModule handling (#124113)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124113 Approved by: https://github.com/jansel ghstack dependencies: #124078
|
diff --git a/torch/_dynamo/eval_frame.py b/torch/_dynamo/eval_frame.py
index 52b610be89..20f8675c75 100644
--- a/torch/_dynamo/eval_frame.py
+++ b/torch/_dynamo/eval_frame.py
@@ -315,32 +315,8 @@ class _TorchDynamoContext:
fn = innermost_fn(fn)
# add context containing GraphModule to any GraphModule forward functions
- from torch.fx._lazy_graph_module import _LazyGraphModule
-
- if isinstance(fn, _LazyGraphModule) or (
- isinstance(getattr(fn, "__self__", None), _LazyGraphModule)
- and fn.__name__ == "_lazy_forward"
- ):
- # Since dynamo will run the forward method for the GraphModule shortly
- # anyways, it does not hurt to do the real recompilation here if
- # this is a _LazyGraphModule. This makes it easier for dynamo to
- # optimize a _LazyGraphModule.
-
- lazy_gm = fn if isinstance(fn, _LazyGraphModule) else fn.__self__
-
- _LazyGraphModule.force_recompile(lazy_gm)
-
- # Assume that the underlying node metadata of `fn`,
- # a GraphModule instance, accurately represents
- # all instances of type(fn).
- code_context.get_context(lazy_gm.forward.__code__)[
- "orig_graphmodule"
- ] = weakref.ref(lazy_gm)
-
- if not isinstance(fn, _LazyGraphModule):
- # replace fn with the real forward method
- fn = lazy_gm.forward
- elif isinstance(fn, GraphModule):
+ if isinstance(fn, GraphModule):
+ # add context containing GraphModule to any GraphModule forward functions
code_context.get_context(fn.forward.__code__)[
"orig_graphmodule"
] = weakref.ref(fn)
@@ -368,7 +344,8 @@ class _TorchDynamoContext:
if (
(filename is None or trace_rules.check(fn))
and (
- getattr(fn, "__name__", "") not in ["_call_impl", "_wrapped_call_impl"]
+ getattr(fn, "__name__", "")
+ not in ["_call_impl", "_wrapped_call_impl", "_lazy_forward"]
)
and filename not in DONT_WRAP_FILES
):
diff --git a/torch/_dynamo/output_graph.py b/torch/_dynamo/output_graph.py
index af768761a5..2668cf6ea3 100644
--- a/torch/_dynamo/output_graph.py
+++ b/torch/_dynamo/output_graph.py
@@ -1262,6 +1262,30 @@ class OutputGraph:
with self.restore_global_state():
compiled_fn = self.call_user_compiler(gm)
+
+ from torch.fx._lazy_graph_module import _LazyGraphModule
+
+ if isinstance(compiled_fn, _LazyGraphModule) or (
+ isinstance(getattr(compiled_fn, "__self__", None), _LazyGraphModule)
+ and compiled_fn.__name__ == "_lazy_forward"
+ ):
+ # Since dynamo will run the forward method for the GraphModule shortly
+ # anyways, it does not hurt to do the real recompilation here if
+ # this is a _LazyGraphModule. This makes it easier for dynamo to
+ # optimize a _LazyGraphModule.
+
+ lazy_gm = (
+ compiled_fn
+ if isinstance(compiled_fn, _LazyGraphModule)
+ else compiled_fn.__self__
+ )
+
+ _LazyGraphModule.force_recompile(lazy_gm)
+
+ if not isinstance(compiled_fn, _LazyGraphModule):
+ # replace compiled_fn with the real forward method
+ compiled_fn = lazy_gm.forward
+
compiled_fn = disable(compiled_fn)
counters["stats"]["unique_graphs"] += 1
|
2.41.0
|
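Editorial note (not part of the patch data): the hunks above move the _LazyGraphModule special-casing from the decorator wrapper in eval_frame.py to the point in output_graph.py where the user compiler's result comes back. A hedged, self-contained restatement of that relocated logic follows; the helper name materialize_lazy_forward is illustrative only, while the calls mirror the diff.

# Sketch of the relocated handling, mirroring the output_graph.py hunk above.
# `materialize_lazy_forward` is an illustrative name, not a function in torch.
from torch.fx._lazy_graph_module import _LazyGraphModule

def materialize_lazy_forward(compiled_fn):
    is_lazy_bound_forward = (
        isinstance(getattr(compiled_fn, "__self__", None), _LazyGraphModule)
        and compiled_fn.__name__ == "_lazy_forward"
    )
    if isinstance(compiled_fn, _LazyGraphModule) or is_lazy_bound_forward:
        lazy_gm = (
            compiled_fn
            if isinstance(compiled_fn, _LazyGraphModule)
            else compiled_fn.__self__
        )
        # Dynamo will run this forward shortly anyway, so recompiling the
        # lazy module here costs little and lets dynamo trace real Python code.
        _LazyGraphModule.force_recompile(lazy_gm)
        if not isinstance(compiled_fn, _LazyGraphModule):
            # Swap the lazy stub for the freshly generated real forward.
            compiled_fn = lazy_gm.forward
    return compiled_fn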
b3594f90e77f3e3855f6c41c3d3a916c9a07b81
|
Mon, 15 Apr 2024 17:40:53 -0700
|
[PATCH 0201/1000] [dynamo] fix call_finally issue in Python 3.8 (#124122)
|
Fix https://github.com/pytorch/pytorch/issues/97811 again... Pull Request resolved: https://github.com/pytorch/pytorch/pull/124122 Approved by: https://github.com/jansel
|
diff --git a/test/dynamo/test_repros.py b/test/dynamo/test_repros.py
index 0f8561f6fb..899cebcac7 100644
--- a/test/dynamo/test_repros.py
+++ b/test/dynamo/test_repros.py
@@ -3875,6 +3875,16 @@ class ReproTests(torch._dynamo.test_case.TestCase):
make_fn(None)()
+ def test_call_finally_python_3_8_2(self):
+ def f(x):
+ while x:
+ try:
+ pass
+ except Exception as _:
+ continue
+
+ torch.compile(f, backend="eager")(0)
+
def test_call_finally_opcode_python_3_8(self):
def fn():
try:
diff --git a/torch/_dynamo/bytecode_analysis.py b/torch/_dynamo/bytecode_analysis.py
index 092b20491d..340378e726 100644
--- a/torch/_dynamo/bytecode_analysis.py
+++ b/torch/_dynamo/bytecode_analysis.py
@@ -225,10 +225,13 @@ def stacksize_analysis(instructions) -> Union[int, float]:
)
if inst.opcode not in TERMINAL_OPCODES:
assert next_inst is not None, f"missing next inst: {inst}"
- stack_sizes[next_inst].offset_of(
- stack_size,
- stack_effect(inst.opcode, inst.arg, jump=is_call_finally),
+ # total stack effect of CALL_FINALLY and END_FINALLY in 3.8 is 0
+ eff = (
+ 0
+ if is_call_finally
+ else stack_effect(inst.opcode, inst.arg, jump=False)
)
+ stack_sizes[next_inst].offset_of(stack_size, eff)
if inst.opcode in JUMP_OPCODES and not is_call_finally:
stack_sizes[inst.target].offset_of(
stack_size, stack_effect(inst.opcode, inst.arg, jump=True)
|
2.41.0
|
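Editorial note (not part of the patch data): per the in-diff comment, the combined stack effect of CALL_FINALLY and END_FINALLY on Python 3.8 is zero, so stacksize_analysis now hardcodes a net-zero fall-through effect for CALL_FINALLY instead of asking stack_effect(..., jump=True). The shape of code the new regression test exercises is reproduced below; only an unpatched 3.8 build mishandled it.

# Regression shape from test_call_finally_python_3_8_2 above; compiling it
# with the eager backend is enough to exercise dynamo's stack-size analysis.
import torch

def f(x):
    while x:
        try:
            pass
        except Exception as _:
            continue

torch.compile(f, backend="eager")(0)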
309580d69a1fbbe73785785d011d6cf44113103
|
Mon, 15 Apr 2024 17:49:52 -0700
|
[PATCH 0202/1000] [dynamo, 3.12] handle possibility of NULL local variables during graph breaks (#124095)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124095 Approved by: https://github.com/jansel
|
diff --git a/test/dynamo/test_misc.py b/test/dynamo/test_misc.py
index 9457c63f8c..14f43ec37d 100644
--- a/test/dynamo/test_misc.py
+++ b/test/dynamo/test_misc.py
@@ -10161,6 +10161,17 @@ fn
opt_fn = torch.compile(fn, backend="eager")
opt_fn(torch.randn(3, 3))
+ def test_load_fast_and_clear_graph_break(self):
+ # Can result in a segfault in 3.12+ if LOAD_FAST_AND_CLEAR
+ # is not handled properly in a graph break
+ def fn():
+ out = torch.cat([torch.randn(r, 5) for r in range(3)])
+ torch._dynamo.graph_break()
+ out = torch.cat([torch.randn(r, 5) for r in range(3)])
+ return out
+
+ self.assertEqual(torch._dynamo.optimize("eager")(fn)().shape, (3, 5))
+
def test_raises_importerror1(self):
@torch.compile(backend="eager")
def fn(x):
diff --git a/torch/_dynamo/output_graph.py b/torch/_dynamo/output_graph.py
index 2668cf6ea3..1cf0c1363a 100644
--- a/torch/_dynamo/output_graph.py
+++ b/torch/_dynamo/output_graph.py
@@ -93,6 +93,7 @@ from .variables.builder import (
VariableBuilder,
wrap_fx_proxy,
)
+from .variables.misc import NullVariable
from .variables.nn_module import NNModuleVariable
from .variables.tensor import (
NumpyNdarrayVariable,
@@ -1001,6 +1002,14 @@ class OutputGraph:
# while running test_subgraphs.py
if isinstance(v.source, LocalSource) and v.source.local_name == k:
continue # no need to restore initial state
+ # Do not load variable if it is NULL.
+ if sys.version_info >= (3, 12):
+ # Continuation function will load the NULL for v.
+ if type.__instancecheck__(NullVariable, v):
+ continue
+ else:
+ # A variable should never be NULL in < 3.12
+ assert not type.__instancecheck__(NullVariable, v)
if v not in val_to_names:
val_to_names[v] = list()
val_to_names[v].append(k)
diff --git a/torch/_dynamo/resume_execution.py b/torch/_dynamo/resume_execution.py
index a34030fd88..545bb0f5c9 100644
--- a/torch/_dynamo/resume_execution.py
+++ b/torch/_dynamo/resume_execution.py
@@ -339,6 +339,7 @@ class ContinueExecutionCache:
setup_fn_target_offsets: Tuple[int], # only used in Python 3.11+
nstack: int,
argnames: Tuple[str],
+ argnames_null: Tuple[str],
setup_fns: Tuple[ReenterWith],
null_idxes: Tuple[int],
) -> types.CodeType:
@@ -356,6 +357,7 @@ class ContinueExecutionCache:
setup_fn_target_offsets,
nstack,
argnames,
+ argnames_null,
setup_fns,
null_idxes,
)
@@ -391,7 +393,9 @@ class ContinueExecutionCache:
code_options["co_posonlyargcount"] = 0
code_options["co_kwonlyargcount"] = 0
code_options["co_varnames"] = tuple(
- args + [v for v in code_options["co_varnames"] if v not in args]
+ args
+ + [v for v in argnames_null if v not in args]
+ + [v for v in code_options["co_varnames"] if v not in args]
)
code_options["co_flags"] = code_options["co_flags"] & ~(
CO_VARARGS | CO_VARKEYWORDS
@@ -442,6 +446,18 @@ class ContinueExecutionCache:
assert not hooks
+ # 3.12+: store NULL into variables that were NULL
+ if argnames_null:
+ assert sys.version_info >= (3, 12)
+ for v in argnames_null:
+ assert v not in args
+ prefix.extend(
+ [
+ create_instruction("PUSH_NULL"),
+ create_instruction("STORE_FAST", argval=v),
+ ]
+ )
+
prefix.append(create_jump_absolute(target))
# because the line number table monotonically increases from co_firstlineno
diff --git a/torch/_dynamo/symbolic_convert.py b/torch/_dynamo/symbolic_convert.py
index 377c9e5902..8a8fda775e 100644
--- a/torch/_dynamo/symbolic_convert.py
+++ b/torch/_dynamo/symbolic_convert.py
@@ -2270,11 +2270,24 @@ class InstructionTranslator(InstructionTranslatorBase):
return [create_instruction("RETURN_CONST", argval=inst.argval)]
reads = livevars_analysis(self.instructions, inst)
- argnames = tuple(
+ all_argnames = tuple(
k
for k in self.symbolic_locals.keys()
if k in reads and k not in self.cell_and_freevars()
)
+ # NOTE: do not use isinstance, since it realizes lazy VT's
+ argnames = tuple(
+ k
+ for k in all_argnames
+ if not type.__instancecheck__(NullVariable, self.symbolic_locals[k])
+ )
+ argnames_null = tuple(
+ k
+ for k in all_argnames
+ if type.__instancecheck__(NullVariable, self.symbolic_locals[k])
+ )
+ if sys.version_info < (3, 12):
+ assert len(argnames_null) == 0, "variables should not be NULL in < 3.12"
cg = PyCodegen(self)
@@ -2312,6 +2325,7 @@ class InstructionTranslator(InstructionTranslatorBase):
tuple(b.target.offset for b in self.block_stack),
stack_len,
argnames,
+ argnames_null,
tuple(b.resume_fn() for b in self.block_stack),
tuple(null_idxes),
)
|
2.41.0
|
e17f62d103bdb6683f9b93b19ca60751192c45a
|
Mon, 15 Apr 2024 17:49:53 -0700
|
[PATCH 0203/1000] [dynamo, 3.12] move functorch/test_aotdispatch.py::TestAOTAutograd::test_view_detach from dynamo xfail to skip (#124100)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124100 Approved by: https://github.com/zou3519, https://github.com/jansel ghstack dependencies: #124095
|
diff --git a/test/dynamo_expected_failures/TestAOTAutograd.test_view_detach b/test/dynamo_skips/TestAOTAutograd.test_view_detach
similarity index 100%
rename from test/dynamo_expected_failures/TestAOTAutograd.test_view_detach
rename to test/dynamo_skips/TestAOTAutograd.test_view_detach
|
2.41.0
|
62096bce6715a51bd1dd3f006920be8e7ca6cbc
|
Mon, 15 Apr 2024 17:49:59 -0700
|
[PATCH 0204/1000] [dynamo, 3.12] skip some failing profiler dynamo-wrapped tests (#124124)
|
The dynamo-wrapped tests and the normal tests give the same results locally. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124124 Approved by: https://github.com/jansel, https://github.com/aaronenyeshi ghstack dependencies: #124095, #124100
|
diff --git a/test/dynamo_skips/TestExperimentalUtils.test_profiler_for_loop_indexing_pattern b/test/dynamo_skips/TestExperimentalUtils.test_profiler_for_loop_indexing_pattern
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test/dynamo_skips/TestProfiler.test_tensorboard_trace_handler b/test/dynamo_skips/TestProfiler.test_tensorboard_trace_handler
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test/dynamo_skips/TestTorchTidyProfiler.test_optimizer b/test/dynamo_skips/TestTorchTidyProfiler.test_optimizer
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test/dynamo_skips/TestTorchTidyProfiler.test_optimizer_parameters_adam b/test/dynamo_skips/TestTorchTidyProfiler.test_optimizer_parameters_adam
new file mode 100644
index 0000000000..e69de29bb2
|
2.41.0
|
dc160864b3aac3c845bd7e2fa565899ab4732f6
|
Mon, 15 Apr 2024 17:49:59 -0700
|
[PATCH 0205/1000] [dynamo, 3.12] enable dynamo-wrapped tests in CI (#123307)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/123307 Approved by: https://github.com/jansel, https://github.com/malfet ghstack dependencies: #124095, #124100, #124124
|
diff --git a/.github/workflows/pull.yml b/.github/workflows/pull.yml
index d287b78d70..8f54248101 100644
--- a/.github/workflows/pull.yml
+++ b/.github/workflows/pull.yml
@@ -215,6 +215,9 @@ jobs:
{ config: "default", shard: 1, num_shards: 3, runner: "linux.2xlarge" },
{ config: "default", shard: 2, num_shards: 3, runner: "linux.2xlarge" },
{ config: "default", shard: 3, num_shards: 3, runner: "linux.2xlarge" },
+ { config: "dynamo", shard: 1, num_shards: 3, runner: "linux.2xlarge" },
+ { config: "dynamo", shard: 2, num_shards: 3, runner: "linux.2xlarge" },
+ { config: "dynamo", shard: 3, num_shards: 3, runner: "linux.2xlarge" },
]}
linux-focal-py3_12-clang10-test:
|
2.41.0
|
f6ce45bcbd7026c00da43db0317ede10830378b
|
Tue, 16 Apr 2024 11:07:12 +0000
|
[PATCH 0206/1000] [Inductor] handle AMD special launch options (#124146)
|
Summary: `matrix_instr_nonkdim` and `waves_per_eu` are AMD-specific launch configs that can't be treated as fn input args. Test Plan: HIP_VISIBLE_DEVICES=7 numactl --cpunodebind=1 --membind=1 buck2 run mode/{opt,amd-gpu} -c fbcode.triton_backend=amd -c fbcode.enable_gpu_sections=true -c fbcode.rocm_arch=mi300 //hammer/modules/sequential/encoders/tests:hstu_bench -- --torch-compile=True. The E2E works well on the magic model. Differential Revision: D56165438 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124146 Approved by: https://github.com/aakhundov
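As a rough illustration of the routing rule in the diff below, here is a minimal plain-Python sketch (not Inductor's actual code); `split_config_kwargs`, its arguments, and the sample values are made up for the example:

```python
AMD_LAUNCH_KEYS = {"matrix_instr_nonkdim", "waves_per_eu"}  # AMD-only launch knobs

def split_config_kwargs(cfg_kwargs, arg_names, is_hip):
    # On ROCm, AMD-only knobs become backend compile options; everything else
    # stays a kernel constant keyed by its argument position.
    constants, options = {}, {}
    for k, v in cfg_kwargs.items():
        if is_hip and k in AMD_LAUNCH_KEYS:
            options[k] = v
        else:
            constants[arg_names.index(k)] = v
    return constants, options

constants, options = split_config_kwargs(
    {"XBLOCK": 64, "waves_per_eu": 2}, ["in_ptr", "out_ptr", "XBLOCK"], is_hip=True
)
assert constants == {2: 64} and options == {"waves_per_eu": 2}
```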
|
diff --git a/torch/_inductor/triton_heuristics.py b/torch/_inductor/triton_heuristics.py
index 7a4ea40b5f..aae26554c9 100644
--- a/torch/_inductor/triton_heuristics.py
+++ b/torch/_inductor/triton_heuristics.py
@@ -300,6 +300,13 @@ class CachingAutotuner(KernelInterface):
"""Ahead of time compile a given autotuner config."""
compile_meta = copy.deepcopy(self.triton_meta)
for k, v in cfg.kwargs.items():
+ if torch.version.hip is not None:
+ if k == "matrix_instr_nonkdim":
+ compile_meta["matrix_instr_nonkdim"] = v
+ continue
+ if k == "waves_per_eu":
+ compile_meta["waves_per_eu"] = v
+ continue
compile_meta["constants"][self.fn.arg_names.index(k)] = v
compile_meta["num_warps"] = cfg.num_warps
compile_meta["num_stages"] = cfg.num_stages
@@ -340,6 +347,13 @@ class CachingAutotuner(KernelInterface):
"num_stages": compile_meta["num_stages"],
"debug": compile_meta["debug"],
}
+ if torch.version.hip is not None:
+ if "waves_per_eu" in compile_meta:
+ options["waves_per_eu"] = compile_meta["waves_per_eu"]
+ if "matrix_instr_nonkdim" in compile_meta:
+ options["matrix_instr_nonkdim"] = compile_meta[
+ "matrix_instr_nonkdim"
+ ]
compile_kwargs = {
"target": target,
"options": options,
|
2.41.0
|
7bd43b5106bed6365f1986229d7c32079653e76
|
Tue, 16 Apr 2024 00:12:17 -0700
|
[PATCH 0208/1000] [compiled autograd][dynamo] use aliases for stack restore when partial graphs steal inputs (#124127)
|
same idea as https://github.com/pytorch/pytorch/pull/123359, but for when we restore stack variables after calling a partial graph: Illustrated by the test case: before: ```python def function(inputs): graph_out_0 = __compiled_fn_2(inputs) getitem_1 = graph_out_0[0] add = inputs[1] <---- error inputs is already cleared del graph_out_0 add_1 = add + getitem_1 add = None getitem_1 = None cpu = add_1.cpu() add_1 = None return (cpu,) ``` after: ```python def function(inputs): inputs_ref_0 = inputs[1] graph_out_1 = __compiled_fn_2(inputs) getitem_1 = graph_out_1[0] add = inputs_ref_0 del graph_out_1 add_1 = add + getitem_1 add = None getitem_1 = None cpu = add_1.cpu() add_1 = None return (cpu,) ``` Co-authored-by: Jason Ansel <jansel@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/124127 Approved by: https://github.com/jansel
|
diff --git a/test/inductor/test_compiled_autograd.py b/test/inductor/test_compiled_autograd.py
index 7992ce8c40..27c1ccd3e7 100644
--- a/test/inductor/test_compiled_autograd.py
+++ b/test/inductor/test_compiled_autograd.py
@@ -257,7 +257,7 @@ main()
self.assertNotEqual(grads[1], None)
self.assertNotEqual(grads[2], None)
- def test_inputs_aliasing_bytecode(self):
+ def test_inputs_aliasing_bytecode_attr_mutations(self):
# Freeze compiled autograd graph
compiler = torch._dynamo.compiled_autograd.AutogradCompilerInstance(compiler_fn)
param = torch.ones(100)
@@ -308,6 +308,65 @@ main()
finally:
handle.remove()
+ def test_inputs_aliasing_bytecode_stack_restore(self):
+ from torch.testing._internal.logging_tensor import LoggingTensor
+
+ # Create a graph that allows inputs stealing
+ def forward(inputs):
+ add = inputs[0] + 1
+ add_1 = add + inputs[1] # handled in suffix for tensor subclass
+ out = add_1.cpu()
+ return (out,)
+
+ gm = torch.fx.symbolic_trace(forward)
+ torch._dynamo.utils.set_locals_to_steal(gm, ["inputs"])
+ compiled_fn = torch.compile(gm)
+
+ inputs = [
+ torch.ones(1000000, dtype=torch.float32),
+ LoggingTensor(torch.ones(1)),
+ ]
+
+ def bytecode_hook(code, out_code):
+ import dis
+ import sys
+
+ if sys.version_info < (3, 11):
+ call_op = "CALL_FUNCTION"
+ else:
+ call_op = "CALL"
+
+ insts = list(dis.get_instructions(out_code))
+ call_graph_idx = next(
+ i for i, inst in enumerate(insts) if inst.opname == call_op
+ )
+ # pre-graph should alias: inputs_ref_0 = inputs[0]
+ matches = [
+ inst
+ for inst in insts[:call_graph_idx]
+ if inst.opname == "STORE_FAST" and inst.argval == "inputs_ref_0"
+ ]
+ self.assertTrue(len(matches) == 1)
+ # post-graph should access inputs_ref_0 instead of inputs
+ matches = [
+ inst for inst in insts[call_graph_idx:] if inst.argval == "inputs"
+ ]
+ self.assertTrue(len(matches) == 0)
+ matches = [
+ inst
+ for inst in insts[call_graph_idx:]
+ if inst.opname == "LOAD_FAST" and inst.argval == "inputs_ref_0"
+ ]
+ self.assertTrue(len(matches) == 1)
+
+ torch._dynamo.reset()
+ handle = torch._dynamo.convert_frame.register_bytecode_hook(bytecode_hook)
+ try:
+ out = compiled_fn(inputs)
+ self.assertTrue(len(inputs) == 0)
+ finally:
+ handle.remove()
+
def test_implicit_add(self):
def fn():
y = torch.randn(1, 4, requires_grad=True)
diff --git a/torch/_dynamo/output_graph.py b/torch/_dynamo/output_graph.py
index 1cf0c1363a..5d9a238c18 100644
--- a/torch/_dynamo/output_graph.py
+++ b/torch/_dynamo/output_graph.py
@@ -853,66 +853,65 @@ class OutputGraph:
raise AssertionError("unreachable")
- def get_attr_mutations_on_stolen_lists(
- self,
- ) -> Dict[str, List[AttributeMutationExisting]]:
- attr_mutations_on_stolen_lists: Dict[str, List[AttributeMutationExisting]] = {}
+ def handle_aliases_for_stolen_lists(self, tx):
+ # If list inputs are stolen, but still needed after the function call, create aliases to keep them alive
+ alias_insts = []
+
+ needs_alias: Dict[
+ str, List[Union[VariableTracker, AttributeMutationExisting]]
+ ] = {}
maybe_gm = self.local_scope.get("self")
stolen_list_names = get_locals_to_steal(maybe_gm)
- for attr_mutation in self.side_effects.store_attr_mutations.keys():
- if (
- not isinstance(attr_mutation, AttributeMutationExisting)
- or not isinstance(attr_mutation.source, GetItemSource)
- or not isinstance(attr_mutation.source.base, LocalSource)
+ for x in [
+ *tx.stack,
+ *tx.symbolic_locals.values(),
+ *self.side_effects.store_attr_mutations.keys(),
+ ]:
+ if not (
+ isinstance(x, (VariableTracker, AttributeMutationExisting))
+ and isinstance(x.source, GetItemSource)
+ and isinstance(x.source.base, LocalSource)
+ and x.source.base.local_name in stolen_list_names
):
continue
- list_name = attr_mutation.source.base.local_name
- if list_name not in stolen_list_names:
- continue
+ stolen_name = x.source.base.local_name
+ if stolen_name not in needs_alias:
+ needs_alias[stolen_name] = []
+ needs_alias[stolen_name].append(x)
- # mutation is of type `stolen_list[i].attr_name`, so we need to keep stolen_list[i] alive
- if list_name not in attr_mutations_on_stolen_lists:
- attr_mutations_on_stolen_lists[list_name] = []
- attr_mutations_on_stolen_lists[list_name].append(attr_mutation)
- return attr_mutations_on_stolen_lists
-
- def handle_mutations_on_stolen_list_inputs(self):
- # When mutations happen on inputs list elements, those elements must be kept alive after the function call.
- # If the input list is stolen, we perform the mutation on aliases.
- alias_insts = []
- attr_mutations_on_stolen_lists = self.get_attr_mutations_on_stolen_lists()
+ visited = set()
for arg in self.graphargs:
if not (
isinstance(arg._example, list)
and isinstance(arg.source, LocalSource)
- and arg.source.local_name in attr_mutations_on_stolen_lists
+ and arg.source.local_name in needs_alias
):
continue
# arg is a list that will be cleared by the compiled function
list_name = arg.source.local_name
assert list_name in self.code_options["co_varnames"]
- for mutation in attr_mutations_on_stolen_lists[list_name]:
- assert mutation.source is not None
- assert isinstance(mutation.source, GetItemSource)
- list_idx = mutation.source.index
+ for x in needs_alias[list_name]:
+ list_idx = x.source.index
alias_name = self.new_var(
f"{list_name}_ref"
) # self.new_var already adds unique id suffix
- # bytecode of `alias_name = list_name[list_idx]`
- alias_insts.extend(
- [
- create_instruction("LOAD_FAST", argval=list_name),
- create_instruction("LOAD_CONST", argval=list_idx),
- create_instruction("BINARY_SUBSCR"),
- create_instruction("STORE_FAST", argval=alias_name),
- ]
- )
+ if list_idx not in visited:
+ visited.add(list_idx)
+ # bytecode of `alias_name = list_name[list_idx]`
+ alias_insts.extend(
+ [
+ create_instruction("LOAD_FAST", argval=list_name),
+ create_instruction("LOAD_CONST", argval=list_idx),
+ create_instruction("BINARY_SUBSCR"),
+ create_instruction("STORE_FAST", argval=alias_name),
+ ]
+ )
- # perform mutation on alias, handled by suffix codegen
- mutation.source = LocalSource(alias_name)
+ # operate on alias, handled by suffix codegen
+ x.source = LocalSource(alias_name)
return alias_insts
@@ -956,7 +955,7 @@ class OutputGraph:
self.pregraph_bytecode and self.export
), "export does not support pregraph_bytecode"
prefix_insts.extend(self.pregraph_bytecode)
- prefix_insts.extend(self.handle_mutations_on_stolen_list_inputs())
+ prefix_insts.extend(self.handle_aliases_for_stolen_lists(tx))
def append_prefix_insts():
self.add_output_instructions(prefix_insts)
|
2.41.0
|
2271fb07ef88c33a5cb928eec6d8af8bf365493
|
Tue, 16 Apr 2024 18:49:52 +0000
|
[PATCH 0209/1000] Add NEON ISA support on aarch64 (#123584)
|
Fixes #104729 This improves the compiled mode performance of Softmax (by 20%) and other operations (like batchnorm) that invoke the reduce_all function. Thereby also improves BERT inference by around 8%. Tested on a graviton 3 instance (c7g.4xl). Tests were run in a single-threaded manner. Script attached below. Command: `OMP_NUM_THREADS=1 LRU_CACHE_CAPACITY=1024 DNNL_DEFAULT_FPMATH_MODE=BF16 python TestSoftmax.py` [TestSoftmax.txt](https://github.com/pytorch/pytorch/files/14910754/TestSoftmax.txt) ```python import torch import torch.nn as nn from torch.profiler import profile, record_function, ProfilerActivity model = nn.Softmax().eval() compiled_model = torch.compile(model) inputs = torch.randn(1024, 1024) with torch.set_grad_enabled(False): for _ in range(50): compiled_model(inputs) #Warmup print("Warmup over") with profile(activities=[ProfilerActivity.CPU]) as prof: with record_function("model_inference"): for _ in range(100): compiled_model(inputs) print(prof.key_averages().table(sort_by="self_cpu_time_total")) # Check if the compiled model inference and the eager model inference are similar using torch.allclose print(torch.allclose(compiled_model(inputs), model(inputs))) ``` Co-authored-by: Nikita Shulga <2453524+malfet@users.noreply.github.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/123584 Approved by: https://github.com/jgong5, https://github.com/malfet
|
diff --git a/aten/src/ATen/cpu/vec/functional_base.h b/aten/src/ATen/cpu/vec/functional_base.h
index 3b183ad965..48d44dc42c 100644
--- a/aten/src/ATen/cpu/vec/functional_base.h
+++ b/aten/src/ATen/cpu/vec/functional_base.h
@@ -78,6 +78,35 @@ struct VecReduceAllSIMD<float, Op> {
#endif // defined(CPU_CAPABILITY_AVX512)
#endif // defined(__GNUC__) && (__GNUC__ > 5) && !defined(_MSC_VER) && !defined(C10_MOBILE)
+#if defined(__aarch64__) && !defined(C10_MOBILE) && !defined(__CUDACC__)
+template <typename Op>
+struct VecReduceAllSIMD<float, Op> {
+ static inline float apply(const Op& vec_fun, const Vectorized<float>& acc_vec) {
+ using Vec = Vectorized<float>;
+ Vec v = acc_vec;
+
+ // 128-bit shuffle: [a1, a2, a3, a4, a5, a6, a7, a8] -> [a5, a6, a7, a8, a1, a2, a3, a4]
+ Vec v1 = {v.get_high(), v.get_low()};
+ // [a1+a5, a2+a6, a3+a7, a4+a8, -, -, -, -] ('+' stands for the reduction function. Note that the last 4 elements are not required)
+ v = vec_fun(v, v1);
+
+ // 64-bit shuffle: [a1+a5, a2+a6, a3+a7, a4+a8, -, -, -, -] -> [a3+a7, a4+a8, a1+a5, a2+a6, -, -, -, -]
+ float32x4_t v1_1 = vextq_f32(v.get_low(), v.get_low(), 2);
+ v1 = {v1_1, v1_1};
+ // [a1+a3+a5+a7, a2+a4+a6+a8, a1+a3+a5+a7, a2+a4+a6+a8, -, -, -, -]
+ v = vec_fun(v, v1);
+
+ // 32-bit shuffle: [a1+a3+a5+a7, a2+a4+a6+a8, a1+a3+a5+a7, a2+a4+a6+a8, -, -, -, -] -> [a2+a4+a6+a8, a1+a3+a5+a7, a2+a4+a6+a8, a1+a3+a5+a7, -, -, -, -]
+ v1_1 = vrev64q_f32(v.get_low());
+ v1 = {v1_1, v1_1};
+ // [a1+a2+a3+a4+a5+a6+a7+a8, a1+a2+a3+a4+a5+a6+a7+a8, a1+a2+a3+a4+a5+a6+a7+a8, a1+a2+a3+a4+a5+a6+a7+a8, -, -, -, -]
+ v = vec_fun(v, v1);
+
+ return v.get_low()[0];
+ }
+};
+#endif // defined(__aarch64__)
+
template <typename scalar_t, typename Op>
inline scalar_t vec_reduce_all(const Op& vec_fun, const Vectorized<scalar_t>& acc_vec) {
return VecReduceAllSIMD<scalar_t, Op>::apply(vec_fun, acc_vec);
diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py
index 7287aa2e80..72786779c0 100644
--- a/torch/_inductor/codecache.py
+++ b/torch/_inductor/codecache.py
@@ -1146,7 +1146,7 @@ class VecNEON(VecISA):
_dtype_nelements = {torch.float: 8, torch.bfloat16: 16, torch.float16: 16}
def __str__(self) -> str:
- return "neon" # Unused
+ return "asimd" # detects the presence of advanced SIMD on armv8-a kernels
__hash__: Callable[[VecISA], Any] = VecISA.__hash__
@@ -1206,7 +1206,7 @@ class InvalidVecISA(VecISA):
invalid_vec_isa = InvalidVecISA()
-supported_vec_isa_list = [VecAVX512(), VecAVX2()]
+supported_vec_isa_list = [VecAVX512(), VecAVX2(), VecNEON()]
# Cache the cpuinfo to avoid I/O overhead. Meanwhile, the cpuinfo content
|
2.41.0
|
2e22bb4448402ceef5146451863fbebbbe653ec
|
Tue, 16 Apr 2024 20:08:07 +0000
|
[PATCH 0210/1000] [nccl-pg] Pass pg name and desc to NCCL communicator (#124149)
|
Summary: Pass the process group name and description to the NCCL communicator so that pg information can be accessed in the NCCL layer. The information is passed as a commDesc string (i.e. "<pg_desc>:<pg_name>"). This only takes effect when NCCL_COMM_DESCRIPTION is defined. Differential Revision: D55703310 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124149 Approved by: https://github.com/shuqiangzhang
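For illustration only, a tiny plain-Python sketch of the commDesc format (the values are made up and this is not a PyTorch API):

```python
# Compose the "<pg_desc>:<pg_name>" string the way ProcessGroupNCCL does,
# then split it back the way a consumer on the NCCL side could.
pg_desc, pg_name = "default_pg", "0"
comm_desc = f"{pg_desc}:{pg_name}"
desc, name = comm_desc.split(":", 1)
assert (desc, name) == (pg_desc, pg_name)
```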
|
diff --git a/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp b/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp
index 96c6ce2e34..29e4616be9 100644
--- a/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp
+++ b/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp
@@ -1923,6 +1923,12 @@ std::shared_ptr<NCCLComm> ProcessGroupNCCL::getNCCLComm(
// reset log prefix to include group_desc
logPrefix_ = createLogPrefix();
+#ifdef NCCL_COMM_DESCRIPTION
+ // Pass process group name and description to NCCL communicator
+ std::string commDesc = pg_desc_ + ':' + pg_name_;
+ options_->config.commDesc = strdup(commDesc.c_str());
+#endif
+
// For batch_isend_irecv, ncclGroupStart() would be called upfront
bool batchP2P = ncclActiveGroupCounter_ > 0;
bool singleP2POp = isP2POp(opType, batchP2P);
|
2.41.0
|
f89bf4188e35e8596252d8b75c7672394073b4a
|
Tue, 16 Apr 2024 20:23:14 +0000
|
[PATCH 0211/1000] Revert "[reland] `_foreach_copy` with different src/dst dtypes (#123844)"
|
This reverts commit ff1e3ff5a503a520c1a310c8e72a383657f9a4bc. Reverted https://github.com/pytorch/pytorch/pull/123844 on behalf of https://github.com/malfet due to Perhaps it enabled it for different dtype, but broke for the same ([comment](https://github.com/pytorch/pytorch/pull/123844#issuecomment-2059861767))
|
diff --git a/aten/src/ATen/native/ForeachUtils.h b/aten/src/ATen/native/ForeachUtils.h
index d7a1449463..9c22c35ee9 100644
--- a/aten/src/ATen/native/ForeachUtils.h
+++ b/aten/src/ATen/native/ForeachUtils.h
@@ -102,13 +102,12 @@ inline void check_foreach_api_restrictions(
// corresponding tensors (aligning in index across the tensorLists) share the
// same device and dtype.
inline bool _check_tensors_share_device_and_dtype(
- ArrayRef<TensorList> tensorLists,
- const bool skip_dtype_check = false) {
+ ArrayRef<TensorList> tensorLists) {
const auto expected_dtype = tensorLists[0][0].dtype();
const auto expected_device = tensorLists[0][0].device();
auto is_tensor_okay = [&](const Tensor& tensor) {
- return (skip_dtype_check || tensor.dtype() == expected_dtype) &&
+ return tensor.dtype() == expected_dtype &&
tensor.device() == expected_device && tensor.layout() == at::kStrided &&
tensor.is_non_overlapping_and_dense();
};
diff --git a/aten/src/ATen/native/cuda/ForeachBinaryOpList.cu b/aten/src/ATen/native/cuda/ForeachBinaryOpList.cu
index 7329360230..366049a540 100644
--- a/aten/src/ATen/native/cuda/ForeachBinaryOpList.cu
+++ b/aten/src/ATen/native/cuda/ForeachBinaryOpList.cu
@@ -1,11 +1,9 @@
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/Dispatch.h>
-#include <ATen/cuda/CUDAContext.h>
#include <ATen/native/ForeachUtils.h>
#include <ATen/native/cuda/ForeachFunctors.cuh>
#include <ATen/native/cuda/ForeachMinMaxFunctors.cuh>
#include <functional>
-#include <type_traits>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/NativeFunctions.h>
@@ -252,154 +250,20 @@ FOREACH_BINARY_OP_LIST(
power_functor,
/*division_op*/ true);
-template <typename dst_t, typename src_t = dst_t>
-struct Copy {
- __device__ __forceinline__ dst_t operator()(const src_t& x) {
- return static_cast<dst_t>(x);
+template <typename T>
+struct Identity {
+ __device__ __forceinline__ T operator()(const T& x) {
+ return x;
}
};
-template <typename dst_t>
-struct Copy<dst_t, c10::complex<double>> {
- __device__ __forceinline__ dst_t operator()(const c10::complex<double>& x) {
- if constexpr (!(std::is_same_v<dst_t, c10::complex<double>> ||
- std::is_same_v<dst_t, c10::complex<float>>)) {
- return static_cast<dst_t>(x.real());
- } else {
- return static_cast<dst_t>(x);
- }
- }
-};
-
-template <typename dst_t>
-struct Copy<dst_t, c10::complex<float>> {
- __device__ __forceinline__ dst_t operator()(const c10::complex<float>& x) {
- if constexpr (!(std::is_same_v<dst_t, c10::complex<double>> ||
- std::is_same_v<dst_t, c10::complex<float>>)) {
- return static_cast<dst_t>(x.real());
- } else {
- return static_cast<dst_t>(x);
- }
- }
-};
-
-#define AT_DISPATCH_SOURCE_TYPES(TYPE, NAME, ...) \
- AT_DISPATCH_SWITCH( \
- TYPE, \
- NAME, \
- AT_PRIVATE_CASE_TYPE_USING_HINT( \
- at::ScalarType::Byte, src_t, __VA_ARGS__) \
- AT_PRIVATE_CASE_TYPE_USING_HINT( \
- at::ScalarType::Char, src_t, __VA_ARGS__) \
- AT_PRIVATE_CASE_TYPE_USING_HINT( \
- at::ScalarType::Long, src_t, __VA_ARGS__) \
- AT_PRIVATE_CASE_TYPE_USING_HINT( \
- at::ScalarType::Short, src_t, __VA_ARGS__) \
- AT_PRIVATE_CASE_TYPE_USING_HINT( \
- at::ScalarType::Double, src_t, __VA_ARGS__) \
- AT_PRIVATE_CASE_TYPE_USING_HINT( \
- at::ScalarType::Float, src_t, __VA_ARGS__) \
- AT_PRIVATE_CASE_TYPE_USING_HINT( \
- at::ScalarType::ComplexDouble, \
- src_t, \
- __VA_ARGS__) \
- AT_PRIVATE_CASE_TYPE_USING_HINT( \
- at::ScalarType::ComplexFloat, \
- src_t, \
- __VA_ARGS__) \
- AT_PRIVATE_CASE_TYPE_USING_HINT( \
- at::ScalarType::Half, \
- src_t, \
- __VA_ARGS__) \
- AT_PRIVATE_CASE_TYPE_USING_HINT( \
- at::ScalarType::BFloat16, \
- src_t, \
- __VA_ARGS__) \
- AT_PRIVATE_CASE_TYPE_USING_HINT( \
- at::ScalarType::Bool, \
- src_t, \
- __VA_ARGS__))
-
-namespace {
-
-template <
- typename T,
- typename src_t,
- int depth,
- int r_args_depth,
- int res_arg_index>
-struct CopyFunctor {
- static_assert(depth == 2 && r_args_depth == 1 && res_arg_index == 1);
- template <typename Op>
- __device__ __forceinline__ void operator()(
- int chunk_size,
- TensorListMetadata<depth>& tl,
- Op op) {
- const auto tensor_loc = tl.block_to_tensor[blockIdx.x];
- const auto chunk_idx = tl.block_to_chunk[blockIdx.x];
- auto n = tl.numel_for_tensor[tensor_loc];
-
- src_t* src_ptr = (src_t*)tl.addresses[0][tensor_loc];
- src_ptr += chunk_idx * chunk_size;
- T* self_ptr = (T*)tl.addresses[1][tensor_loc];
- self_ptr += chunk_idx * chunk_size;
-
- const bool all_aligned{is_aligned(src_ptr) && is_aligned(self_ptr)};
-
- n -= chunk_idx * chunk_size;
- src_t src_args[kILP];
- T r_args[kILP];
-
- // to make things simple, we put aligned case in a different code path
- if (n % kILP == 0 && chunk_size % kILP == 0 && all_aligned) {
- for (int64_t i_start = threadIdx.x;
- i_start * kILP < n && i_start * kILP < chunk_size;
- i_start += blockDim.x) {
- // load
- load_store(src_args, src_ptr, 0, i_start);
-#pragma unroll
- for (int ii = 0; ii < kILP; ii++) {
- r_args[ii] = static_cast<T>(op(src_args[ii]));
- }
- // store
- load_store(self_ptr, r_args, i_start, 0);
- }
- } else {
- for (int64_t i_start = 0; i_start < n && i_start < chunk_size;
- i_start += blockDim.x * kILP) {
-#pragma unroll
- for (int ii = 0; ii < kILP; ii++) {
- const auto i = i_start + threadIdx.x + ii * blockDim.x;
- if (i < n && i < chunk_size) {
- src_args[ii] = src_ptr[i];
- }
- }
-#pragma unroll
- for (int ii = 0; ii < kILP; ii++) {
- r_args[ii] = static_cast<T>(op(src_args[ii]));
- }
- store_args(self_ptr, r_args, i_start, chunk_size, n);
- }
- }
- }
-};
-
-} // anonymous namespace
-
void foreach_tensor_copy_list_kernel_cuda_(
TensorList self,
TensorList src,
const bool non_blocking) {
check_foreach_api_restrictions(self, src);
- if (!(_check_tensors_share_device_and_dtype(
- {self, src}, /* skip_dtype_check */ true) &&
- std::all_of(
- src.cbegin(),
- src.cend(),
- [&](const auto& t) -> bool {
- return t.dtype() == src[0].dtype();
- }) &&
- _check_tensors_share_sizes_and_strides({self, src}))) {
+ if (!can_use_fast_route(
+ self, src, /* does_op_promote_integer_inputs_to_float */ false)) {
return at::native::foreach_tensor_copy_list_kernel_slow_(
self, src, non_blocking);
}
@@ -414,38 +278,16 @@ void foreach_tensor_copy_list_kernel_cuda_(
"foreach_tensor_copy",
[&]() {
using opmath_t = at::opmath_type<scalar_t>;
- AT_DISPATCH_SOURCE_TYPES(src[0].scalar_type(), "foreach_tensor_copy", [&] {
- if constexpr (std::is_same_v<scalar_t, src_t>) {
- multi_tensor_apply<2>(
- tensor_lists,
- UnaryOpFunctor<
- scalar_t,
- /* depth */ 2,
- /* r_args_depth */ 1,
- /* res_arg_index */ 1>(),
- Copy<opmath_t, opmath_t>());
- } else {
- // Ref:
- // https://github.com/pytorch/pytorch/blob/656134c38f4737d13c3f43fc5c59470bc23c1d2f/aten/src/ATen/native/Copy.cpp#L299-L301
- if (!self[0].is_complex() && src[0].is_complex()) {
- TORCH_WARN_ONCE(
- "Casting complex values to real discards the imaginary part");
- }
- multi_tensor_apply<2>(
- tensor_lists,
- CopyFunctor<
- scalar_t,
- src_t,
- /* depth */ 2,
- /* r_args_depth */ 1,
- /* res_arg_index */ 1>(),
- Copy<scalar_t, src_t>());
- }
- });
+ multi_tensor_apply<2>(
+ tensor_lists,
+ UnaryOpFunctor<
+ scalar_t,
+ /* depth */ 2,
+ /* r_args_depth */ 1,
+ /* res_arg_index */ 1>(),
+ Identity<opmath_t>());
});
increment_version(self);
}
-#undef AT_DISPATCH_SOURCE_TYPES
-
} // namespace at::native
diff --git a/test/test_foreach.py b/test/test_foreach.py
index 27867a4ace..19d695762c 100644
--- a/test/test_foreach.py
+++ b/test/test_foreach.py
@@ -1206,28 +1206,6 @@ class TestForeach(TestCase):
copy_(t, s, non_blocking)
self.assertEqual(ref_input, sample.input)
- @onlyCUDA
- @ops(filter(lambda op: op.name == "_foreach_copy", foreach_binary_op_db))
- def test_foreach_copy_with_multi_dtypes(self, device, dtype, op):
- # check (a) multi_tensor_apply is called and (b) numerical parity with for-loop and Tensor.copy_
- foreach_copy_ = ForeachFuncWrapper(op.inplace_variant)
- for sample in op.sample_inputs(device, dtype, noncontiguous=False):
- for src_dtype in floating_types_and(torch.half, torch.bfloat16):
- if src_dtype == dtype:
- continue
- self_tensors = [t.clone() for t in sample.input]
- src_tensors = [t.to(src_dtype) for t in self_tensors]
- out = foreach_copy_(
- (self_tensors, src_tensors), is_cuda=True, expect_fastpath=True
- )
- self.assertEqual(
- out,
- [
- torch.empty_like(t).copy_(s)
- for t, s in zip(self_tensors, src_tensors)
- ],
- )
-
# Test reverse-mode & forward-mode AD if supported.
@onlyCUDA
@ops(
|
2.41.0
|
6a25cc0db0a47aec560f732a2228c319ca8a589
|
Tue, 16 Apr 2024 10:22:34 -0700
|
[PATCH 0214/1000] [DCP] Adds support for non-primitives in async_save by deep copying during cpu offloading (#123941)
|
Adds support for non-primitives in async_save by deep copying during CPU offloading. If users are not type checking, the expectation in an async save is likely that the object is copied. Differential Revision: [D56065237](https://our.internmc.facebook.com/intern/diff/D56065237/) Pull Request resolved: https://github.com/pytorch/pytorch/pull/123941 Approved by: https://github.com/fegin
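A minimal stdlib-only sketch of the hazard the deep copy avoids; the names and values are illustrative and this is not the DCP API:

```python
import copy
from concurrent.futures import ThreadPoolExecutor

state_dict = {"step": [10]}            # a non-primitive, non-type-checked value

# What the patched offload path does for type_check=False: snapshot by value,
# not by reference.
offloaded = copy.deepcopy(state_dict)

with ThreadPoolExecutor(max_workers=1) as ex:
    fut = ex.submit(lambda sd: dict(sd), offloaded)  # stand-in for the save thread
    state_dict["step"][0] += 1                       # training mutates the original
    saved = fut.result()

assert saved["step"] == [10]           # the async snapshot is unaffected
```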
|
diff --git a/torch/distributed/_state_dict_utils.py b/torch/distributed/_state_dict_utils.py
index 074bd5375a..b69be53bb2 100644
--- a/torch/distributed/_state_dict_utils.py
+++ b/torch/distributed/_state_dict_utils.py
@@ -1,3 +1,4 @@
+import copy
import io
import math
from typing import Any, Callable, Dict, Optional, Tuple, TYPE_CHECKING
@@ -166,7 +167,7 @@ def _iterate_state_dict(
if isinstance(iter_object, tuple):
ret = tuple(ret)
elif not type_check:
- ret = iter_object
+ ret = copy.deepcopy(iter_object)
else:
raise ValueError(f"Unexpected value type {type(iter_object)}")
diff --git a/torch/distributed/checkpoint/state_dict_saver.py b/torch/distributed/checkpoint/state_dict_saver.py
index b79ce3a496..f2180da8c7 100644
--- a/torch/distributed/checkpoint/state_dict_saver.py
+++ b/torch/distributed/checkpoint/state_dict_saver.py
@@ -216,7 +216,9 @@ def async_save(
torch.device("cpu") in pg._device_types # type: ignore[attr-defined]
), "A CPU backend must be enabled for async save; try initializing process group with 'cpu:gloo,cuda:nccl'"
- cpu_state_dict = _offload_state_dict_to_cpu(_stateful_to_state_dict(state_dict))
+ cpu_state_dict = _offload_state_dict_to_cpu(
+ _stateful_to_state_dict(state_dict), type_check=False
+ )
executor = ThreadPoolExecutor(max_workers=1)
f: Future = executor.submit(
|
2.41.0
|
06c4f1367c7eb4a49aa4a9538dd2b1eb92485d6
|
Tue, 16 Apr 2024 21:18:18 +0000
|
[PATCH 0215/1000] [PT] [ST] fix test_sharded_tensor (#124103)
|
Summary: https://github.com/pytorch/pytorch/pull/123230 formalizes the rank validation to support subgroups. It broke a few UTs, some of which got fixed in https://github.com/pytorch/pytorch/pull/123778. This fixes the remaining one, reported by DanilBaibak. Test Plan: CI Differential Revision: D56155076 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124103 Approved by: https://github.com/fegin
|
diff --git a/test/distributed/_shard/sharded_tensor/test_sharded_tensor.py b/test/distributed/_shard/sharded_tensor/test_sharded_tensor.py
index e0a71a06d6..da894062ac 100644
--- a/test/distributed/_shard/sharded_tensor/test_sharded_tensor.py
+++ b/test/distributed/_shard/sharded_tensor/test_sharded_tensor.py
@@ -840,8 +840,8 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
spec = ChunkShardingSpec(
dim=0,
placements=[
- "rank:1/cuda:2",
- "rank:2/cuda:3",
+ "rank:2/cuda:2",
+ "rank:3/cuda:3",
],
)
@@ -866,7 +866,7 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
for shard_rank, shard_metadata in enumerate(shards_metadata):
self.assertEqual([shard_rank * 5, 0], shard_metadata.shard_offsets)
self.assertEqual([5, 20], shard_metadata.shard_sizes)
- self.assertEqual(f'rank:{shard_rank + 1}/cuda:{shard_rank + 2}', str(shard_metadata.placement))
+ self.assertEqual(f'rank:{shard_rank + 2}/cuda:{shard_rank + 2}', str(shard_metadata.placement))
# Validate remote shards.
remote_shards = st.remote_shards()
@@ -880,7 +880,7 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
for remote_shard in shards:
shard = remote_shard.to_here()
self.assertEqual(rpc_rank, remote_shard.owner().id)
- self.assertEqual(f'rank:{rpc_rank - 1}/cuda:{rpc_rank}', str(shard.metadata.placement))
+ self.assertEqual(f'rank:{rpc_rank}/cuda:{rpc_rank}', str(shard.metadata.placement))
self.assertEqual((5, 20), shard.tensor.size())
@with_comms
@@ -981,7 +981,10 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
sharded_tensor.empty(spec, 10, 20)
spec = ChunkShardingSpec(dim=0, placements=["rank:5/cuda:1"])
- with self.assertRaisesRegex(ValueError, 'Invalid rank'):
+ with self.assertRaisesRegex(
+ ValueError,
+ "Global rank 5 does not exist in input process group"
+ ):
sharded_tensor.empty(spec, 10, 20)
spec = ChunkShardingSpec(dim=0, placements=["rank:0/cuda:1"])
@@ -1180,10 +1183,10 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
spec = ChunkShardingSpec(
dim=0,
placements=[
- "rank:0/cuda:0",
- "rank:1/cuda:1",
- "rank:0/cuda:2",
- "rank:1/cuda:3",
+ "rank:2/cuda:0",
+ "rank:3/cuda:1",
+ "rank:2/cuda:2",
+ "rank:3/cuda:3",
],
)
@@ -1945,12 +1948,12 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
ShardMetadata(
shard_offsets=[0, 0],
shard_sizes=[5, 5],
- placement="rank:0/cuda:1",
+ placement="rank:1/cuda:1",
),
ShardMetadata(
shard_offsets=[5, 0],
shard_sizes=[5, 5],
- placement="rank:2/cuda:3",
+ placement="rank:3/cuda:3",
),
])
@@ -1967,7 +1970,7 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
# Verify local shard metadata.
self.assertEqual((self.rank // 2 * 5, 0), local_shard.metadata.shard_offsets)
self.assertEqual((5, 5), local_shard.metadata.shard_sizes)
- self.assertEqual(f'rank:{self.rank - 1}/cuda:{self.rank}', str(local_shard.metadata.placement))
+ self.assertEqual(f'rank:{self.rank}/cuda:{self.rank}', str(local_shard.metadata.placement))
# Verify global metadata.
st_metadata = st.metadata()
@@ -1976,7 +1979,7 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
for rank, shard_metadata in enumerate(shards_metadata):
self.assertEqual((rank * 5, 0), shard_metadata.shard_offsets)
self.assertEqual((5, 5), shard_metadata.shard_sizes)
- self.assertEqual(f'rank:{rank * 2}/cuda:{rank * 2 + 1}', str(shard_metadata.placement))
+ self.assertEqual(f'rank:{rank * 2 + 1}/cuda:{rank * 2 + 1}', str(shard_metadata.placement))
# Validate remote shards.
remote_shards = st.remote_shards()
@@ -1985,7 +1988,6 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
else:
self.assertEqual(2, len(remote_shards))
- owners = {}
for rpc_rank, shards in remote_shards.items():
self.assertEqual(1, len(shards))
@@ -2457,7 +2459,7 @@ class TestShardedTensorFromLocalShards(ShardedTensorTestBase):
local_shard_metadata = ShardMetadata(
shard_offsets=[5 * (self.rank - 1), 0],
shard_sizes=[5, 5],
- placement=f"rank:{self.rank - 1}/cuda:{self.rank}"
+ placement=f"rank:{self.rank}/cuda:{self.rank}"
)
local_shards = [sharded_tensor.Shard(torch.randn(5, 5, device=f"cuda:{self.rank}"), local_shard_metadata)]
@@ -2471,7 +2473,7 @@ class TestShardedTensorFromLocalShards(ShardedTensorTestBase):
# Verify local shard metadata.
self.assertEqual(((self.rank - 1) * 5, 0), local_shard.metadata.shard_offsets)
self.assertEqual((5, 5), local_shard.metadata.shard_sizes)
- self.assertEqual(f'rank:{self.rank - 1}/cuda:{self.rank}', str(local_shard.metadata.placement))
+ self.assertEqual(f'rank:{self.rank}/cuda:{self.rank}', str(local_shard.metadata.placement))
# Verify global metadata.
st_metadata = st.metadata()
@@ -2480,7 +2482,7 @@ class TestShardedTensorFromLocalShards(ShardedTensorTestBase):
for rank, shard_metadata in enumerate(shards_metadata):
self.assertEqual((rank * 5, 0), shard_metadata.shard_offsets)
self.assertEqual((5, 5), shard_metadata.shard_sizes)
- self.assertEqual(f'rank:{rank}/cuda:{rank + 1}', str(shard_metadata.placement))
+ self.assertEqual(f'rank:{rank + 1}/cuda:{rank + 1}', str(shard_metadata.placement))
@with_comms
diff --git a/torch/distributed/_shard/sharded_tensor/api.py b/torch/distributed/_shard/sharded_tensor/api.py
index 2fd2bdcfd4..2433a6a946 100644
--- a/torch/distributed/_shard/sharded_tensor/api.py
+++ b/torch/distributed/_shard/sharded_tensor/api.py
@@ -262,7 +262,7 @@ class ShardedTensor(ShardedTensorBase):
self._metadata.tensor_properties.memory_format = memory_format
- current_rank = dist.get_rank(self._process_group)
+ current_rank = dist.get_rank() # global rank
for shard_metadata in self._metadata.shards_metadata:
rank, device = _parse_and_validate_remote_device(self._process_group, shard_metadata.placement)
diff --git a/torch/distributed/_shard/sharded_tensor/utils.py b/torch/distributed/_shard/sharded_tensor/utils.py
index 9fd380a2d9..246bc37b24 100644
--- a/torch/distributed/_shard/sharded_tensor/utils.py
+++ b/torch/distributed/_shard/sharded_tensor/utils.py
@@ -23,7 +23,7 @@ def _parse_and_validate_remote_device(pg, remote_device):
device = remote_device.device()
# Validate rank, skip validation if rank is not part of process group.
- if not c10d._rank_not_in_group(pg):
+ if rank is not None and not c10d._rank_not_in_group(pg):
pg_global_ranks = c10d.get_process_group_ranks(pg)
if rank not in pg_global_ranks:
raise ValueError(
|
2.41.0
|
d88339b535f57cd0e2926c9ac4c2542e4490aac
|
Tue, 16 Apr 2024 22:08:24 +0000
|
[PATCH 0217/1000] Revert "make sure dynamo doesn't inline DTensor __new__ or __torch_dispatch__ (#123347)"
|
This reverts commit 63dcb5b0f2ef3578e81841fd8a2166e732c0ca99. Reverted https://github.com/pytorch/pytorch/pull/123347 on behalf of https://github.com/facebook-github-bot due to Diff reverted internally ([comment](https://github.com/pytorch/pytorch/pull/123347#issuecomment-2059994989))
|
diff --git a/test/distributed/_tensor/test_dtensor_compile.py b/test/distributed/_tensor/test_dtensor_compile.py
index 5f98050e83..f9ad0278d7 100644
--- a/test/distributed/_tensor/test_dtensor_compile.py
+++ b/test/distributed/_tensor/test_dtensor_compile.py
@@ -191,47 +191,6 @@ class TestDTensorCompile(torch._dynamo.test_case.TestCase):
res = opt_fn(x)
self.assertEqual(res, ref)
- def test_dtensor_constructor_w_graph_break(self):
- mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
-
- # test passing in DTensor as inputs/outputs and run some tensor computation
- def fn(x):
- print("graph break!")
- return DTensor(
- x,
- mesh,
- (Replicate(), Shard(0)),
- shape=[128, 32],
- dtype=x.dtype,
- requires_grad=x.requires_grad,
- stride=[32, 1],
- )
-
- x = torch.randn(64, 32, requires_grad=True)
- out = fn(x)
- out2 = torch.compile(fn, backend="eager")(x)
-
- def test_dtensor_constructor_w_dynamo_disable(self):
- mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
-
- @torch._dynamo.disable(recursive=False)
- def fn(x):
- print("foo")
- return DTensor(
- x,
- mesh,
- (Replicate(),),
- shape=torch.Size([32]),
- dtype=x.dtype,
- requires_grad=x.requires_grad,
- stride=(1,),
- )
-
- x = torch.randn(32, requires_grad=True)
- out = fn(x)
- out2 = torch.compile(fn, backend="eager")(x)
- self.assertEqual(out, out2)
-
def test_dtensor_noncontiguous_output(self):
mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
diff --git a/torch/distributed/_tensor/api.py b/torch/distributed/_tensor/api.py
index 9ef008156c..00079ef946 100644
--- a/torch/distributed/_tensor/api.py
+++ b/torch/distributed/_tensor/api.py
@@ -198,7 +198,6 @@ class DTensor(torch.Tensor): # pyre-ignore[13]: pyre is bad at __new__
_op_dispatcher: op_dispatch.OpDispatcher = op_dispatch.OpDispatcher()
@staticmethod
- @torch._dynamo.disable
def __new__(
cls,
local_tensor: torch.Tensor,
@@ -289,7 +288,6 @@ class DTensor(torch.Tensor): # pyre-ignore[13]: pyre is bad at __new__
)
@classmethod
- @torch._dynamo.disable
# pyre-fixme[3]: Return type must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
|
2.41.0
|
d7ac7baa0685557906c730202a96d554b7fbae5
|
Thu, 11 Apr 2024 17:35:39 -0700
|
[PATCH 0219/1000] [dtensor][3/N] add torchrec row-wise uneven sharding example (#121392)
|
**Test** `torchrun --standalone --nnodes=1 --nproc-per-node=4 torch/distributed/_tensor/examples/torchrec_sharding_example.py -e row-wise-uneven` Pull Request resolved: https://github.com/pytorch/pytorch/pull/121392 Approved by: https://github.com/wanchaol ghstack dependencies: #120265
|
diff --git a/torch/distributed/_tensor/examples/torchrec_sharding_example.py b/torch/distributed/_tensor/examples/torchrec_sharding_example.py
index 7e2b249b85..e64a9f215d 100644
--- a/torch/distributed/_tensor/examples/torchrec_sharding_example.py
+++ b/torch/distributed/_tensor/examples/torchrec_sharding_example.py
@@ -4,6 +4,8 @@ sharding with the DTensor API.
"""
import argparse
import os
+from functools import cached_property
+from typing import List
import torch
@@ -15,6 +17,12 @@ from torch.distributed._tensor import (
Shard,
)
from torch.distributed._tensor.debug.visualize_sharding import visualize_sharding
+from torch.distributed._tensor.placement_types import Placement
+from torch.distributed.checkpoint.metadata import (
+ ChunkStorageMetadata,
+ TensorProperties,
+ TensorStorageMetadata,
+)
def get_device_type():
@@ -25,9 +33,87 @@ def get_device_type():
)
-def run_torchrec_row_wise_sharding_example(rank, world_size):
- # row-wise example:
- # one table is sharded by rows within the global ProcessGroup
+aten = torch.ops.aten
+supported_ops = [aten.view.default, aten._to_copy.default]
+
+
+# this torch.Tensor subclass is a wrapper around all local shards associated
+# with a single sharded embedding table.
+class LocalShardsWrapper(torch.Tensor):
+ local_shards: List[torch.Tensor]
+ storage_meta: TensorStorageMetadata
+
+ @staticmethod
+ def __new__(
+ cls, local_shards: List[torch.Tensor], offsets: List[torch.Size]
+ ) -> "LocalShardsWrapper":
+ assert len(local_shards) > 0
+ assert len(local_shards) == len(offsets)
+ assert local_shards[0].ndim == 2
+ # we calculate the total tensor size by "concat" on second tensor dimension
+ cat_tensor_shape = list(local_shards[0].shape)
+ if len(local_shards) > 1: # column-wise sharding
+ for shard_size in [s.shape for s in local_shards[1:]]:
+ cat_tensor_shape[1] += shard_size[1]
+
+ # according to DCP, each chunk is expected to have the same properties of the
+ # TensorStorageMetadata that includes it. Vice versa, the wrapper's properties
+ # should also be the same with that of its first chunk.
+ wrapper_properties = TensorProperties.create_from_tensor(local_shards[0])
+ wrapper_shape = torch.Size(cat_tensor_shape)
+ chunks_meta = [
+ ChunkStorageMetadata(o, s.shape) for s, o in zip(local_shards, offsets)
+ ]
+
+ r = torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined]
+ cls,
+ wrapper_shape,
+ )
+ r.shards = local_shards
+ r.storage_meta = TensorStorageMetadata(
+ properties=wrapper_properties,
+ size=wrapper_shape,
+ chunks=chunks_meta,
+ )
+
+ return r
+
+ # necessary for ops dispatching from this subclass to its local shards
+ @classmethod
+ def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
+ kwargs = kwargs or {}
+
+ # TODO: we shall continually extend this function to support more ops if needed
+ if func in supported_ops:
+ res_shards_list = [
+ func(shard, *args[1:], **kwargs) for shard in args[0].shards
+ ]
+ return LocalShardsWrapper(res_shards_list, args[0].shard_offsets)
+ else:
+ raise NotImplementedError(
+ f"{func} is not supported for LocalShardsWrapper!"
+ )
+
+ @property
+ def shards(self) -> List[torch.Tensor]:
+ return self.local_shards
+
+ @shards.setter
+ def shards(self, local_shards: List[torch.Tensor]):
+ self.local_shards = local_shards
+
+ @cached_property
+ def shard_sizes(self) -> List[torch.Size]:
+ return [chunk.sizes for chunk in self.storage_meta.chunks]
+
+ @cached_property
+ def shard_offsets(self) -> List[torch.Size]:
+ return [chunk.offsets for chunk in self.storage_meta.chunks]
+
+
+def run_torchrec_row_wise_even_sharding_example(rank, world_size):
+ # row-wise even sharding example:
+ # One table is evenly sharded by rows within the global ProcessGroup.
# In our example, the table's num_embedding is 8, and the embedding dim is 16
# The global ProcessGroup has 4 ranks, so each rank will have one 2 by 16 local
# shard.
@@ -45,13 +131,16 @@ def run_torchrec_row_wise_sharding_example(rank, world_size):
local_shard_shape = torch.Size(
[num_embeddings // world_size, embedding_dim] # (local_rows, local_cols)
)
- # in our case, the embedding table will be sharded row-wisely into 4 local shards
- # and each rank will have 1 of them.
- local_shards = [
- torch.randn(local_shard_shape, device=device) for _ in range(world_size)
- ]
+ # tensor offset
+ local_shard_offset = torch.Size((rank * 2, embedding_dim))
+ # tensor
+ local_tensor = torch.randn(local_shard_shape, device=device)
# row-wise sharding: one shard per rank
- local_shard = local_shards[rank]
+ # create the local shards wrapper
+ local_shards_wrapper = LocalShardsWrapper(
+ local_shards=[local_tensor],
+ offsets=[local_shard_offset],
+ )
###########################################################################
# example 1: transform local_shards into DTensor
@@ -66,22 +155,15 @@ def run_torchrec_row_wise_sharding_example(rank, world_size):
# this is the sharding placement we use in DTensor to represent row-wise sharding
# row_wise_sharding_placements means that the global tensor is sharded by first dim
# over the 1-d mesh.
- row_wise_sharding_placements = [Shard(0)]
+ row_wise_sharding_placements: List[Placement] = [Shard(0)]
+
# create a DTensor from the local shard
dtensor = DTensor.from_local(
- local_shard, device_mesh, row_wise_sharding_placements, run_check=False
+ local_shards_wrapper, device_mesh, row_wise_sharding_placements, run_check=False
)
# display the DTensor's sharding
- visualize_sharding(dtensor, header="Row-wise sharding example in DTensor")
-
- # get the global tensor from the DTensor
- dtensor_full = dtensor.full_tensor() # torch.Tensor
- # manually compose the global tensor from the local shards
- global_tensor = torch.cat(local_shards, dim=0)
- # we demonstrate that the DTensor constructed has the same
- # global view as the actual global tensor
- assert torch.equal(dtensor_full, global_tensor)
+ visualize_sharding(dtensor, header="Row-wise even sharding example in DTensor")
###########################################################################
# example 2: transform DTensor into local_shards
@@ -90,34 +172,87 @@ def run_torchrec_row_wise_sharding_example(rank, world_size):
# _pre_load_state_dict_hook, if the source param is a ShardedTensor
# then we need to transform it into its local_shards.
- # transform DTensor into local_shards
- local_shard = dtensor.to_local()
-
- # another case is that the source param is a torch.Tensor. In this case,
- # the source param is the global tensor rather than shards so we need to
- # splice the global tensor into local shards. This will be identical to
- # existing code in TorchRec.
- local_shard_shape_list = list(local_shard_shape)
- local_shard_spliced = global_tensor[
- local_shard_shape_list[0] * rank : local_shard_shape_list[0] * (rank + 1),
- :,
- ]
- # the local shard obtained from both approaches should be identical
- assert torch.equal(local_shard, local_shard_spliced)
+ # transform DTensor into LocalShardsWrapper
+ dtensor_local_shards = dtensor.to_local()
+ assert isinstance(dtensor_local_shards, LocalShardsWrapper)
+ shard_tensor = dtensor_local_shards.shards[0]
+ assert torch.equal(shard_tensor, local_tensor)
+ assert dtensor_local_shards.shard_sizes[0] == local_shard_shape # unwrap shape
+ assert dtensor_local_shards.shard_offsets[0] == local_shard_offset # unwrap offset
+
+
+def run_torchrec_row_wise_uneven_sharding_example(rank, world_size):
+ # row-wise uneven sharding example:
+ # One table is unevenly sharded by rows within the global ProcessGroup.
+ # In our example, the table's num_embedding is 8, and the embedding dim is 16
+ # The global ProcessGroup has 4 ranks, and each rank will have the local shard
+ # of shape:
+ # rank 0: [1, 16]
+ # rank 1: [3, 16]
+ # rank 2: [1, 16]
+ # rank 3: [3, 16]
+
+ # device mesh is a representation of the worker ranks
+ # create a 1-D device mesh that includes every rank
+ device_type = get_device_type()
+ device = torch.device(device_type)
+ device_mesh = init_device_mesh(device_type=device_type, mesh_shape=(world_size,))
+
+ # manually create the embedding table's local shards
+ num_embeddings = 8
+ embedding_dim = 16
+ emb_table_shape = torch.Size([num_embeddings, embedding_dim])
+ # tensor shape
+ local_shard_shape = (
+ torch.Size([1, embedding_dim])
+ if rank % 2 == 0
+ else torch.Size([3, embedding_dim])
+ )
+ # tensor offset
+ local_shard_offset = (rank // 2 * 4 + rank % 2 * 1, embedding_dim)
+ # tensor
+ local_tensor = torch.randn(local_shard_shape, device=device)
+ # local shards
+ # row-wise sharding: one shard per rank
+ local_shards = [TensorShard(local_tensor, local_shard_shape, local_shard_offset)]
###########################################################################
- # example 3: load state dict
- # usage in TorchRec:
- # In case where the source param and the destination param are both
- # DTensors, we can directly call DTensor.copy_() to load the state.
- src_dtensor = torch.distributed._tensor.ones(
- emb_table_shape,
- device_mesh=device_mesh,
- placements=row_wise_sharding_placements,
+ # example 1: transform local_shards into DTensor
+ # create the DTensorMetadata which torchrec should provide
+ row_wise_sharding_placements: List[Placement] = [Shard(0)]
+ dtensor_metadata = DTensorMetadata(device_mesh, row_wise_sharding_placements)
+
+ # create the local shards wrapper
+ local_shards_wrapper = LocalShardsWrapper(local_shards)
+
+ # note: for uneven sharding, we need to specify the shape and stride because
+ # DTensor would assume even sharding and compute shape/stride based on the
+ # assumption. Torchrec needs to pass in this information explicitely.
+ # shape/stride are global tensor's shape and stride
+ dtensor = DTensor.from_local(
+ local_shards_wrapper, # a torch.Tensor subclass
+ dtensor_metadata.device_mesh, # DeviceMesh
+ dtensor_metadata.placements, # List[Placement]
+ run_check=False,
+ shape=emb_table_shape, # this is required for uneven sharding
+ stride=(embedding_dim, 1),
)
- dtensor.copy_(src_dtensor)
- # these two DTensors should have the same global view after loading
- assert torch.equal(dtensor.full_tensor(), src_dtensor.full_tensor())
+ # so far visualize_sharding() cannot print correctly for unevenly sharded DTensor
+ # because it relies on offset computation which assumes even sharding.
+ visualize_sharding(dtensor, header="Row-wise uneven sharding example in DTensor")
+ # check the dtensor has the correct shape and stride on all ranks
+ assert dtensor.shape == emb_table_shape
+ assert dtensor.stride() == (embedding_dim, 1)
+
+ ###########################################################################
+ # example 2: transform DTensor into local_shards
+ # note: DTensor.to_local() always returns a LocalShardsWrapper
+ dtensor_local_shards = dtensor.to_local()
+ assert isinstance(dtensor_local_shards, LocalShardsWrapper)
+ dtensor_shard = dtensor_local_shards[0]
+ assert torch.equal(dtensor_shard.tensor, local_tensor) # unwrap tensor
+ assert dtensor_shard.shard_size == local_shard_shape # unwrap shape
+ assert dtensor_shard.shard_offset == local_shard_offset # unwrap offset
def run_torchrec_table_wise_sharding_example(rank, world_size):
@@ -195,7 +330,8 @@ def run_torchrec_table_wise_sharding_example(rank, world_size):
def run_example(rank, world_size, example_name):
# the dict that stores example code
name_to_example_code = {
- "row-wise": run_torchrec_row_wise_sharding_example,
+ "row-wise-even": run_torchrec_row_wise_even_sharding_example,
+ "row-wise-uneven": run_torchrec_row_wise_uneven_sharding_example,
"table-wise": run_torchrec_table_wise_sharding_example,
}
if example_name not in name_to_example_code:
@@ -224,9 +360,10 @@ if __name__ == "__main__":
)
example_prompt = (
"choose one sharding example from below:\n"
- "\t1. row-wise;\n"
- "\t2. table-wise\n"
- "e.g. you want to try the row-wise sharding example, please input 'row-wise'\n"
+ "\t1. row-wise-even;\n"
+ "\t2. row-wise-uneven\n"
+ "\t3. table-wise\n"
+ "e.g. you want to try the row-wise even sharding example, please input 'row-wise-even'\n"
)
parser.add_argument("-e", "--example", help=example_prompt, required=True)
args = parser.parse_args()
|
2.41.0
|
419fcd19f30351b2fecfd476445a92be291d808
|
Thu, 11 Apr 2024 17:35:39 -0700
|
[PATCH 0220/1000] [dtensor][4/N] have row-wise sharding always use LocalShardsWrapper (#122843)
|
**Summary** Always wrap the local tensor into a `LocalShardsWrapper`. This is for uniformity and makes it easier to adopt DTensor as a wrapper for local shard(s) representation. To support more tensor ops over `LocalShardsWrapper`, users need to extend its `__torch_dispatch__`. **Test** `torchrun --standalone --nnodes=1 --nproc-per-node=4 torch/distributed/_tensor/examples/torchrec_sharding_example.py -e row-wise-even` **Result** ``` Row-wise even sharding example in DTensor Col 0-15 ------- ---------- Row 0-1 cuda:0 Row 2-3 cuda:1 Row 4-5 cuda:2 Row 6-7 cuda:3 ``` Pull Request resolved: https://github.com/pytorch/pytorch/pull/122843 Approved by: https://github.com/wz337 ghstack dependencies: #120265, #121392
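A minimal sketch of what extending the dispatch whitelist looks like; `apply_per_shard` is a made-up helper that mirrors what `LocalShardsWrapper.__torch_dispatch__` in the example file does for whitelisted ops, and `aten.detach.default` is only a hypothetical addition:

```python
import torch

aten = torch.ops.aten
# Whitelist mirroring the example file, with one hypothetical extra op.
supported_ops = [aten.view.default, aten._to_copy.default, aten.detach.default]

def apply_per_shard(func, shards, *args, **kwargs):
    # For whitelisted ops, re-run the op on every local shard; the wrapper
    # would then re-wrap the results.
    if func not in supported_ops:
        raise NotImplementedError(f"{func} is not supported for LocalShardsWrapper!")
    return [func(s, *args, **kwargs) for s in shards]

shards = [torch.randn(2, 16), torch.randn(2, 16)]
flat_shards = apply_per_shard(aten.view.default, shards, [-1])
assert all(f.shape == (32,) for f in flat_shards)
```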
|
diff --git a/torch/distributed/_tensor/examples/torchrec_sharding_example.py b/torch/distributed/_tensor/examples/torchrec_sharding_example.py
index e64a9f215d..523e874bbe 100644
--- a/torch/distributed/_tensor/examples/torchrec_sharding_example.py
+++ b/torch/distributed/_tensor/examples/torchrec_sharding_example.py
@@ -128,6 +128,7 @@ def run_torchrec_row_wise_even_sharding_example(rank, world_size):
num_embeddings = 8
embedding_dim = 16
emb_table_shape = torch.Size([num_embeddings, embedding_dim])
+ # tensor shape
local_shard_shape = torch.Size(
[num_embeddings // world_size, embedding_dim] # (local_rows, local_cols)
)
@@ -209,21 +210,21 @@ def run_torchrec_row_wise_uneven_sharding_example(rank, world_size):
else torch.Size([3, embedding_dim])
)
# tensor offset
- local_shard_offset = (rank // 2 * 4 + rank % 2 * 1, embedding_dim)
+ local_shard_offset = torch.Size((rank // 2 * 4 + rank % 2 * 1, embedding_dim))
# tensor
local_tensor = torch.randn(local_shard_shape, device=device)
# local shards
# row-wise sharding: one shard per rank
- local_shards = [TensorShard(local_tensor, local_shard_shape, local_shard_offset)]
+ # create the local shards wrapper
+ local_shards_wrapper = LocalShardsWrapper(
+ local_shards=[local_tensor],
+ offsets=[local_shard_offset],
+ )
###########################################################################
# example 1: transform local_shards into DTensor
# create the DTensorMetadata which torchrec should provide
row_wise_sharding_placements: List[Placement] = [Shard(0)]
- dtensor_metadata = DTensorMetadata(device_mesh, row_wise_sharding_placements)
-
- # create the local shards wrapper
- local_shards_wrapper = LocalShardsWrapper(local_shards)
# note: for uneven sharding, we need to specify the shape and stride because
# DTensor would assume even sharding and compute shape/stride based on the
@@ -231,8 +232,8 @@ def run_torchrec_row_wise_uneven_sharding_example(rank, world_size):
# shape/stride are global tensor's shape and stride
dtensor = DTensor.from_local(
local_shards_wrapper, # a torch.Tensor subclass
- dtensor_metadata.device_mesh, # DeviceMesh
- dtensor_metadata.placements, # List[Placement]
+ device_mesh, # DeviceMesh
+ row_wise_sharding_placements, # List[Placement]
run_check=False,
shape=emb_table_shape, # this is required for uneven sharding
stride=(embedding_dim, 1),
@@ -249,10 +250,10 @@ def run_torchrec_row_wise_uneven_sharding_example(rank, world_size):
# note: DTensor.to_local() always returns a LocalShardsWrapper
dtensor_local_shards = dtensor.to_local()
assert isinstance(dtensor_local_shards, LocalShardsWrapper)
- dtensor_shard = dtensor_local_shards[0]
- assert torch.equal(dtensor_shard.tensor, local_tensor) # unwrap tensor
- assert dtensor_shard.shard_size == local_shard_shape # unwrap shape
- assert dtensor_shard.shard_offset == local_shard_offset # unwrap offset
+ shard_tensor = dtensor_local_shards.shards[0]
+ assert torch.equal(shard_tensor, local_tensor)
+ assert dtensor_local_shards.shard_sizes[0] == local_shard_shape # unwrap shape
+ assert dtensor_local_shards.shard_offsets[0] == local_shard_offset # unwrap offset
def run_torchrec_table_wise_sharding_example(rank, world_size):
|
2.41.0
|
e90e93a78e07618279b3521d265ad157b3fb742
|
Tue, 16 Apr 2024 11:16:47 -0700
|
[PATCH 0222/1000] [inductor] disable comprehensive padding in fbcode (#124191)
|
Comprehensive padding causes a small NE change and fails an internal test. Disable it for the internal use case to mitigate. Differential Revision: [D56197430](https://our.internmc.facebook.com/intern/diff/D56197430) Pull Request resolved: https://github.com/pytorch/pytorch/pull/124191 Approved by: https://github.com/jansel
|
diff --git a/torch/_inductor/config.py b/torch/_inductor/config.py
index 61af8b070c..d7141639d0 100644
--- a/torch/_inductor/config.py
+++ b/torch/_inductor/config.py
@@ -421,7 +421,8 @@ shape_padding = os.environ.get("TORCHINDUCTOR_SHAPE_PADDING", "1") == "1"
# Control if we will do padding for pointwise/reductions
comprehensive_padding = (
- os.environ.get("TORCHINDUCTOR_COMPREHENSIVE_PADDING", "1") == "1"
+ os.environ.get("TORCHINDUCTOR_COMPREHENSIVE_PADDING", "0" if is_fbcode() else "1")
+ == "1"
)
pad_channels_last = False
|
2.41.0
|
eea300680113f540e9f5c825e346bcc9510168c
|
Tue, 16 Apr 2024 08:10:52 -0700
|
[PATCH 0223/1000] [quant] Do not decompose choose_qparams_per_token_asymmetric (#124178)
|
Summary: https://github.com/pytorch/pytorch/pull/123452 added backward support to this op by turning it into CompositeImplicitAutograd, which meant it got decomposed during export/compile. However, this is not desirable behavior for the PTQ case when we try to lower the model. This commit enables QAT without breaking PTQ by refactoring the impl into a separate op that does have backward support. Test Plan: python test/test_quantization.py -k test_decomposed_choose_qparams_per_token_asymmetric_backward Reviewers: jerryzh168, digantdesai, zou3519 Subscribers: jerryzh168, digantdesai, zou3519, supriyar Differential Revision: [D56192116](https://our.internmc.facebook.com/intern/diff/D56192116) Pull Request resolved: https://github.com/pytorch/pytorch/pull/124178 Approved by: https://github.com/digantdesai
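A short usage sketch based on the ops defined in this patch (the import line registers them, as in the test below): the public op keeps its name and no longer decomposes during export/compile, while the underscored impl op remains the decomposing, differentiable path.

import torch
import torch.ao.quantization.fx._decomposed  # registers the quantized_decomposed ops

x = torch.randn(2, 3)
# Public entry point: CompositeExplicitAutograd, so it survives export as a single node.
scale, zero_point = torch.ops.quantized_decomposed.choose_qparams_per_token_asymmetric(x, torch.int8)

# Impl variant: CompositeImplicitAutograd, so it decomposes and supports backward (QAT).
y = torch.randn(2, 3, requires_grad=True)
s, zp = torch.ops.quantized_decomposed._choose_qparams_per_token_asymmetric_impl(y, torch.int8)
y.div(s).add(zp).round().sum().backward()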
|
diff --git a/test/quantization/core/test_quantized_tensor.py b/test/quantization/core/test_quantized_tensor.py
index 228f1f8ee7..a3488b06bd 100644
--- a/test/quantization/core/test_quantized_tensor.py
+++ b/test/quantization/core/test_quantized_tensor.py
@@ -1606,7 +1606,7 @@ class TestQuantizedTensor(TestCase):
# register the ops
import torch.ao.quantization.fx._decomposed
x = torch.randn(2, 3).requires_grad_()
- (s, zp) = torch.ops.quantized_decomposed.choose_qparams_per_token_asymmetric(x, torch.int8)
+ (s, zp) = torch.ops.quantized_decomposed._choose_qparams_per_token_asymmetric_impl(x, torch.int8)
out = x.div(s).add(zp).round()
out.sum().backward()
diff --git a/torch/ao/quantization/fx/_decomposed.py b/torch/ao/quantization/fx/_decomposed.py
index 2ffe8f21ce..0326cd3186 100644
--- a/torch/ao/quantization/fx/_decomposed.py
+++ b/torch/ao/quantization/fx/_decomposed.py
@@ -639,16 +639,16 @@ def choose_qparams_per_token_meta(
quantized_decomposed_lib.define(
- "choose_qparams_per_token_asymmetric(Tensor input, ScalarType dtype) -> (Tensor, Tensor)"
+ "_choose_qparams_per_token_asymmetric_impl(Tensor input, ScalarType dtype) -> (Tensor, Tensor)"
)
@impl(
quantized_decomposed_lib,
- "choose_qparams_per_token_asymmetric",
+ "_choose_qparams_per_token_asymmetric_impl",
"CompositeImplicitAutograd",
)
-def choose_qparams_per_token_asymmetric(
+def _choose_qparams_per_token_asymmetric_impl(
input: torch.Tensor,
dtype: torch.dtype,
) -> Tuple[torch.Tensor, torch.Tensor]:
@@ -691,6 +691,38 @@ def choose_qparams_per_token_asymmetric(
return scale.to(torch.float32), zero_point.to(torch.float32)
+quantized_decomposed_lib.define(
+ "choose_qparams_per_token_asymmetric(Tensor input, ScalarType dtype) -> (Tensor, Tensor)"
+)
+
+
+@impl(
+ quantized_decomposed_lib,
+ "choose_qparams_per_token_asymmetric",
+ "CompositeExplicitAutograd",
+)
+def choose_qparams_per_token_asymmetric(
+ input: torch.Tensor,
+ dtype: torch.dtype,
+) -> Tuple[torch.Tensor, torch.Tensor]:
+ return _choose_qparams_per_token_asymmetric_impl(input, dtype)
+
+
+@impl(
+ quantized_decomposed_lib,
+ "choose_qparams_per_token_asymmetric",
+ "Meta",
+)
+def choose_qparams_per_token_asymmetric_meta(
+ input: torch.Tensor,
+ dtype: torch.dtype,
+) -> Tuple[torch.Tensor, torch.Tensor]:
+ size = (1, input.size(-1))
+ return torch.empty(size, dtype=torch.double, device=input.device), torch.empty(
+ size, dtype=torch.int64, device=input.device
+ )
+
+
def _per_token_quant_qparam_dim_check(input, scales, zero_points):
num_tokens = math.prod(list(input.size())[:-1])
assert (
|
2.41.0
|
ebf65126c71cdb14ac27b9fb483629500d4c5cf
|
Tue, 16 Apr 2024 12:41:36 -0700
|
[PATCH 0225/1000] FakeTensorProp assert consistency of sizes when metadata previously existed (#124059)
|
Signed-off-by: Edward Z. Yang <ezyang@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/124059 Approved by: https://github.com/bdhirsh, https://github.com/thiagocrepaldi ghstack dependencies: #124105
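A condensed sketch of the new behavior, adapted from the test added in this patch: when a graph's nodes already carry meta['val'], re-running FakeTensorProp asserts (via torch._check) that freshly propagated sizes are consistent with the recorded ones; passing check_consistency=False opts out, as the ONNX ORT backend now does.

import torch
from torch._subclasses.fake_tensor import FakeTensorMode
from torch.fx.experimental.proxy_tensor import make_fx
from torch.fx.experimental.symbolic_shapes import ShapeEnv
from torch.fx.passes.fake_tensor_prop import FakeTensorProp

def f(x):
    return x.nonzero()

fake_mode = FakeTensorMode(shape_env=ShapeEnv())
with fake_mode:
    value = torch.randn(5)
    gm = make_fx(f)(value)
    # The nonzero node has an unbacked output size; re-propagation reallocates
    # it and checks it against the size already stored in node.meta['val'].
    FakeTensorProp(gm, fake_mode).propagate(value)
    # FakeTensorProp(gm, fake_mode, check_consistency=False) would skip the check.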
|
diff --git a/docs/source/fx.experimental.rst b/docs/source/fx.experimental.rst
index db08f69291..93974c0819 100644
--- a/docs/source/fx.experimental.rst
+++ b/docs/source/fx.experimental.rst
@@ -42,3 +42,4 @@ torch.fx.experimental.symbolic_shapes
canonicalize_bool_expr
statically_known_true
lru_cache
+ check_consistent
diff --git a/test/test_fake_tensor.py b/test/test_fake_tensor.py
index e31e22349f..bc820153d8 100644
--- a/test/test_fake_tensor.py
+++ b/test/test_fake_tensor.py
@@ -19,7 +19,9 @@ from torch._subclasses.fake_tensor import (
UnsupportedOperatorException,
unset_fake_temporarily,
)
-from torch.fx.experimental.symbolic_shapes import ShapeEnv, DimDynamic, free_symbols, StatelessSymbolicContext, ShapeEnvSettings
+from torch.fx.experimental.symbolic_shapes import (
+ ShapeEnv, DimDynamic, free_symbols, StatelessSymbolicContext, ShapeEnvSettings, statically_known_true
+)
from torch.testing._internal.custom_op_db import custom_op_db
from torch.testing._internal.common_device_type import ops
from torch.testing._internal.common_device_type import instantiate_device_type_tests, OpDTypes
@@ -44,6 +46,7 @@ from torch import distributed as dist
from torch.utils._mode_utils import no_dispatch
from torch.utils._python_dispatch import TorchDispatchMode
import torch.utils._pytree as pytree
+from torch.fx.experimental.proxy_tensor import make_fx
aten = torch.ops.aten
@@ -1300,6 +1303,32 @@ class FakeTensorPropTest(TestCase):
FakeTensorProp(graph_model, fake_mode).propagate(value, None, another_optional_value)
+ def test_unbacked_shape_realloc(self):
+ def f(x):
+ return x.nonzero()
+ shape_env = ShapeEnv()
+ fake_mode = FakeTensorMode(shape_env=shape_env)
+ with fake_mode:
+ value = torch.randn(5)
+ gm = make_fx(f)(value)
+ nonzero_nodes = [n for n in gm.graph.nodes if n.target is torch.ops.aten.nonzero.default]
+ self.assertEqual(len(nonzero_nodes), 1)
+ self.assertIsInstance(nonzero_nodes[0].meta['val'].shape[0], torch.SymInt)
+ u0 = nonzero_nodes[0].meta['val'].shape[0]
+ FakeTensorProp(gm, fake_mode).propagate(value)
+ u1 = nonzero_nodes[0].meta['val'].shape[0]
+ # Test that this test is actually doing something in that the
+ # FakeTensorProp actually triggered a reallocation. If this assert is
+ # failing, it could be because we started memoizing the nnz count for
+ # nonzero, which is nice in some sense (no reallocation) but not
+ # helpful for this test, which is checking what we do when we have
+ # to reallocate. If so, you need to make this example more
+ # complicated (e.g., maybe have a nontrivial computation on the input
+ # before feeding it into nonzero, or have some sort of randomness)
+ self.assertIsNot(u0, u1)
+ self.assertTrue(statically_known_true(u0 == u1))
+
+
def test_torch_load_with_fake_mode(self):
class TheModelClass(torch.nn.Module):
diff --git a/torch/_functorch/_aot_autograd/dispatch_and_compile_graph.py b/torch/_functorch/_aot_autograd/dispatch_and_compile_graph.py
index 653a882680..88a7bad396 100644
--- a/torch/_functorch/_aot_autograd/dispatch_and_compile_graph.py
+++ b/torch/_functorch/_aot_autograd/dispatch_and_compile_graph.py
@@ -7,6 +7,7 @@ import dataclasses
from typing import Any, Callable, List, Optional, Tuple, Union
import torch
+import torch.utils._pytree as pytree
import torch.utils.dlpack
from torch import Tensor
from torch._dispatch.python import enable_python_dispatcher
@@ -127,6 +128,9 @@ def aot_dispatch_base_graph(
_map_assigned_buffer_to_proxy
)
+ saved_updated_flat_args_subclasses_desugared = pytree.tree_map_only(
+ torch.Tensor, lambda t: t.detach(), updated_flat_args_subclasses_desugared
+ )
fw_module = _create_graph(
fn_to_trace,
updated_flat_args_subclasses_desugared,
@@ -171,9 +175,9 @@ def aot_dispatch_base_graph(
num_tokens = len(fw_metadata.tokens)
if num_tokens != 0 and config.unlift_effect_tokens:
unlift_tokens(fw_module, fw_metadata)
- updated_flat_args_subclasses_desugared = updated_flat_args_subclasses_desugared[
- num_tokens:
- ]
+ saved_updated_flat_args_subclasses_desugared = (
+ saved_updated_flat_args_subclasses_desugared[num_tokens:]
+ )
assert copy_count == copy_count2
@@ -192,7 +196,7 @@ def aot_dispatch_base_graph(
maybe_subclass_meta is None
), "aot_export_module does not support tensor subclass inputs for now."
return fw_module
- return fw_module, list(updated_flat_args_subclasses_desugared), maybe_subclass_meta
+ return fw_module, saved_updated_flat_args_subclasses_desugared, maybe_subclass_meta
# Has the precondition that there
@@ -235,6 +239,17 @@ def aot_dispatch_autograd_graph(
joint_fn_to_trace = subclass_tracing_info.plain_tensor_trace_fn
updated_joint_inputs = subclass_tracing_info.plain_tensor_args
+ # When we call _create_graph, this may mutate the metadata of joint
+ # inputs. But callers are expecting to get the original joint inputs. So
+ # we make aliases of all the inputs to make sure we have a copy that
+ # doesn't get modified.
+ #
+ # This destroys requires_grad/grad_fn information. However, backends
+ # beneath AOTAutograd are indifferent to this information, so it doesn't
+ # matter.
+ saved_updated_joint_inputs = pytree.tree_map_only(
+ torch.Tensor, lambda t: t.detach(), updated_joint_inputs
+ )
maybe_subclass_meta = subclass_tracing_info.maybe_subclass_meta
aot_graphs_log.debug(
"aot_config id: %s, fw_metadata=%s,subclass_metadata=%s",
@@ -262,4 +277,4 @@ def aot_dispatch_autograd_graph(
maybe_subclass_meta is None
), "aot_export_module does not support tensor subclass inputs for now."
return fx_g
- return fx_g, updated_joint_inputs, maybe_subclass_meta
+ return fx_g, saved_updated_joint_inputs, maybe_subclass_meta # type: ignore[return-value]
diff --git a/torch/fx/experimental/symbolic_shapes.py b/torch/fx/experimental/symbolic_shapes.py
index b634f6e313..345e8626ab 100644
--- a/torch/fx/experimental/symbolic_shapes.py
+++ b/torch/fx/experimental/symbolic_shapes.py
@@ -94,7 +94,7 @@ __all__ = [
"is_concrete_bool", "is_nested_int", "SHAPEENV_EVENT_KEY", "CURRENT_NODE_KEY",
"has_free_symbols", "sym_eq", "SymbolicContext", "StatelessSymbolicContext",
"StatefulSymbolicContext", "SubclassSymbolicContext", "statically_known_true",
- "guard_size_oblivious",
+ "guard_size_oblivious", "check_consistent",
]
# FX node metadata keys for symbolic shape FX graph.
@@ -239,6 +239,29 @@ def guard_size_oblivious(expr: Union[torch.SymBool, bool]) -> bool:
assert isinstance(expr, bool)
return expr
+def check_consistent(new, old) -> None:
+ """
+ Test that two "meta" values (typically either Tensor or SymInt) have
+ the same values, e.g., after retracing. If we don't understand the
+ quantities in question, we'll just skip the consistency check.
+ """
+ # TODO: do boolean equality test too, see
+ # https://github.com/pytorch/pytorch/issues/124110
+ scalar_types = (torch.SymInt, torch.SymFloat, int, float)
+
+ if isinstance(new, torch.Tensor):
+ assert isinstance(old, torch.Tensor)
+ torch._check(old.dim() == new.dim(), lambda: f"{old.shape} != {new.shape} (old != new)")
+ # Do this manually so that each individual test is irrefutable
+ # (TODO: should be a helper for this, maybe sym_eq? That
+ # gives us a compound expression and I'm not sure it
+ # simplifies right now)
+ for i, j in zip(old.shape, new.shape):
+ torch._check(i == j, lambda: f"{old.shape} != {new.shape} (old != new)")
+ elif isinstance(new, scalar_types):
+ assert isinstance(old, scalar_types)
+ torch._check(old == new, lambda: f"{old} != {new} (old != new)")
+
def canonicalize_bool_expr(expr: SympyBoolean) -> SympyBoolean:
r""" Canonicalize a boolean expression by transforming it into a lt / le
inequality and moving all the non-constant terms to the rhs.
@@ -4108,7 +4131,7 @@ class ShapeEnv:
"version": 2,
},
)
- log.warning("Ignored guard %s == %s, this could result in accuracy problems", expr, concrete_val)
+ log.warning("Ignored guard %s == %s, this could result in accuracy problems", expr, concrete_val, stack_info=True)
def _get_stack_summary(self, is_debug: bool = False):
diff --git a/torch/fx/passes/fake_tensor_prop.py b/torch/fx/passes/fake_tensor_prop.py
index a31953ca6e..e24f53d10d 100644
--- a/torch/fx/passes/fake_tensor_prop.py
+++ b/torch/fx/passes/fake_tensor_prop.py
@@ -4,8 +4,8 @@ import torch.fx
from torch.fx import Node
from torch.fx._compatibility import compatibility
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
-from torch.fx.experimental.proxy_tensor import py_sym_types, snapshot_fake
-from torch.fx.node import map_aggregate
+from torch.fx.experimental.proxy_tensor import snapshot_fake, py_sym_types
+from torch.utils._pytree import tree_map
__all__ = ['FakeTensorProp']
@@ -23,42 +23,44 @@ class FakeTensorProp(torch.fx.Interpreter):
module (GraphModule): The module to be executed
mode (Optional[FakeTensorMode]): The dispatch mode used to execute computation indicated by each FX Node.
"""
- def __init__(self, module: torch.fx.GraphModule, mode: Optional[FakeTensorMode] = None):
+ def __init__(self, module: torch.fx.GraphModule, mode: Optional[FakeTensorMode] = None, *, check_consistency: bool = True):
super().__init__(module)
if mode is None:
mode = FakeTensorMode()
self._mode = mode
+ self.check_consistency = check_consistency
def run_node(self, n: Node):
- import sympy
- from torch.fx.experimental.symbolic_shapes import free_unbacked_symbols
result = super().run_node(n)
- sym = None
- if (
- 'val' in n.meta and
- isinstance(v := n.meta['val'], torch.SymInt) and
- isinstance(v.node.expr, sympy.Symbol) and free_unbacked_symbols(v)
- ):
- sym = v
- def extract_val(obj):
- if isinstance(obj, FakeTensor):
- return snapshot_fake(obj)
- elif isinstance(obj, torch.Tensor):
+ nil = object()
+
+ def check_consistent_and_snapshot(new, old=nil):
+ from torch.fx.experimental.symbolic_shapes import check_consistent
+
+ if old is not nil and self.check_consistency:
+ check_consistent(new, old)
+
+ if isinstance(new, FakeTensor):
+ return snapshot_fake(new)
+ elif isinstance(new, torch.Tensor):
# TODO: How is it possible that we get a non fake tensor? We
# should be running under the mode...
- return snapshot_fake(self._mode.from_tensor(obj, static_shapes=True))
- elif isinstance(obj, py_sym_types):
- return obj
+ return snapshot_fake(self._mode.from_tensor(new, static_shapes=True))
+ elif isinstance(new, py_sym_types):
+ return new
else:
return None
- meta = map_aggregate(result, extract_val)
+ meta_arg = []
+ if 'val' in n.meta and n.meta['val'] is not None:
+ meta_arg = [n.meta['val']]
+
+ meta = tree_map(check_consistent_and_snapshot, result, *meta_arg)
if meta is not None:
n.meta['val'] = meta
- if sym is not None:
- torch._check(meta == v)
+
return result
def propagate(self, *args):
diff --git a/torch/onnx/_internal/onnxruntime.py b/torch/onnx/_internal/onnxruntime.py
index 4be4d3fbff..723235c33d 100644
--- a/torch/onnx/_internal/onnxruntime.py
+++ b/torch/onnx/_internal/onnxruntime.py
@@ -887,9 +887,9 @@ class OrtBackend:
)
else:
try:
- prim_outputs = FakeTensorProp(graph_module).propagate(
- *args, **kwargs
- )
+ prim_outputs = FakeTensorProp(
+ graph_module, check_consistency=False
+ ).propagate(*args, **kwargs)
except Exception:
logger.warning("FakeTensorProb failed for %s", graph_module)
# When FakeTensorProp fails, it is not possible to preallocate output buffers
|
2.41.0
|
7cf6f81ea304644e1fdcad0661d28552dfc84a0
|
Tue, 16 Apr 2024 13:17:42 -0700
|
[PATCH 0226/1000] [sym_shapes][perf] Skip assert in check_is_size (#124209)
|
Differential Revision: [D56207943](https://our.internmc.facebook.com/intern/diff/D56207943) Pull Request resolved: https://github.com/pytorch/pytorch/pull/124209 Approved by: https://github.com/ezyang
|
diff --git a/torch/fx/experimental/symbolic_shapes.py b/torch/fx/experimental/symbolic_shapes.py
index 345e8626ab..aa6d9c1956 100644
--- a/torch/fx/experimental/symbolic_shapes.py
+++ b/torch/fx/experimental/symbolic_shapes.py
@@ -545,7 +545,8 @@ def _advise_is_size(a):
# This must always succeed, because the sole allowed caller _check_is_size
# was responsible for expect_true'ing this
- assert a >= 0
+ # This assert triggers expensive sym compute, do not do it until its cheap.
+ # assert a >= 0
# NB: it's important not to constrain range for size for *hinted* SymInts,
# because it is not only unsound, it will immediately trip our asserts
|
2.41.0
|
46b50c788bb1dabbee0e3602f662df6634aabc3
|
Wed, 17 Apr 2024 00:18:24 +0000
|
[PATCH 0227/1000] [ez][TD] Increase logging (#124082)
|
Increase logging during TD, generate an artifact that says which tests got excluded, and fix a minor bug where filter_test_configs couldn't get commit messages. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124082 Approved by: https://github.com/seemethere, https://github.com/malfet
|
diff --git a/.github/scripts/filter_test_configs.py b/.github/scripts/filter_test_configs.py
index dd9553e947..ebeccaeb16 100755
--- a/.github/scripts/filter_test_configs.py
+++ b/.github/scripts/filter_test_configs.py
@@ -449,7 +449,7 @@ def parse_reenabled_issues(s: Optional[str]) -> List[str]:
def get_reenabled_issues(pr_body: str = "") -> List[str]:
- default_branch = os.getenv("GIT_DEFAULT_BRANCH", "main")
+ default_branch = f"origin/{os.environ.get('GIT_DEFAULT_BRANCH', 'main')}"
try:
commit_messages = subprocess.check_output(
f"git cherry -v {default_branch}".split(" ")
diff --git a/test/run_test.py b/test/run_test.py
index 4d0b8b58e9..8952c78d6b 100755
--- a/test/run_test.py
+++ b/test/run_test.py
@@ -57,6 +57,7 @@ from tools.testing.discover_tests import (
TESTS,
)
from tools.testing.do_target_determination_for_s3 import import_results
+from tools.testing.target_determination.gen_artifact import gen_ci_artifact
from tools.testing.test_run import TestRun
from tools.testing.test_selections import (
@@ -1714,6 +1715,8 @@ def main():
test_batch = TestBatch("tests to run", include, False)
test_batch_exclude = TestBatch("excluded", exclude, True)
+ if IS_CI:
+ gen_ci_artifact([x.to_json() for x in include], [x.to_json() for x in exclude])
print_to_stderr(test_batch)
print_to_stderr(test_batch_exclude)
diff --git a/tools/testing/do_target_determination_for_s3.py b/tools/testing/do_target_determination_for_s3.py
index c7691b5679..4b004801fc 100644
--- a/tools/testing/do_target_determination_for_s3.py
+++ b/tools/testing/do_target_determination_for_s3.py
@@ -56,6 +56,9 @@ def main() -> None:
test_prioritizations = aggregated_heuristics.get_aggregated_priorities()
+ print("Aggregated Heuristics")
+ print(test_prioritizations.get_info_str(verbose=False))
+
if os.getenv("CI") == "true":
print("Emitting metrics")
# Split into 3 due to size constraints
diff --git a/tools/testing/target_determination/gen_artifact.py b/tools/testing/target_determination/gen_artifact.py
new file mode 100644
index 0000000000..fda4833f75
--- /dev/null
+++ b/tools/testing/target_determination/gen_artifact.py
@@ -0,0 +1,10 @@
+import json
+import pathlib
+from typing import Any, List
+
+REPO_ROOT = pathlib.Path(__file__).resolve().parent.parent.parent.parent
+
+
+def gen_ci_artifact(included: List[Any], excluded: List[Any]) -> None:
+ with open(REPO_ROOT / "test/test-reports/td_exclusions.json", "w") as f:
+ json.dump({"included": included, "excluded": excluded}, f)
diff --git a/tools/testing/target_determination/heuristics/utils.py b/tools/testing/target_determination/heuristics/utils.py
index a0823ab66e..0c158bb934 100644
--- a/tools/testing/target_determination/heuristics/utils.py
+++ b/tools/testing/target_determination/heuristics/utils.py
@@ -2,6 +2,7 @@ import json
import os
import subprocess
from collections import defaultdict
+from functools import lru_cache
from pathlib import Path
from typing import cast, Dict, List, Set, Union
from urllib.request import Request, urlopen
@@ -20,6 +21,7 @@ def python_test_file_to_test_name(tests: Set[str]) -> Set[str]:
return valid_tests
+@lru_cache(maxsize=None)
def query_changed_files() -> List[str]:
default_branch = f"origin/{os.environ.get('GIT_DEFAULT_BRANCH', 'main')}"
merge_base = (
@@ -40,15 +42,18 @@ def query_changed_files() -> List[str]:
capture_output=True,
check=False,
)
+ print(f"merge_base: {merge_base}, head: {head}")
if proc.returncode != 0:
raise RuntimeError("Unable to get changed files")
lines = proc.stdout.decode().strip().split("\n")
lines = [line.strip() for line in lines]
+ print(f"Changed files: {lines}")
return lines
+@lru_cache(maxsize=None)
def get_git_commit_info() -> str:
"""Gets the commit info since the last commit on the default branch."""
default_branch = f"origin/{os.environ.get('GIT_DEFAULT_BRANCH', 'main')}"
@@ -75,6 +80,7 @@ def get_git_commit_info() -> str:
)
+@lru_cache(maxsize=None)
def get_issue_or_pr_body(number: int) -> str:
"""Gets the body of an issue or PR"""
github_token = os.environ.get("GITHUB_TOKEN")
|
2.41.0
|
abd3f60fd6bd388c5bbbccd318ab4ad938cb781
|
Wed, 17 Apr 2024 00:23:42 +0000
|
[PATCH 0228/1000] [CI] Reduce CI_SERIAL_LIST list (#124085)
|
Add a serial marker for individual tests so the test file can be removed from the CI serial list. Run serial-marked tests first in serial, then run all other tests afterwards in parallel. Slowly reduce the list and mark individual tests as serial instead. The hope is that the number of serial tests stays small so sharding evenness doesn't get too messed up, and that we can eventually use 3 procs for sm86 and cpu. (serial no longer looks like a real word to me) Pull Request resolved: https://github.com/pytorch/pytorch/pull/124085 Approved by: https://github.com/seemethere, https://github.com/malfet
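A hedged usage sketch of the marker added by this patch (the test name is illustrative): serialTest() applies pytest's new "serial" mark when pytest is available, and run_test.py runs marked tests in the serial phase before handing -m "not serial" to the parallel pool.

from torch.testing._internal.common_utils import TestCase, run_tests, serialTest

class ExampleSerialTests(TestCase):
    @serialTest()  # runs in the serial phase, deselected from the parallel phase
    def test_uses_lots_of_memory(self):
        self.assertTrue(True)

if __name__ == "__main__":
    run_tests()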
|
diff --git a/pytest.ini b/pytest.ini
index b7e7608169..e2ab2ebd0c 100644
--- a/pytest.ini
+++ b/pytest.ini
@@ -19,3 +19,6 @@ filterwarnings =
ignore:Module already imported so cannot be rewritten.*hypothesis:pytest.PytestAssertRewriteWarning
xfail_strict = True
+
+markers =
+ serial: marks tests as needs to be run serially (deselect with '-m "not serial"')
diff --git a/test/inductor/test_torchinductor.py b/test/inductor/test_torchinductor.py
index 786428e690..1bda97a3fe 100644
--- a/test/inductor/test_torchinductor.py
+++ b/test/inductor/test_torchinductor.py
@@ -70,6 +70,7 @@ from torch.testing._internal.common_utils import (
IS_WINDOWS,
IS_X86,
parametrize,
+ serialTest,
skipIfRocm,
subtest,
TEST_WITH_ASAN,
@@ -9278,6 +9279,7 @@ class CommonTemplate:
@config.patch(
"triton.autotune_pointwise", True
) # needed to introduce config that exceed max shared memory usage
+ @serialTest()
def test_large_block_sizes(self):
"""
Inductor will try triton configs like x = 64 and y = 1024 which will
diff --git a/test/run_test.py b/test/run_test.py
index 8952c78d6b..cafa60bd1c 100755
--- a/test/run_test.py
+++ b/test/run_test.py
@@ -246,9 +246,6 @@ CI_SERIAL_LIST = [
"test_module_hooks", # OOM
"inductor/test_max_autotune",
"inductor/test_cutlass_backend", # slow due to many nvcc compilation steps
- "inductor/test_torchinductor", # OOM on test_large_block_sizes
- "inductor/test_torchinductor_dynamic_shapes", # OOM on test_large_block_sizes
- "inductor/test_torchinductor_codegen_dynamic_shapes", # OOM on test_large_block_sizes
"test_profiler", # test_source_multithreaded is probably not compatible with parallelism
]
# A subset of onnx tests that cannot run in parallel due to high memory usage.
@@ -1591,6 +1588,11 @@ def run_tests(
):
pool.terminate()
+ keep_going_message = (
+ "\n\nTip: You can keep running tests even on failure by passing --keep-going to run_test.py.\n"
+ "If running on CI, add the 'keep-going' label to your PR and rerun your jobs."
+ )
+
try:
for test in selected_tests_serial:
options_clone = copy.deepcopy(options)
@@ -1603,19 +1605,29 @@ def run_tests(
and not options.continue_through_error
and not RERUN_DISABLED_TESTS
):
- raise RuntimeError(
- failure.message
- + "\n\nTip: You can keep running tests even on failure by "
- "passing --keep-going to run_test.py.\n"
- "If running on CI, add the 'keep-going' label to "
- "your PR and rerun your jobs."
- )
+ raise RuntimeError(failure.message + keep_going_message)
+
+ # Run tests marked as serial first
+ for test in selected_tests_parallel:
+ options_clone = copy.deepcopy(options)
+ if can_run_in_pytest(test):
+ options_clone.pytest = True
+ options_clone.additional_unittest_args.extend(["-m", "serial"])
+ failure = run_test_module(test, test_directory, options_clone)
+ test_failed = handle_error_messages(failure)
+ if (
+ test_failed
+ and not options.continue_through_error
+ and not RERUN_DISABLED_TESTS
+ ):
+ raise RuntimeError(failure.message + keep_going_message)
os.environ["NUM_PARALLEL_PROCS"] = str(NUM_PROCS)
for test in selected_tests_parallel:
options_clone = copy.deepcopy(options)
if can_run_in_pytest(test):
options_clone.pytest = True
+ options_clone.additional_unittest_args.extend(["-m", "not serial"])
pool.apply_async(
run_test_module,
args=(test, test_directory, options_clone),
@@ -1718,6 +1730,7 @@ def main():
if IS_CI:
gen_ci_artifact([x.to_json() for x in include], [x.to_json() for x in exclude])
+ print_to_stderr(f"Running parallel tests on {NUM_PROCS} processes")
print_to_stderr(test_batch)
print_to_stderr(test_batch_exclude)
diff --git a/torch/testing/_internal/common_utils.py b/torch/testing/_internal/common_utils.py
index 776a9d2d3e..5527387080 100644
--- a/torch/testing/_internal/common_utils.py
+++ b/torch/testing/_internal/common_utils.py
@@ -97,6 +97,11 @@ from torch.utils._import_utils import _check_module_exists
import torch.utils._pytree as pytree
from .composite_compliance import no_dispatch
+try:
+ import pytest
+ has_pytest = True
+except ImportError:
+ has_pytest = False
# Class to keep track of test flags configurable by environment variables.
@@ -1384,6 +1389,15 @@ def skipIfTorchInductor(msg="test doesn't currently work with torchinductor",
return decorator
+def serialTest(condition=True):
+ """
+ Decorator for running tests serially. Requires pytest
+ """
+ def decorator(fn):
+ if has_pytest and condition:
+ return pytest.mark.serial(fn)
+ return fn
+ return decorator
def unMarkDynamoStrictTest(cls=None):
def decorator(cls):
|
2.41.0
|
f45ac8c986934610c0fc94808db9131dc0dfd7d
|
Tue, 16 Apr 2024 14:45:29 -0700
|
[PATCH 0229/1000] [FSDP2] Added explicit `unshard(async_op)` API (#120952)
|
This PR adds an `unshard(async_op: bool = False)` API to manually unshard the parameters via all-gather. This can be used for reordering the all-gather with other collectives (e.g. all-to-all). This currently requires the user to set `TORCH_NCCL_AVOID_RECORD_STREAMS=1` to avoid `recordStream` from `ProcessGroupNCCL` and get the expected memory behavior. Differential Revision: [D56148725](https://our.internmc.facebook.com/intern/diff/D56148725) Pull Request resolved: https://github.com/pytorch/pytorch/pull/120952 Approved by: https://github.com/wanchaol
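A hedged usage sketch of the API described above; the model/submodule names and the overlapping work are assumptions, and it presumes an already-initialized process group with TORCH_NCCL_AVOID_RECORD_STREAMS=1 set as noted.

from torch.distributed._composable.fsdp import fully_shard

fully_shard(model.layer1, reshard_after_forward=False)  # hypothetical submodule
fully_shard(model)

handle = model.layer1.unshard(async_op=True)   # kick off the all-gather early
y = run_other_collective_or_compute(x)         # hypothetical work to overlap
handle.wait()                                  # or skip: the pre-forward waits automatically
out = model.layer1(y)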
|
diff --git a/test/distributed/_composable/fsdp/test_fully_shard_comm.py b/test/distributed/_composable/fsdp/test_fully_shard_comm.py
index 59b1048df8..f41f4f2009 100644
--- a/test/distributed/_composable/fsdp/test_fully_shard_comm.py
+++ b/test/distributed/_composable/fsdp/test_fully_shard_comm.py
@@ -8,9 +8,10 @@ from typing import Callable, List, Optional, Tuple, Union
import torch
import torch.distributed as dist
import torch.nn as nn
+import torch.nn.functional as F
-from torch.distributed._composable import checkpoint
-from torch.distributed._composable.fsdp import fully_shard, MixedPrecisionPolicy
+from torch.distributed._composable import checkpoint, replicate
+from torch.distributed._composable.fsdp import FSDP, fully_shard, MixedPrecisionPolicy
from torch.distributed._composable.fsdp._fsdp_collectives import (
foreach_all_gather,
foreach_all_gather_copy_out,
@@ -24,12 +25,15 @@ from torch.distributed._composable.fsdp._fsdp_init import (
from torch.distributed._composable.fsdp._fsdp_param import ShardedState
from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup
from torch.distributed._tensor import DTensor
+from torch.distributed._tensor.experimental import implicit_replication
+from torch.distributed.device_mesh import DeviceMesh, init_device_mesh
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import (
DoubleLinear,
FSDPTest,
FSDPTestMultiThread,
+ MLP,
patch_all_gather,
patch_post_backward,
patch_reduce_scatter,
@@ -573,5 +577,88 @@ class TestFullyShardBackwardPrefetch(FSDPTest):
return post_backward_with_record
+class TestFullyShardUnshard(FSDPTest):
+ @property
+ def world_size(self) -> int:
+ return min(torch.cuda.device_count(), 2)
+
+ @skip_if_lt_x_gpu(2)
+ def test_unshard_async(self):
+ class ReduceModule(nn.Module):
+ def __init__(self, dim: int, mesh: DeviceMesh):
+ super().__init__()
+ self.mesh = mesh
+ self.weight = nn.Parameter(torch.randn(dim, dim))
+
+ def forward(self, x: torch.Tensor):
+ y = F.relu(x @ self.weight)
+ # NOTE: This all-reduce is not differentiable and is included
+ # to exercise the overlap.
+ work = dist.all_reduce(y, group=self.mesh.get_group(), async_op=True)
+ return y, work
+
+ class MLPs(nn.Module):
+ def __init__(self, dim: int):
+ super().__init__()
+ self.mlp1 = MLP(dim)
+ self.mlp2 = MLP(dim)
+ self.mlp3 = MLP(dim)
+
+ def forward(self, ys: List[torch.Tensor], works: List[dist.Work]):
+ (y1, y2, y3), (work1, work2, work3) = ys, works
+ work1.wait()
+ z1 = self.mlp1(y1)
+ work2.wait()
+ z2 = self.mlp2(y2)
+ work3.wait()
+ z3 = self.mlp3(y3)
+ return z1 + z2 + z3
+
+ class ReduceModel(nn.Module):
+ def __init__(self, dim: int, mesh: DeviceMesh):
+ super().__init__()
+ self.reduce_module1 = ReduceModule(dim, mesh)
+ self.reduce_module2 = ReduceModule(dim, mesh)
+ self.reduce_module3 = ReduceModule(dim, mesh)
+ self.mlps = MLPs(dim)
+
+ def forward(self, x: torch.Tensor):
+ y1, work1 = self.reduce_module1(x)
+ if isinstance(self.mlps.mlp1, FSDP):
+ self.mlps.mlp1.unshard(async_op=True)
+ y2, work2 = self.reduce_module2(x)
+ if isinstance(self.mlps.mlp2, FSDP):
+ self.mlps.mlp2.unshard(async_op=True)
+ y3, work3 = self.reduce_module3(x)
+ if isinstance(self.mlps.mlp3, FSDP):
+ self.mlps.mlp3.unshard(async_op=True)
+ return self.mlps([y1, y2, y3], [work1, work2, work3])
+
+ mesh = init_device_mesh("cuda", (self.world_size,))
+ batch_size, dim = 2, 8
+ torch.manual_seed(42)
+ ref_model = replicate(ReduceModel(dim, mesh).cuda())
+ ref_optim = torch.optim.Adam(ref_model.parameters(), lr=1e-2)
+ torch.manual_seed(42)
+ model = ReduceModel(dim, mesh)
+ fully_shard(model.mlps.mlp1, reshard_after_forward=False)
+ fully_shard(model.mlps.mlp2, reshard_after_forward=False)
+ fully_shard(model.mlps.mlp3, reshard_after_forward=False)
+ fully_shard(model.mlps)
+ replicate(model.cuda())
+ optim = torch.optim.Adam(model.parameters(), lr=1e-2, foreach=True)
+ torch.manual_seed(42 + self.rank + 1)
+ inp = torch.randn((batch_size, dim), device="cuda")
+ for _ in range(10):
+ losses: List[torch.Tensor] = []
+ for _model, _optim in ((ref_model, ref_optim), (model, optim)):
+ losses.append(_model(inp).sum())
+ losses[-1].backward()
+ with implicit_replication():
+ _optim.step()
+ _optim.zero_grad()
+ self.assertEqual(losses[0], losses[1])
+
+
if __name__ == "__main__":
run_tests()
diff --git a/test/distributed/_composable/fsdp/test_fully_shard_training.py b/test/distributed/_composable/fsdp/test_fully_shard_training.py
index 3b9406b3ef..2b2ac10949 100644
--- a/test/distributed/_composable/fsdp/test_fully_shard_training.py
+++ b/test/distributed/_composable/fsdp/test_fully_shard_training.py
@@ -710,6 +710,12 @@ class TestFullyShardGradientAccumulation(FSDPTest):
@skip_if_lt_x_gpu(2)
def test_1f1b_microbatching(self):
+ self.run_subtests(
+ {"use_explicit_unshard": [False, True]},
+ self._test_1f1b_microbatching,
+ )
+
+ def _test_1f1b_microbatching(self, use_explicit_unshard: bool):
torch.manual_seed(42)
model_args = ModelArgs(dropout_p=0.0)
model = Transformer(model_args)
@@ -731,6 +737,14 @@ class TestFullyShardGradientAccumulation(FSDPTest):
for _ in range(num_microbatches)
]
+ # Before pipelining, we may prefer to issue all all-gathers ahead of
+ # time to increase overlap opportunity at no difference in parameter
+ # memory usage since we do not reshard after forward
+ if use_explicit_unshard:
+ for module in model.modules():
+ if isinstance(module, FSDP):
+ module.unshard(async_op=True)
+
# Emulate the 1f1b pipeline schedule and only reduce gradients on the
# last microbatch
losses: List[torch.Tensor] = []
diff --git a/torch/distributed/_composable/fsdp/_fsdp_param_group.py b/torch/distributed/_composable/fsdp/_fsdp_param_group.py
index a6eb977e02..7a7addb40a 100644
--- a/torch/distributed/_composable/fsdp/_fsdp_param_group.py
+++ b/torch/distributed/_composable/fsdp/_fsdp_param_group.py
@@ -188,6 +188,7 @@ class FSDPParamGroup:
self._grad_divide_factors = (factor, data_parallel_world_size / factor)
def lazy_init(self):
+ # Lazy init should be idempotent
param_names_on_meta = [
fsdp_param._param_fqn
for fsdp_param in self.fsdp_params
@@ -433,8 +434,13 @@ class FSDPParamGroup:
return args, kwargs
def _register_state_dict_hooks(self) -> None:
- assert len(self._module_to_pre_save_state_dict_hook_handle) == 0
- assert len(self._module_to_pre_load_state_dict_hook_handle) == 0
+ num_pre_save_hooks = len(self._module_to_pre_save_state_dict_hook_handle)
+ num_pre_load_hooks = len(self._module_to_pre_load_state_dict_hook_handle)
+ assert (
+ num_pre_save_hooks == num_pre_load_hooks
+ ), f"Pre-save: {num_pre_save_hooks} pre-load: {num_pre_load_hooks}"
+ if num_pre_save_hooks > 0:
+ return # already registered
modules_with_fsdp_params: Set[nn.Module] = {
fsdp_param._module_info.module for fsdp_param in self.fsdp_params
}
diff --git a/torch/distributed/_composable/fsdp/fully_shard.py b/torch/distributed/_composable/fsdp/fully_shard.py
index ec7a4a2862..cc6038548f 100644
--- a/torch/distributed/_composable/fsdp/fully_shard.py
+++ b/torch/distributed/_composable/fsdp/fully_shard.py
@@ -161,6 +161,33 @@ class FSDP:
if fsdp_param_group := state._fsdp_param_group:
fsdp_param_group.reshard()
+ def unshard(self, async_op: bool = False) -> Optional["UnshardHandle"]:
+ """
+ Unshards the module's parameters by allocating memory and all-gathering
+ the parameters. This method is *not* recursive.
+
+ Args:
+ async_op (bool): If ``True``, then returns a :class:`UnshardHandle`
+ that has a :meth:`wait` method to wait on the unshard op. If
+ ``False``, then returns ``None`` and waits on the handle inside
+ this function.
+
+ .. note:: If ``async_op=True``, then the user does not have to call
+ :meth:`wait` on the returned handle if waiting on the unshard op
+ in the module's pre-forward is tolerable. FSDP will wait on the
+ pending unshard op in the pre-forward automatically.
+ """
+ state = self._get_fsdp_state()
+ if (fsdp_param_group := state._fsdp_param_group) is None:
+ return None
+ fsdp_param_group.lazy_init()
+ fsdp_param_group.unshard(async_op=async_op)
+ handle = UnshardHandle(fsdp_param_group)
+ if async_op:
+ return handle
+ handle.wait()
+ return None
+
def set_is_last_backward(self, is_last_backward: bool) -> None:
"""
Sets whether the next backward is the last one, meaning that FSDP
@@ -247,3 +274,27 @@ class FSDP:
: fsdp_param.sharded_size[0]
]
return ret
+
+
+class UnshardHandle:
+ """
+ A handle to wait on the unshard op.
+
+ Args:
+ fsdp_param_group (FSDPParamGroup): FSDP parameter group to unshard.
+ """
+
+ def __init__(self, fsdp_param_group: FSDPParamGroup):
+ self._fsdp_param_group = fsdp_param_group
+
+ def wait(self):
+ """
+ Waits on the unshard op.
+
+ This ensures that the current stream can use the unsharded parameters,
+ which are now registered to the module.
+ """
+ if hasattr(self, "_fsdp_param_group"):
+ self._fsdp_param_group.wait_for_unshard()
+ # Avoid keeping a reference
+ delattr(self, "_fsdp_param_group")
|
2.41.0
|
56c4572a64dfdf669cd1dd25f4161b5b7f90f78
|
Wed, 17 Apr 2024 00:46:04 +0000
|
[PATCH 0230/1000] Fix typos in docs (#124218)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124218 Approved by: https://github.com/albanD
|
diff --git a/torch/nn/modules/loss.py b/torch/nn/modules/loss.py
index 6cd9088349..637e7a7d45 100644
--- a/torch/nn/modules/loss.py
+++ b/torch/nn/modules/loss.py
@@ -1678,7 +1678,7 @@ class CTCLoss(_Loss):
:math:`(\operatorname{sum}(\text{target\_lengths}))`,
where :math:`N = \text{batch size}` and
:math:`S = \text{max target length, if shape is } (N, S)`.
- It represent the target sequences. Each element in the target
+ It represents the target sequences. Each element in the target
sequence is a class index. And the target index cannot be blank (default=0).
In the :math:`(N, S)` form, targets are padded to the
length of the longest sequence, and stacked.
@@ -1686,12 +1686,12 @@ class CTCLoss(_Loss):
the targets are assumed to be un-padded and
concatenated within 1 dimension.
- Input_lengths: Tuple or tensor of size :math:`(N)` or :math:`()`,
- where :math:`N = \text{batch size}`. It represent the lengths of the
+ where :math:`N = \text{batch size}`. It represents the lengths of the
inputs (must each be :math:`\leq T`). And the lengths are specified
for each sequence to achieve masking under the assumption that sequences
are padded to equal lengths.
- Target_lengths: Tuple or tensor of size :math:`(N)` or :math:`()`,
- where :math:`N = \text{batch size}`. It represent lengths of the targets.
+ where :math:`N = \text{batch size}`. It represents lengths of the targets.
Lengths are specified for each sequence to achieve masking under the
assumption that sequences are padded to equal lengths. If target shape is
:math:`(N,S)`, target_lengths are effectively the stop index
|
2.41.0
|
fd9e320ea415c4b221799863a334c842bdb6ff2
|
Tue, 16 Apr 2024 13:43:30 -0700
|
[PATCH 0231/1000] Remove unnecessary FileLock in Fx Graph Cache (#124212)
|
Writing to the file happens via `write_atomic`, so there's no need to take a global lock on the file system. The lock is likely creating unnecessary waits. Differential Revision: [D56208628](https://our.internmc.facebook.com/intern/diff/D56208628/) Pull Request resolved: https://github.com/pytorch/pytorch/pull/124212 Approved by: https://github.com/masnesral, https://github.com/eellison
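A minimal sketch of the atomic-write pattern that makes a per-key lock unnecessary; the helper name mirrors the one mentioned above, but this is an illustration rather than the actual codecache implementation.

import os
import tempfile

def write_atomic(path: str, data: bytes) -> None:
    # Write into a temp file in the same directory, then atomically rename it
    # over the final path, so readers never observe a partially written file.
    fd, tmp_path = tempfile.mkstemp(dir=os.path.dirname(path) or ".")
    try:
        with os.fdopen(fd, "wb") as f:
            f.write(data)
        os.replace(tmp_path, path)
    except BaseException:
        os.unlink(tmp_path)
        raise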
|
diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py
index 72786779c0..baf5869b09 100644
--- a/torch/_inductor/codecache.py
+++ b/torch/_inductor/codecache.py
@@ -868,29 +868,25 @@ class FxGraphCache:
Load a compiled graph from the cache. If a cached entry does not exist,
compile the graph and save it to the cache.
"""
- from filelock import FileLock
compiled_graph = None
try:
FxGraphCache._check_can_cache(gm)
key = compiled_fx_graph_hash(gm, example_inputs, fx_kwargs)
- lock_path = os.path.join(get_lock_dir(), key + ".lock")
- with FileLock(lock_path, timeout=LOCK_TIMEOUT):
- compiled_graph = FxGraphCache._lookup_graph(key, example_inputs)
- if compiled_graph is None:
- log.debug("fx graph cache miss for key %s", key)
- counters["inductor"]["fxgraph_cache_miss"] += 1
- compiled_graph = compile_fx_fn(gm, example_inputs, **fx_kwargs)
- FxGraphCache._save_graph(key, compiled_graph, example_inputs)
- else:
- log.debug("fx graph cache hit for key %s", key)
- counters["inductor"]["fxgraph_cache_hit"] += 1
+ compiled_graph = FxGraphCache._lookup_graph(key, example_inputs)
+ if compiled_graph is None:
+ log.debug("fx graph cache miss for key %s", key)
+ counters["inductor"]["fxgraph_cache_miss"] += 1
+ compiled_graph = compile_fx_fn(gm, example_inputs, **fx_kwargs)
+ FxGraphCache._save_graph(key, compiled_graph, example_inputs)
+ else:
+ log.debug("fx graph cache hit for key %s", key)
+ counters["inductor"]["fxgraph_cache_hit"] += 1
except BypassFxGraphCache:
counters["inductor"]["fxgraph_cache_bypass"] += 1
-
- if not compiled_graph:
- compiled_graph = compile_fx_fn(gm, example_inputs, **fx_kwargs)
+ if not compiled_graph:
+ compiled_graph = compile_fx_fn(gm, example_inputs, **fx_kwargs)
return compiled_graph
|
2.41.0
|
e4c4e93b66729145ac87ef384632b1abacaf26d
|
Wed, 17 Apr 2024 02:12:20 +0000
|
[PATCH 0232/1000] [Inductor] add contiguous layout optm for bmm input (#122599)
|
Fixes #117743. Add a contiguous layout optimization for `bmm` inputs to avoid additional copies. Pull Request resolved: https://github.com/pytorch/pytorch/pull/122599 Approved by: https://github.com/jgong5, https://github.com/leslie-fang-intel, https://github.com/eellison
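The layout predicate this patch adds, pulled into a standalone sketch to show what "contiguous on one of the last two dims" means for typical bmm operands:

import torch

def is_preferred_layout_as_bmm_input(sizes, strides):
    # contiguous on one of the last two dims
    return (
        strides[-1] == 1 and (sizes[-2] == 1 or strides[-2] >= sizes[-1])
    ) or (strides[-2] == 1 and (sizes[-1] == 1 or strides[-1] >= sizes[-2]))

t = torch.randn(8, 16, 32)
print(is_preferred_layout_as_bmm_input(t.size(), t.stride()))    # True: contiguous
tt = t.transpose(-1, -2)
print(is_preferred_layout_as_bmm_input(tt.size(), tt.stride()))  # True: transposed view needs no copy
s = t[:, ::2, ::2]
print(is_preferred_layout_as_bmm_input(s.size(), s.stride()))    # False: bmm would copy, so Inductor requires contiguous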
|
diff --git a/torch/_inductor/kernel/bmm.py b/torch/_inductor/kernel/bmm.py
index 1878cef79f..1bb8c9d820 100644
--- a/torch/_inductor/kernel/bmm.py
+++ b/torch/_inductor/kernel/bmm.py
@@ -1,5 +1,6 @@
import torch
+from .. import ir
from ..lowering import register_lowering
from ..select_algorithm import (
autotune_select_algorithm,
@@ -7,6 +8,7 @@ from ..select_algorithm import (
TritonTemplate,
)
from ..utils import ceildiv as cdiv, use_aten_gemm_kernels, use_triton_template
+from ..virtualized import V
from .mm_common import addmm_epilogue, mm_args, mm_configs, mm_options
@@ -87,6 +89,38 @@ aten_baddbmm = ExternKernelChoice(torch.baddbmm, "at::baddbmm_out")
@register_lowering(aten.bmm)
def tuned_bmm(mat1, mat2, *, layout=None):
+ if all(x.get_device().type == "cpu" for x in [mat1, mat2]):
+
+ def is_valid_to_require_contiguous(t):
+ if not ir.is_storage_and_layout(t):
+ return True
+ _, layout = ir.as_storage_and_layout(t, freeze=False)
+ return isinstance(layout, ir.FlexibleLayout)
+
+ def is_preferred_layout_as_bmm_input(sizes, strides):
+ # contiguous on one of the last two dims
+ return (
+ strides[-1] == 1 and (sizes[-2] == 1 or strides[-2] >= sizes[-1])
+ ) or (strides[-2] == 1 and (sizes[-1] == 1 or strides[-1] >= sizes[-2]))
+
+ # Make the input of bmm contiguous
+ # if it is not contiguous on either of the last two dims,
+ # because bmm cpu implementation would do contiguous() if not.
+ # This is to avoid additional copies in bmm.
+ def may_require_contiguous(t, meta_t):
+ sizes = meta_t.meta["val"].size()
+ strides = meta_t.meta["val"].stride()
+ if not is_preferred_layout_as_bmm_input(sizes, strides):
+ t = ir.ExternKernel.require_contiguous(t)
+ return t
+
+ if is_valid_to_require_contiguous(mat1):
+ meta_mat1 = V.graph.current_node.args[0]
+ mat1 = may_require_contiguous(mat1, meta_mat1)
+ if is_valid_to_require_contiguous(mat2):
+ meta_mat2 = V.graph.current_node.args[1]
+ mat2 = may_require_contiguous(mat2, meta_mat2)
+
m, n, k, layout, mat1, mat2 = mm_args(mat1, mat2, layout=layout)
# options to tune from
|
2.41.0
|
2ca18ea3bac636d42310b90c0b899d50eea7689
|
Wed, 17 Apr 2024 02:18:32 +0000
|
[PATCH 0233/1000] Handle the case when one of the outputs of the forward pass is None (#123988)
|
Summary: When applying FSDP-2 to the FM-FB benchmark with the FullModel model, we ran into an error where one of the output tensors of the forward pass is None. I double-checked that the same output tensor is also None in FSDP-1. So we just need to handle the None properly here. Test Plan: See the internal diff. Differential Revision: D56087956 Pull Request resolved: https://github.com/pytorch/pytorch/pull/123988 Approved by: https://github.com/awgu
|
diff --git a/torch/distributed/_composable/fsdp/_fsdp_state.py b/torch/distributed/_composable/fsdp/_fsdp_state.py
index 547b8e8d9f..88421d1a11 100644
--- a/torch/distributed/_composable/fsdp/_fsdp_state.py
+++ b/torch/distributed/_composable/fsdp/_fsdp_state.py
@@ -235,7 +235,7 @@ class FSDPState(_State):
return output
flat_outputs, _ = tree_flatten(output)
- tensors = tuple(t for t in flat_outputs if t.requires_grad)
+ tensors = tuple(t for t in flat_outputs if (t is not None and t.requires_grad))
if tensors:
grad_fns = tuple(t.grad_fn for t in tensors if t.grad_fn is not None)
pre_backward = functools.partial(self._pre_backward, grad_fns)
|
2.41.0
|
1cc808ac7b0adf40bc71858c5e1924b9e41934e
|
Tue, 16 Apr 2024 16:27:20 -0700
|
[PATCH 0235/1000] [dynamo][cpp-guards] Missing decref on early returns in DictSubclassGuardManager (#124230)
|
I am sad that I missed this earlier. The good thing is that CI caught it. I will be more careful next time. This was the reason https://github.com/pytorch/pytorch/pull/123547 was reverted - https://github.com/pytorch/pytorch/pull/123547#issuecomment-2058350245 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124230 Approved by: https://github.com/mlazos
|
diff --git a/torch/csrc/dynamo/guards.cpp b/torch/csrc/dynamo/guards.cpp
index 578f823d64..f1d8020fb3 100644
--- a/torch/csrc/dynamo/guards.cpp
+++ b/torch/csrc/dynamo/guards.cpp
@@ -2184,12 +2184,16 @@ class DictSubclassGuardManager : public DictGuardManager {
KeyValueManager& key_value_manager = _key_value_managers[dict_pointer];
std::unique_ptr<GuardManager>& key_manager = key_value_manager.first;
if (key_manager && !key_manager->check_nopybind(key)) {
+ Py_DECREF(key);
+ Py_DECREF(iterator);
return false;
}
PyObject* value = PyDict_GetItem(obj, key); // borrowed ref
std::unique_ptr<GuardManager>& value_manager = key_value_manager.second;
if (value_manager && !value_manager->check_nopybind(value)) {
+ Py_DECREF(key);
+ Py_DECREF(iterator);
return false;
}
@@ -2245,6 +2249,8 @@ class DictSubclassGuardManager : public DictGuardManager {
GuardDebugInfo debug_info = key_manager->check_verbose_nopybind(key);
num_guards_executed += debug_info.num_guards_executed;
if (!debug_info.result) {
+ Py_DECREF(key);
+ Py_DECREF(iterator);
return GuardDebugInfo(
false, debug_info.verbose_code_parts, num_guards_executed);
}
@@ -2257,6 +2263,8 @@ class DictSubclassGuardManager : public DictGuardManager {
value_manager->check_verbose_nopybind(value);
num_guards_executed += debug_info.num_guards_executed;
if (!debug_info.result) {
+ Py_DECREF(key);
+ Py_DECREF(iterator);
return GuardDebugInfo(
false, debug_info.verbose_code_parts, num_guards_executed);
}
|
2.41.0
|
50051f412e50d98d506adf0d05aa6e4ceab54bd
|
Mon, 15 Apr 2024 14:02:35 -0700
|
[PATCH 0236/1000] Dont precompile already seen keys, limit epilogue choices (#122642)
|
Two changes: (1) in epilogue benchmark fusion, only take the top 6 choices (there were basically no choices taken after this in HF); (2) share a single precompilation function among matmuls with the same key. Pull Request resolved: https://github.com/pytorch/pytorch/pull/122642 Approved by: https://github.com/shunting314 ghstack dependencies: #124030
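A stripped-down sketch of the second change; the helper and the compile_all callback are illustrative, not the actual AlgorithmSelectorCache API. Lowerings that map to the same (name, input key, float32 matmul precision) triple share one memoized precompilation function, so identical templates are only precompiled once.

import functools
import torch

_precompile_cache = {}

def get_precompile_fn(name, inputs_key, compile_all):
    key = f"{name}: {inputs_key} : {torch.get_float32_matmul_precision()}"
    if key not in _precompile_cache:
        # lru_cache(None) makes the returned function a no-op after its first call.
        _precompile_cache[key] = functools.lru_cache(None)(compile_all)
    return _precompile_cache[key]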
|
diff --git a/test/hi.py b/test/hi.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test/inductor/test_max_autotune.py b/test/inductor/test_max_autotune.py
index 37b5d84cad..5745f4a405 100644
--- a/test/inductor/test_max_autotune.py
+++ b/test/inductor/test_max_autotune.py
@@ -445,6 +445,22 @@ class TestMaxAutotune(TestCase):
fn_c = torch.compile(mode="max-autotune-no-cudagraphs")(fn)
self.assertEqual(counters["inductor"]["select_algorithm_precompile"], 0)
+ @config.patch(autotune_local_cache=False, autotune_remote_cache=False)
+ def test_precompilations(self):
+ def fn(a, b, c):
+ a = (a @ b) @ c
+ a, b, c = (t.to(torch.float16) for t in [a, b, c])
+ return (a @ b) @ c
+
+ fn_c = torch.compile(mode="max-autotune-no-cudagraphs")(fn)
+ inputs = [torch.rand([256, 256], device="cuda") for _ in range(3)]
+
+ self.assertEqual(fn(*inputs), fn_c(*inputs), atol=1e-2, rtol=1e-2)
+
+ from torch._dynamo.utils import counters
+
+ self.assertEqual(counters["inductor"]["select_algorithm_precompile"], 2)
+
def test_cat_addmm(self):
def fn(a: torch.Tensor, b: torch.Tensor, c: torch.Tensor):
return torch.cat(
diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py
index 30c15bc64c..bb912bf16a 100644
--- a/torch/_inductor/codecache.py
+++ b/torch/_inductor/codecache.py
@@ -301,7 +301,7 @@ class PersistentCache(CacheBase):
return hit
if config.max_autotune or config.max_autotune_gemm:
- local_cache = self.get_local_cache()
+ local_cache = self.get_local_cache() if config.autotune_local_cache else {}
# check local cache first since it is data specific to the current machine
if (
not check_cache(local_cache)
diff --git a/torch/_inductor/config.py b/torch/_inductor/config.py
index d7141639d0..ae83b9533f 100644
--- a/torch/_inductor/config.py
+++ b/torch/_inductor/config.py
@@ -302,6 +302,9 @@ benchmark_multi_templates = (
os.environ.get("TORCHINDUCTOR_BENCHMARK_MULTI_TEMPLATES", "0") == "1"
)
+# Take how many of the top triton kernels to benchmark epilogue
+max_epilogue_benchmarked_choices = 3
+
# how many nodes to allow into a single fusion
max_fusion_size = 64
diff --git a/torch/_inductor/scheduler.py b/torch/_inductor/scheduler.py
index deb6ed05eb..4ea60b73f6 100644
--- a/torch/_inductor/scheduler.py
+++ b/torch/_inductor/scheduler.py
@@ -1835,6 +1835,8 @@ class Scheduler:
min_ms_fused = float("inf")
ms_fused_choice = None
+ triton_choices = 0
+
for choice, unfused_time in choice_timings.items():
if not isinstance(choice, torch._inductor.ir.TritonTemplateCallerBase):
continue
@@ -1842,6 +1844,10 @@ class Scheduler:
if unfused_time >= ms1 + ms2:
continue
+ triton_choices += 1
+ if triton_choices > config.max_epilogue_benchmarked_choices:
+ break
+
# TODO - parallel compile triton templates
# TODO - should prune/skip choices that are not within certain % of best choice
with node1.node.swap_as_triton_caller(choice):
diff --git a/torch/_inductor/select_algorithm.py b/torch/_inductor/select_algorithm.py
index eedb0995c3..d7361225ee 100644
--- a/torch/_inductor/select_algorithm.py
+++ b/torch/_inductor/select_algorithm.py
@@ -866,6 +866,15 @@ class ErrorFromChoice(RuntimeError):
class AlgorithmSelectorCache(PersistentCache):
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ # the autotuning will get occur in the scheduler, so there is
+ # no guarantee that the first lowering for a given key will also be the
+ # first to benchmark it. share a single precompilation function for all lowerings
+ # of a particular key
+ self.precompile_cache: Dict[str, Callable[[], None]] = {}
+
def __call__(
self,
name,
@@ -902,6 +911,8 @@ class AlgorithmSelectorCache(PersistentCache):
def make_benchmark_fn():
return self.make_benchmark_fn(choices, input_nodes, layout, input_gen_fns)
+ inputs_key = repr([self.key_of(x) for x in input_nodes])
+
def precompile(choices):
if (
precompilation_timeout_seconds is None
@@ -925,7 +936,7 @@ class AlgorithmSelectorCache(PersistentCache):
timings = self.lookup(
choices,
name,
- repr([self.key_of(x) for x in input_nodes]),
+ inputs_key,
benchmark=None,
)
@@ -935,6 +946,12 @@ class AlgorithmSelectorCache(PersistentCache):
if timings:
return no_op
+ precompile_key = (
+ f"{name}: {inputs_key} : {torch.get_float32_matmul_precision()}"
+ )
+ if precompile_func := self.precompile_cache.get(precompile_key):
+ return precompile_func
+
executor = ThreadPoolExecutor(max_workers=num_workers)
futures = executor.map(
lambda c: c.precompile(),
@@ -942,7 +959,9 @@ class AlgorithmSelectorCache(PersistentCache):
timeout=precompilation_timeout_seconds,
)
+ @functools.lru_cache(None)
def wait_on_futures():
+ counters["inductor"]["select_algorithm_precompile"] += 1
try:
iterator = iter(futures)
while True:
@@ -958,8 +977,11 @@ class AlgorithmSelectorCache(PersistentCache):
)
except StopIteration:
pass
+
executor.shutdown(wait=True)
+ self.precompile_cache[precompile_key] = wait_on_futures
+
return wait_on_futures
def autotune(choices):
@@ -980,7 +1002,7 @@ class AlgorithmSelectorCache(PersistentCache):
timings = self.lookup(
choices,
name,
- repr([self.key_of(x) for x in input_nodes]),
+ inputs_key,
autotune,
)
autotune_elapse = time.time() - autotune_start_ts
|
2.41.0
|
36b0d12fa68c62d6af5a9110a23eca4f0a7c4df
|
Tue, 16 Apr 2024 06:36:22 -0700
|
[PATCH 0237/1000] Don't clamp slices generated from cat kernel (#124139)
|
Fixes https://github.com/pytorch/pytorch/issues/123793 Signed-off-by: Edward Z. Yang <ezyang@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/124139 Approved by: https://github.com/Microve, https://github.com/peterbell10, https://github.com/Skylion007
|
diff --git a/test/inductor/test_torchinductor_dynamic_shapes.py b/test/inductor/test_torchinductor_dynamic_shapes.py
index b7f80471cb..84fc95a2c6 100644
--- a/test/inductor/test_torchinductor_dynamic_shapes.py
+++ b/test/inductor/test_torchinductor_dynamic_shapes.py
@@ -376,6 +376,21 @@ class TestInductorDynamic(TestCase):
if expect_fail:
self.fail("expected to fail, but actually passed")
+ @torch._dynamo.config.patch(
+ capture_scalar_outputs=True, capture_dynamic_output_shape_ops=True
+ )
+ def test_cat_unbacked_duplicate_size(self, device):
+ def f(x):
+ device = x.device
+ s, s2 = x.tolist()
+ g = torch.zeros(s, device=device)
+ g2 = torch.ones(s2, device=device)
+ return torch.ops.aten.cat.default([g, g, g2])
+
+ cf = torch.compile(fullgraph=True)(f)
+ arg = torch.tensor([4, 6], device="cuda")
+ self.assertEqual(f(arg), cf(arg))
+
@torch._dynamo.config.patch(
capture_scalar_outputs=True, capture_dynamic_output_shape_ops=True
)
diff --git a/torch/_inductor/ir.py b/torch/_inductor/ir.py
index 9baba3ae38..9960c02b33 100644
--- a/torch/_inductor/ir.py
+++ b/torch/_inductor/ir.py
@@ -3872,7 +3872,9 @@ class ConcatKernel(NopKernel):
for i in range(len(inputs)):
input_buffer = cls.realize_into(
inputs[i],
- SliceView.create(kernel, dim, offsets_start[i], offsets_end[i]),
+ SliceView.create(
+ kernel, dim, offsets_start[i], offsets_end[i], clamp=False
+ ),
)
concat_kernel.inputs.append(input_buffer)
|
2.41.0
|
d3cea3291346e66a870bbec51e4d1a3550300db
|
Tue, 16 Apr 2024 14:09:59 -0700
|
[PATCH 0238/1000] Fix derived dim bugs in ep.run_decomp (#123326)
|
Differential Revision: [D55730289](https://our.internmc.facebook.com/intern/diff/D55730289) Pull Request resolved: https://github.com/pytorch/pytorch/pull/123326 Approved by: https://github.com/avikchaudhuri
|
diff --git a/test/export/test_export.py b/test/export/test_export.py
index 73795fe0eb..b870fdb58e 100644
--- a/test/export/test_export.py
+++ b/test/export/test_export.py
@@ -690,7 +690,6 @@ class TestExport(TestCase):
self.assertEqual(ep.module()(torch.randn(4), torch.randn(5)).size()[0], 4)
- @testing.expectedFailurePreDispatchRunDecomp # T183703359
def test_derived_dim_nested(self):
class Foo(torch.nn.Module):
def forward(self, x, y):
@@ -756,7 +755,6 @@ class TestExport(TestCase):
self.assertEqual(ep.module()(torch.randn(5), torch.randn(9)).size()[0], 4)
- @testing.expectedFailurePreDispatchRunDecomp # T183703359
def test_derived_dim_integer(self):
class Foo(torch.nn.Module):
def forward(self, w):
@@ -1003,7 +1001,6 @@ class TestExport(TestCase):
6,
)
- @testing.expectedFailurePreDispatchRunDecomp # T183704046
def test_static_dim_constraints(self):
class Foo(torch.nn.Module):
def __init__(self):
@@ -1072,7 +1069,7 @@ class TestExport(TestCase):
self.assertEqual(vr.lower, 1)
self.assertEqual(vr.upper, 2)
- @testing.expectedFailurePreDispatchRunDecomp # T183703359
+ @testing.expectedFailurePreDispatchRunDecomp # T183703911
def test_derived_dim_1_2(self):
class Bar(torch.nn.Module):
def forward(self, x, y):
diff --git a/torch/export/exported_program.py b/torch/export/exported_program.py
index 5c18bbdeda..9ebc5f63cb 100644
--- a/torch/export/exported_program.py
+++ b/torch/export/exported_program.py
@@ -507,6 +507,16 @@ class ExportedProgram:
module.eval = types.MethodType(_eval, module) # type: ignore[method-assign]
return module
+ def _num_lifted_params_buffers(self):
+ return next(
+ (
+ i
+ for i, s in enumerate(self._graph_signature.input_specs)
+ if s.kind == InputKind.USER_INPUT
+ ),
+ len(self._graph_signature.input_specs),
+ )
+
@_disable_prexisiting_fake_mode
def run_decompositions(
self, decomp_table: Optional[Dict[torch._ops.OperatorBase, Callable]] = None
@@ -639,7 +649,12 @@ class ExportedProgram:
# (The node-level meta is addressed above.)
gm.meta.update(self.graph_module.meta)
- new_range_constraints = _get_updated_range_constraints(gm)
+ new_range_constraints = _get_updated_range_constraints(
+ gm,
+ self._num_lifted_params_buffers(),
+ pytree.tree_leaves(self.example_inputs),
+ _is_executorch=False,
+ )
constants = lift_constants_pass(gm, new_graph_signature, ConstantAttrMap())
for k, v in constants.items():
@@ -658,7 +673,6 @@ class ExportedProgram:
verifier=self.verifier,
constants=self.constants,
)
-
if len(new_range_constraints) > 0:
exported_program = exported_program._transform_do_not_use(
_AddRuntimeAssertionsForInlineConstraintsPass(new_range_constraints)
@@ -745,7 +759,12 @@ class ExportedProgram:
self.graph_signature, transformed_gm
),
state_dict=self.state_dict,
- range_constraints=_get_updated_range_constraints(transformed_gm),
+ range_constraints=_get_updated_range_constraints(
+ transformed_gm,
+ self._num_lifted_params_buffers(),
+ pytree.tree_leaves(self.example_inputs),
+ _is_executorch=False,
+ ),
module_call_graph=copy.deepcopy(self._module_call_graph),
example_inputs=self.example_inputs,
verifier=self.verifier,
@@ -790,6 +809,9 @@ class ExportedProgram:
def _get_updated_range_constraints(
gm: torch.fx.GraphModule,
+ num_lifted: Optional[int] = None,
+ example_inputs: Optional[List[Any]] = None,
+ _is_executorch: bool = True,
) -> "Dict[sympy.Symbol, Any]":
def get_shape_env(gm):
vals = [
@@ -801,18 +823,44 @@ def _get_updated_range_constraints(
fake_mode = detect_fake_mode(vals)
if fake_mode is not None:
- return fake_mode.shape_env
+ return fake_mode.shape_env, fake_mode
for v in vals:
if isinstance(v, torch.SymInt):
- return v.node.shape_env
-
- shape_env = get_shape_env(gm)
+ return v.node.shape_env, fake_mode
+
+ # FIXME(tmanlaibaatar) Remove this whole branch once https://github.com/pytorch/pytorch/pull/123764
+ if _is_executorch:
+ assert num_lifted is None
+ assert example_inputs is None
+ shape_env, _ = get_shape_env(gm)
+ if shape_env is None:
+ return {}
+ range_constraints = {
+ k: v
+ for k, v in shape_env.var_to_range.items()
+ if k not in shape_env.replacements
+ }
+ # Only when we have an unbacked symint, and it's used as constructor inputs,
+ # runtime_var_to_range will make a difference compated to var_to_range.
+ # e.g. [2, oo) -> [0, oo)
+ for k, v in shape_env.var_to_range.items():
+ if k not in shape_env.replacements:
+ range_constraints[k] = v
+ return range_constraints
+
+ assert num_lifted is not None
+ assert example_inputs is not None
+
+ shape_env, fake_mode = get_shape_env(gm)
if shape_env is None:
return {}
+
+ from torch.export.dynamic_shapes import _process_constraints
+
+ range_constraints = _process_constraints(fake_mode, gm, num_lifted, example_inputs)
+
range_constraints = {
- k: v
- for k, v in shape_env.var_to_range.items()
- if k not in shape_env.replacements
+ k: v for k, v in range_constraints.items() if k not in shape_env.replacements
}
# Only when we have an unbacked symint, and it's used as constructor inputs,
# runtime_var_to_range will make a difference compated to var_to_range.
|
2.41.0
|
d22dde877f49c68dd2213a059b49447f5c56ad7
|
Wed, 17 Apr 2024 06:15:32 +0000
|
[PATCH 0239/1000] Pointer to the nonzero limit ticket (#124244)
|
For the nonzero impl limits we still ask at runtime to file a new ticket, but we already have more than one. So I am pointing to the current open ticket instead. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124244 Approved by: https://github.com/ezyang
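A minimal, hedged sketch of what that limit means in practice (the helper name and the example shapes below are illustrative, not part of this patch): the kernels reject `nonzero` whenever `numel()` reaches `std::numeric_limits<int>::max()`.

```python
import torch

INT32_MAX = 2**31 - 1  # std::numeric_limits<int>::max(), the bound used by the TORCH_CHECK in the patch

def nonzero_supported(t: torch.Tensor) -> bool:
    # Mirrors the guard in nonzero_out_cuda / nonzero_out_mps:
    # the op is only allowed when numel() is strictly below INT_MAX.
    return t.numel() < INT32_MAX

print(nonzero_supported(torch.zeros(1024, 1024)))  # True: ~1e6 elements is fine
print(50_000 * 50_000 < INT32_MAX)                 # False: a 50_000 x 50_000 tensor would be rejected
```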
|
diff --git a/aten/src/ATen/native/cuda/Nonzero.cu b/aten/src/ATen/native/cuda/Nonzero.cu
index 5d62f7711d..e87f46cd84 100644
--- a/aten/src/ATen/native/cuda/Nonzero.cu
+++ b/aten/src/ATen/native/cuda/Nonzero.cu
@@ -112,7 +112,7 @@ void nonzero_cuda_out_impl(const Tensor& self, Tensor& out){
Tensor& nonzero_out_cuda(const Tensor& self, Tensor& out){
TORCH_CHECK(self.numel() < std::numeric_limits<int>::max(), "nonzero is not supported for tensors with more than INT_MAX elements, \
- file a support request");
+ See https://github.com/pytorch/pytorch/issues/51871");
TORCH_CHECK(out.dtype() == at::kLong, "Expected object of scalar type ", at::kLong, " as out, but got ", out.dtype());
TORCH_CHECK(self.device() == out.device(), "expected self and out to be on the same device, but got out on ",
out.device(), " and self on ", self.device());
diff --git a/aten/src/ATen/native/mps/operations/Indexing.mm b/aten/src/ATen/native/mps/operations/Indexing.mm
index 2b0ab90c1e..d86f57c49f 100644
--- a/aten/src/ATen/native/mps/operations/Indexing.mm
+++ b/aten/src/ATen/native/mps/operations/Indexing.mm
@@ -266,7 +266,7 @@ Tensor& nonzero_out_mps(const Tensor& self, Tensor& out_) {
TORCH_CHECK(self.numel() < std::numeric_limits<int>::max(),
"nonzero is not supported for tensors with more than INT_MAX elements, \
- file a support request");
+ See https://github.com/pytorch/pytorch/issues/51871");
TORCH_CHECK(
out_.dtype() == at::kLong, "Expected object of scalar type ", at::kLong, " as out, but got ", out_.dtype());
TORCH_CHECK(self.device() == out_.device(),
|
2.41.0
|
3effa585510f32bc4ebc94b6b7300d7d99e078d
|
Wed, 17 Apr 2024 06:45:58 +0000
|
[PATCH 0240/1000] Enable UFMT on all of `test/distributed` (#123539)
|
Partially addresses #123062. Ran lintrunner on `test/distributed`. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123539 Approved by: https://github.com/ezyang
|
diff --git a/.lintrunner.toml b/.lintrunner.toml
index 817d35f34f..9e83a8b96e 100644
--- a/.lintrunner.toml
+++ b/.lintrunner.toml
@@ -1015,134 +1015,6 @@ exclude_patterns = [
'test/_nvfuser/test_python_frontend.py',
'test/_nvfuser/test_torchscript.py',
'test/delete.py',
- 'test/distributed/_shard/sharded_optim/test_sharded_optim.py',
- 'test/distributed/_shard/sharded_tensor/ops/test_binary_cmp.py',
- 'test/distributed/_shard/sharded_tensor/ops/test_embedding.py',
- 'test/distributed/_shard/sharded_tensor/ops/test_embedding_bag.py',
- 'test/distributed/_shard/sharded_tensor/ops/test_init.py',
- 'test/distributed/_shard/sharded_tensor/ops/test_tensor_ops.py',
- 'test/distributed/_shard/sharded_tensor/test_logger.py',
- 'test/distributed/_shard/sharded_tensor/test_sharded_tensor.py',
- 'test/distributed/_shard/sharded_tensor/test_sharded_tensor_reshard.py',
- 'test/distributed/_shard/sharding_plan/test_sharding_plan.py',
- 'test/distributed/_shard/sharding_spec/test_sharding_spec.py',
- 'test/distributed/_shard/test_sharder.py',
- 'test/distributed/_tools/test_memory_tracker.py',
- 'test/distributed/algorithms/ddp_comm_hooks/test_ddp_hooks.py',
- 'test/distributed/algorithms/quantization/test_quantization.py',
- 'test/distributed/algorithms/test_join.py',
- 'test/distributed/argparse_util_test.py',
- 'test/distributed/bin/test_script.py',
- 'test/distributed/elastic/agent/server/test/__init__.py',
- 'test/distributed/elastic/agent/server/test/api_test.py',
- 'test/distributed/elastic/agent/server/test/local_elastic_agent_test.py',
- 'test/distributed/elastic/events/lib_test.py',
- 'test/distributed/elastic/metrics/__init__.py',
- 'test/distributed/elastic/metrics/api_test.py',
- 'test/distributed/elastic/multiprocessing/api_test.py',
- 'test/distributed/elastic/multiprocessing/bin/echo1.py',
- 'test/distributed/elastic/multiprocessing/bin/echo2.py',
- 'test/distributed/elastic/multiprocessing/bin/echo3.py',
- 'test/distributed/elastic/multiprocessing/bin/test_script.py',
- 'test/distributed/elastic/multiprocessing/bin/zombie_test.py',
- 'test/distributed/elastic/multiprocessing/errors/api_test.py',
- 'test/distributed/elastic/multiprocessing/errors/error_handler_test.py',
- 'test/distributed/elastic/multiprocessing/redirects_test.py',
- 'test/distributed/elastic/multiprocessing/tail_log_test.py',
- 'test/distributed/elastic/rendezvous/__init__.py',
- 'test/distributed/elastic/rendezvous/api_test.py',
- 'test/distributed/elastic/rendezvous/c10d_rendezvous_backend_test.py',
- 'test/distributed/elastic/rendezvous/dynamic_rendezvous_test.py',
- 'test/distributed/elastic/rendezvous/etcd_rendezvous_backend_test.py',
- 'test/distributed/elastic/rendezvous/etcd_rendezvous_test.py',
- 'test/distributed/elastic/rendezvous/etcd_server_test.py',
- 'test/distributed/elastic/rendezvous/rendezvous_backend_test.py',
- 'test/distributed/elastic/rendezvous/static_rendezvous_test.py',
- 'test/distributed/elastic/rendezvous/utils_test.py',
- 'test/distributed/elastic/timer/__init__.py',
- 'test/distributed/elastic/timer/api_test.py',
- 'test/distributed/elastic/timer/file_based_local_timer_test.py',
- 'test/distributed/elastic/timer/local_timer_example.py',
- 'test/distributed/elastic/timer/local_timer_test.py',
- 'test/distributed/elastic/utils/__init__.py',
- 'test/distributed/elastic/utils/data/__init__.py',
- 'test/distributed/elastic/utils/data/cycling_iterator_test.py',
- 'test/distributed/elastic/utils/distributed_test.py',
- 'test/distributed/elastic/utils/logging_test.py',
- 'test/distributed/elastic/utils/util_test.py',
- 'test/distributed/launcher/__init__.py',
- 'test/distributed/launcher/api_test.py',
- 'test/distributed/launcher/bin/test_script.py',
- 'test/distributed/launcher/bin/test_script_init_method.py',
- 'test/distributed/launcher/bin/test_script_is_torchelastic_launched.py',
- 'test/distributed/launcher/bin/test_script_local_rank.py',
- 'test/distributed/launcher/launch_test.py',
- 'test/distributed/launcher/run_test.py',
- 'test/distributed/nn/jit/__init__.py',
- 'test/distributed/nn/jit/test_instantiator.py',
- 'test/distributed/optim/test_apply_optimizer_in_backward.py',
- 'test/distributed/optim/test_named_optimizer.py',
- 'test/distributed/optim/test_zero_redundancy_optimizer.py',
- 'test/distributed/pipeline/sync/__init__.py',
- 'test/distributed/pipeline/sync/conftest.py',
- 'test/distributed/pipeline/sync/skip/__init__.py',
- 'test/distributed/pipeline/sync/skip/test_api.py',
- 'test/distributed/pipeline/sync/skip/test_gpipe.py',
- 'test/distributed/pipeline/sync/skip/test_inspect_skip_layout.py',
- 'test/distributed/pipeline/sync/skip/test_leak.py',
- 'test/distributed/pipeline/sync/skip/test_portal.py',
- 'test/distributed/pipeline/sync/skip/test_stash_pop.py',
- 'test/distributed/pipeline/sync/skip/test_tracker.py',
- 'test/distributed/pipeline/sync/skip/test_verify_skippables.py',
- 'test/distributed/pipeline/sync/test_balance.py',
- 'test/distributed/pipeline/sync/test_bugs.py',
- 'test/distributed/pipeline/sync/test_checkpoint.py',
- 'test/distributed/pipeline/sync/test_copy.py',
- 'test/distributed/pipeline/sync/test_deferred_batch_norm.py',
- 'test/distributed/pipeline/sync/test_dependency.py',
- 'test/distributed/pipeline/sync/test_inplace.py',
- 'test/distributed/pipeline/sync/test_microbatch.py',
- 'test/distributed/pipeline/sync/test_phony.py',
- 'test/distributed/pipeline/sync/test_pipe.py',
- 'test/distributed/pipeline/sync/test_pipeline.py',
- 'test/distributed/pipeline/sync/test_stream.py',
- 'test/distributed/pipeline/sync/test_transparency.py',
- 'test/distributed/pipeline/sync/test_worker.py',
- 'test/distributed/rpc/cuda/test_tensorpipe_agent.py',
- 'test/distributed/rpc/test_faulty_agent.py',
- 'test/distributed/rpc/test_share_memory.py',
- 'test/distributed/rpc/test_tensorpipe_agent.py',
- 'test/distributed/tensor/parallel/__init__.py',
- 'test/distributed/tensor/parallel/test_ddp_2d_parallel.py',
- 'test/distributed/tensor/parallel/test_fsdp_2d_parallel.py',
- 'test/distributed/tensor/parallel/test_parallelize_api.py',
- 'test/distributed/tensor/parallel/test_tp_examples.py',
- 'test/distributed/tensor/parallel/test_tp_random_state.py',
- 'test/distributed/tensor/parallel/test_tp_style.py',
- 'test/distributed/tensor/parallel/test_view_sharding_dim_change.py',
- 'test/distributed/test_c10d_common.py',
- 'test/distributed/test_c10d_gloo.py',
- 'test/distributed/test_c10d_logger.py',
- 'test/distributed/test_c10d_nccl.py',
- 'test/distributed/test_c10d_object_collectives.py',
- 'test/distributed/test_c10d_pypg.py',
- 'test/distributed/test_c10d_spawn.py',
- 'test/distributed/test_c10d_spawn_gloo.py',
- 'test/distributed/test_c10d_spawn_nccl.py',
- 'test/distributed/test_c10d_spawn_ucc.py',
- 'test/distributed/test_c10d_ucc.py',
- 'test/distributed/test_collective_utils.py',
- 'test/distributed/test_data_parallel.py',
- 'test/distributed/test_distributed_spawn.py',
- 'test/distributed/test_dynamo_distributed.py',
- 'test/distributed/test_fake_pg.py',
- 'test/distributed/test_functional_api.py',
- 'test/distributed/test_inductor_collectives.py',
- 'test/distributed/test_launcher.py',
- 'test/distributed/test_multi_threaded_pg.py',
- 'test/distributed/test_nccl.py',
- 'test/distributed/test_pg_wrapper.py',
- 'test/distributed/test_store.py',
'test/expect/__init__.py',
'test/jit/__init__.py',
'test/jit/_imported_class_test/__init__.py',
diff --git a/test/distributed/_shard/sharded_optim/test_sharded_optim.py b/test/distributed/_shard/sharded_optim/test_sharded_optim.py
index 30202ee062..6b08479fbb 100644
--- a/test/distributed/_shard/sharded_optim/test_sharded_optim.py
+++ b/test/distributed/_shard/sharded_optim/test_sharded_optim.py
@@ -1,32 +1,21 @@
# Owner(s): ["oncall: distributed"]
+from copy import deepcopy
+
import torch
import torch.optim as optim
-from torch.distributed._shard import (
- sharded_tensor,
- shard_parameter
-)
-
-from copy import deepcopy
-from torch.distributed._shard.sharding_spec import (
- ChunkShardingSpec,
-)
-from torch.distributed._shard.sharded_optim import (
- ShardedOptimizer,
-)
-from torch.testing._internal.common_distributed import (
- requires_nccl,
- skip_if_lt_x_gpu,
-)
-from torch.testing._internal.common_utils import (
- run_tests,
-)
+from torch.distributed._shard import shard_parameter, sharded_tensor
+from torch.distributed._shard.sharded_optim import ShardedOptimizer
+from torch.distributed._shard.sharding_spec import ChunkShardingSpec
+from torch.testing._internal.common_distributed import requires_nccl, skip_if_lt_x_gpu
+from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._shard.sharded_tensor import (
ShardedTensorTestBase,
with_comms,
)
+
class MyShardedModel(torch.nn.Module):
def __init__(self, spec=None, group=None):
super().__init__()
@@ -34,7 +23,11 @@ class MyShardedModel(torch.nn.Module):
torch.manual_seed(0)
self.param = torch.nn.Parameter(torch.rand(5, 10))
if spec is not None:
- self.sharded_param = torch.nn.Parameter(sharded_tensor.rand(spec, 20, 10, requires_grad=True, process_group=group))
+ self.sharded_param = torch.nn.Parameter(
+ sharded_tensor.rand(
+ spec, 20, 10, requires_grad=True, process_group=group
+ )
+ )
else:
self.sharded_param = torch.nn.Parameter(torch.rand(5, 10))
@@ -87,7 +80,6 @@ class MyShardedLinear(torch.nn.Module):
class TestShardedOptimizer(ShardedTensorTestBase):
-
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(4)
@requires_nccl()
@@ -105,8 +97,9 @@ class TestShardedOptimizer(ShardedTensorTestBase):
sharded_model = MyShardedModel(spec=rowwise_spec).cuda()
# copy the parameters from local model
- sharded_model.sharded_param.local_shards()[0].tensor = \
+ sharded_model.sharded_param.local_shards()[0].tensor = (
local_model.sharded_param.detach().clone().requires_grad_()
+ )
local_optim = optim.SGD(local_model.parameters(), lr=0.1)
sharded_model_params = dict(sharded_model.named_parameters())
@@ -137,12 +130,10 @@ class TestShardedOptimizer(ShardedTensorTestBase):
new_val = sharded_optim.named_params[key]
if isinstance(val, sharded_tensor.ShardedTensor):
self.assertNotEqual(
- val.local_shards()[0].tensor,
- new_val.local_shards()[0].tensor
+ val.local_shards()[0].tensor, new_val.local_shards()[0].tensor
)
self.assertEqual(
- new_val.local_shards()[0].tensor,
- local_model.sharded_param
+ new_val.local_shards()[0].tensor, local_model.sharded_param
)
else:
self.assertNotEqual(val, new_val)
@@ -179,5 +170,6 @@ class TestShardedOptimizer(ShardedTensorTestBase):
self.assertTrue("linear2.weight" in param_keys)
self.assertFalse("bias" in param_keys)
-if __name__ == '__main__':
+
+if __name__ == "__main__":
run_tests()
diff --git a/test/distributed/_shard/sharded_tensor/ops/test_binary_cmp.py b/test/distributed/_shard/sharded_tensor/ops/test_binary_cmp.py
index 33fc49f81c..1cfed1945a 100644
--- a/test/distributed/_shard/sharded_tensor/ops/test_binary_cmp.py
+++ b/test/distributed/_shard/sharded_tensor/ops/test_binary_cmp.py
@@ -1,37 +1,37 @@
# Owner(s): ["oncall: distributed"]
import sys
+
import torch
import torch.distributed as dist
from torch.distributed._shard import sharded_tensor
+from torch.distributed._shard.sharding_spec import ChunkShardingSpec
from torch.distributed.distributed_c10d import _get_default_group
-from torch.testing._internal.common_distributed import (
- requires_nccl,
- skip_if_lt_x_gpu,
-)
-from torch.distributed._shard.sharding_spec import (
- ChunkShardingSpec,
-)
+from torch.testing._internal.common_distributed import requires_nccl, skip_if_lt_x_gpu
+from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
from torch.testing._internal.distributed._shard.sharded_tensor import (
ShardedTensorTestBase,
with_comms,
)
-from torch.testing._internal.common_utils import (
- TEST_WITH_DEV_DBG_ASAN,
- run_tests,
-)
if TEST_WITH_DEV_DBG_ASAN:
- print("Skip dev-asan as torch + multiprocessing spawn have known issues", file=sys.stderr)
+ print(
+ "Skip dev-asan as torch + multiprocessing spawn have known issues",
+ file=sys.stderr,
+ )
sys.exit(0)
+
class TestShardedTensorBinaryOps(ShardedTensorTestBase):
- """ Test base for binary comparison functions such as torch.equal, torch.allclose etc. for ShardedTensor """
+ """Test base for binary comparison functions such as torch.equal, torch.allclose etc. for ShardedTensor"""
+
seed = 42
- def get_random_tensors(self, spec1, spec2, *sizes, pg1=None, pg2=None, seed_offset=0):
+ def get_random_tensors(
+ self, spec1, spec2, *sizes, pg1=None, pg2=None, seed_offset=0
+ ):
pg1 = _get_default_group() if pg1 is None else pg1
pg2 = _get_default_group() if pg2 is None else pg2
torch.manual_seed(TestShardedTensorBinaryOps.seed)
@@ -128,7 +128,7 @@ class TestShardedTensorBinaryOps(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_torch_equal(self):
- """ Test torch.equal(ShardedTensor, ShardedTensor) """
+ """Test torch.equal(ShardedTensor, ShardedTensor)"""
spec, alt_spec = self.get_gpu_specs()
st1, st2 = self.get_random_tensors(spec, spec, 10, 10)
@@ -144,7 +144,7 @@ class TestShardedTensorBinaryOps(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_torch_allclose(self):
- """ Test torch.allclose(ShardedTensor, ShardedTensor) """
+ """Test torch.allclose(ShardedTensor, ShardedTensor)"""
spec, alt_spec = self.get_gpu_specs()
@@ -158,5 +158,6 @@ class TestShardedTensorBinaryOps(ShardedTensorTestBase):
# sharded_tensor.rand produces uniform values in the [0,1] range.
self.assertTrue(torch.allclose(st1, st2, atol=1))
-if __name__ == '__main__':
+
+if __name__ == "__main__":
run_tests()
diff --git a/test/distributed/_shard/sharded_tensor/ops/test_embedding.py b/test/distributed/_shard/sharded_tensor/ops/test_embedding.py
index 9291e06e31..98e1efee92 100644
--- a/test/distributed/_shard/sharded_tensor/ops/test_embedding.py
+++ b/test/distributed/_shard/sharded_tensor/ops/test_embedding.py
@@ -4,20 +4,12 @@ import sys
import torch
import torch.distributed as dist
-from torch.distributed._shard import (
- shard_parameter,
-)
-from torch.testing._internal.common_distributed import (
- requires_nccl,
- skip_if_lt_x_gpu,
-)
-from torch.testing._internal.common_utils import (
- TEST_WITH_DEV_DBG_ASAN,
- run_tests,
-)
+from torch.distributed._shard import shard_parameter
+from torch.testing._internal.common_distributed import requires_nccl, skip_if_lt_x_gpu
+from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
from torch.testing._internal.distributed._shard.sharded_tensor import (
- TEST_GPU_NUM,
ShardedTensorTestBase,
+ TEST_GPU_NUM,
with_comms,
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_ops_common import (
@@ -64,9 +56,7 @@ class TestShardedEmbedding(ShardedTensorTestBase):
)
# Copy the weights from local embedding
- sharded_embedding.weight = clone_module_parameter(
- local_embedding, "weight"
- )
+ sharded_embedding.weight = clone_module_parameter(local_embedding, "weight")
# Shard the parameter.
shard_parameter(sharded_embedding, "weight", spec)
@@ -134,13 +124,26 @@ class TestShardedEmbedding(ShardedTensorTestBase):
self._run_sharded_embedding(spec, [34], 15, 14, padding_idx=10)
self._run_sharded_embedding(spec, [8, 6, 5, 4], 23, 13, padding_idx=12)
self._run_sharded_embedding(
- spec, [4, 5, 6], 23, 13, max_norm=2.5,
+ spec,
+ [4, 5, 6],
+ 23,
+ 13,
+ max_norm=2.5,
)
self._run_sharded_embedding(
- spec, [12, 7, 16], 23, 13, max_norm=2.5,
+ spec,
+ [12, 7, 16],
+ 23,
+ 13,
+ max_norm=2.5,
)
self._run_sharded_embedding(
- spec, [8, 16, 20], 12, 12, max_norm=1.25, norm_type=1.0,
+ spec,
+ [8, 16, 20],
+ 12,
+ 12,
+ max_norm=1.25,
+ norm_type=1.0,
)
self._run_sharded_embedding(spec, [30], 15, 14, max_norm=2.0)
@@ -154,11 +157,19 @@ class TestShardedEmbedding(ShardedTensorTestBase):
self._run_sharded_embedding(spec, [5, 4], 32, 12)
self._run_sharded_embedding(spec, [6, 7, 6], 64, 11)
self._run_sharded_embedding(
- spec, [5, 12], 16, 22, max_norm=2.5,
+ spec,
+ [5, 12],
+ 16,
+ 22,
+ max_norm=2.5,
)
self._run_sharded_embedding(spec, [6, 7, 6], 64, 11, padding_idx=30)
self._run_sharded_embedding(
- spec, [6, 5, 3], 26, 11, max_norm=2.0,
+ spec,
+ [6, 5, 3],
+ 26,
+ 11,
+ max_norm=2.0,
)
# Test uneven split.
@@ -167,7 +178,11 @@ class TestShardedEmbedding(ShardedTensorTestBase):
self._run_sharded_embedding(spec, [4], 21, 11)
self._run_sharded_embedding(spec, [8, 6, 5, 4], 21, 11, padding_idx=10)
self._run_sharded_embedding(
- spec, [6, 5, 8], 28, 5, max_norm=2.0,
+ spec,
+ [6, 5, 8],
+ 28,
+ 5,
+ max_norm=2.0,
)
self._run_sharded_embedding(spec, [4], 14, 11, max_norm=2.5)
diff --git a/test/distributed/_shard/sharded_tensor/ops/test_embedding_bag.py b/test/distributed/_shard/sharded_tensor/ops/test_embedding_bag.py
index 4843534f68..98feeba767 100644
--- a/test/distributed/_shard/sharded_tensor/ops/test_embedding_bag.py
+++ b/test/distributed/_shard/sharded_tensor/ops/test_embedding_bag.py
@@ -4,20 +4,12 @@ import sys
import torch
import torch.distributed as dist
-from torch.distributed._shard import (
- shard_parameter,
-)
-from torch.testing._internal.common_distributed import (
- requires_nccl,
- skip_if_lt_x_gpu,
-)
-from torch.testing._internal.common_utils import (
- TEST_WITH_DEV_DBG_ASAN,
- run_tests,
-)
+from torch.distributed._shard import shard_parameter
+from torch.testing._internal.common_distributed import requires_nccl, skip_if_lt_x_gpu
+from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
from torch.testing._internal.distributed._shard.sharded_tensor import (
- TEST_GPU_NUM,
ShardedTensorTestBase,
+ TEST_GPU_NUM,
with_comms,
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_ops_common import (
@@ -184,7 +176,12 @@ class TestShardedEmbeddingBag(ShardedTensorTestBase):
self._run_sharded_embedding_bag(spec, [5, 4], 17, 12, "mean")
self._run_sharded_embedding_bag(spec, [6, 7], 21, 11, "max")
self._run_sharded_embedding_bag(
- spec, [5, 5], 17, 14, "sum", max_norm=2.5,
+ spec,
+ [5, 5],
+ 17,
+ 14,
+ "sum",
+ max_norm=2.5,
)
self._run_sharded_embedding_bag(
spec,
diff --git a/test/distributed/_shard/sharded_tensor/ops/test_init.py b/test/distributed/_shard/sharded_tensor/ops/test_init.py
index 6cbfd04b21..9d67233376 100644
--- a/test/distributed/_shard/sharded_tensor/ops/test_init.py
+++ b/test/distributed/_shard/sharded_tensor/ops/test_init.py
@@ -1,37 +1,34 @@
# Owner(s): ["oncall: distributed"]
import sys
+
import torch
from torch.distributed._shard import sharded_tensor
-from torch.distributed._shard.sharding_spec import (
- ChunkShardingSpec,
-)
-from torch.testing._internal.common_distributed import (
- requires_nccl,
- skip_if_lt_x_gpu,
-)
+from torch.distributed._shard.sharding_spec import ChunkShardingSpec
+from torch.testing._internal.common_distributed import requires_nccl, skip_if_lt_x_gpu
+from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
from torch.testing._internal.distributed._shard.sharded_tensor import (
ShardedTensorTestBase,
with_comms,
)
-from torch.testing._internal.common_utils import (
- TEST_WITH_DEV_DBG_ASAN,
- run_tests,
-)
if TEST_WITH_DEV_DBG_ASAN:
- print("Skip dev-asan as torch + multiprocessing spawn have known issues", file=sys.stderr)
+ print(
+ "Skip dev-asan as torch + multiprocessing spawn have known issues",
+ file=sys.stderr,
+ )
sys.exit(0)
+
class TestShardedTensorNNInit(ShardedTensorTestBase):
- """ Testing torch.nn.init functions for ShardedTensor """
+ """Testing torch.nn.init functions for ShardedTensor"""
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_init_sharded_tensor_with_uniform(self):
- """ Test torch.nn.init.uniform_(ShardedTensor, a, b) """
+ """Test torch.nn.init.uniform_(ShardedTensor, a, b)"""
spec = ChunkShardingSpec(
dim=0,
@@ -66,7 +63,7 @@ class TestShardedTensorNNInit(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_init_sharded_tensor_with_normal(self):
- """ Test torch.nn.init.normal_(ShardedTensor, mean, std) """
+ """Test torch.nn.init.normal_(ShardedTensor, mean, std)"""
spec = ChunkShardingSpec(
dim=0,
@@ -101,7 +98,7 @@ class TestShardedTensorNNInit(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_init_sharded_tensor_with_kaiming_uniform(self):
- """ Test torch.nn.init.kaiming_uniform_(ShardedTensor, a, mode, nonlinearit) """
+ """Test torch.nn.init.kaiming_uniform_(ShardedTensor, a, mode, nonlinearit)"""
spec = ChunkShardingSpec(
dim=0,
@@ -115,7 +112,7 @@ class TestShardedTensorNNInit(ShardedTensorTestBase):
h, w = 8, 2
expected_h = 2
expected_device = torch.device(f"cuda:{self.rank}")
- a, mode, nonlinearity = 0, 'fan_in', 'leaky_relu'
+ a, mode, nonlinearity = 0, "fan_in", "leaky_relu"
seed = 1234
dtype = torch.double
@@ -129,8 +126,11 @@ class TestShardedTensorNNInit(ShardedTensorTestBase):
torch.nn.init.kaiming_uniform_(st, a=a, mode=mode, nonlinearity=nonlinearity)
torch.manual_seed(seed)
- torch.nn.init.kaiming_uniform_(local_tensor_clone, a=a, mode=mode, nonlinearity=nonlinearity)
+ torch.nn.init.kaiming_uniform_(
+ local_tensor_clone, a=a, mode=mode, nonlinearity=nonlinearity
+ )
self.assertEqual(local_tensor_clone, st.local_shards()[0].tensor)
-if __name__ == '__main__':
+
+if __name__ == "__main__":
run_tests()
diff --git a/test/distributed/_shard/sharded_tensor/ops/test_tensor_ops.py b/test/distributed/_shard/sharded_tensor/ops/test_tensor_ops.py
index 977fa701b4..ca49f52d08 100644
--- a/test/distributed/_shard/sharded_tensor/ops/test_tensor_ops.py
+++ b/test/distributed/_shard/sharded_tensor/ops/test_tensor_ops.py
@@ -5,22 +5,15 @@ import copy
import torch
import torch.distributed._shard.sharded_tensor as sharded_tensor
-from torch.distributed._shard.sharding_spec import (
- ChunkShardingSpec,
-)
-from torch.testing._internal.common_distributed import (
- requires_nccl,
- skip_if_lt_x_gpu,
-)
+from torch.distributed._shard.sharding_spec import ChunkShardingSpec
+from torch.testing._internal.common_distributed import requires_nccl, skip_if_lt_x_gpu
+from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._shard.sharded_tensor import (
- TEST_GPU_NUM,
ShardedTensorTestBase,
+ TEST_GPU_NUM,
with_comms,
)
-from torch.testing._internal.common_utils import (
- run_tests,
-)
class TestTensorOps(ShardedTensorTestBase):
diff --git a/test/distributed/_shard/sharded_tensor/test_logger.py b/test/distributed/_shard/sharded_tensor/test_logger.py
index d1560261ad..fa946819f9 100644
--- a/test/distributed/_shard/sharded_tensor/test_logger.py
+++ b/test/distributed/_shard/sharded_tensor/test_logger.py
@@ -3,10 +3,7 @@
import logging
from torch.distributed._shard.sharded_tensor.logger import _get_or_create_logger
-from torch.testing._internal.common_utils import (
- TestCase,
- run_tests,
-)
+from torch.testing._internal.common_utils import run_tests, TestCase
class ShardingSpecLoggerTest(TestCase):
diff --git a/test/distributed/_shard/sharded_tensor/test_sharded_tensor.py b/test/distributed/_shard/sharded_tensor/test_sharded_tensor.py
index da894062ac..141a52ddd8 100644
--- a/test/distributed/_shard/sharded_tensor/test_sharded_tensor.py
+++ b/test/distributed/_shard/sharded_tensor/test_sharded_tensor.py
@@ -70,6 +70,7 @@ if TEST_WITH_DEV_DBG_ASAN:
)
sys.exit(0)
+
class TestShardedTensorMetadata(TestCase):
def test_serialize_and_deserialize(self):
shard_metadatas = [
@@ -92,34 +93,59 @@ class TestShardedTensorMetadata(TestCase):
shard_offsets=[5, 5],
shard_sizes=[5, 5],
placement="rank:3/cuda:3",
- )
+ ),
]
dtypes = [
- torch.float, torch.double, torch.cfloat, torch.cdouble, torch.half,
- torch.bfloat16, torch.uint8, torch.int8, torch.short, torch.int,
- torch.long, torch.bool]
+ torch.float,
+ torch.double,
+ torch.cfloat,
+ torch.cdouble,
+ torch.half,
+ torch.bfloat16,
+ torch.uint8,
+ torch.int8,
+ torch.short,
+ torch.int,
+ torch.long,
+ torch.bool,
+ ]
layouts = [torch.strided, torch.sparse_coo]
requires_grads = [True, False]
- memory_formats = [torch.contiguous_format, torch.channels_last, torch.preserve_format]
+ memory_formats = [
+ torch.contiguous_format,
+ torch.channels_last,
+ torch.preserve_format,
+ ]
pin_memories = [True, False]
- for tensor_properties_input in itertools.product(dtypes, layouts, requires_grads, memory_formats, pin_memories):
- dtype, layout, requires_grad, memory_format, pin_memory = tensor_properties_input
+ for tensor_properties_input in itertools.product(
+ dtypes, layouts, requires_grads, memory_formats, pin_memories
+ ):
+ (
+ dtype,
+ layout,
+ requires_grad,
+ memory_format,
+ pin_memory,
+ ) = tensor_properties_input
expected_st_metadata = sharded_tensor.ShardedTensorMetadata(
shard_metadatas,
(10, 10),
- TensorProperties(dtype, layout, requires_grad, memory_format, pin_memory)
+ TensorProperties(
+ dtype, layout, requires_grad, memory_format, pin_memory
+ ),
)
pickled_obj = pickle.dumps(expected_st_metadata)
st_metadata = pickle.loads(pickled_obj)
self.assertEqual(expected_st_metadata, st_metadata)
+
class TestCreateTensorFromParams(TestCase):
- @skip_but_pass_in_sandcastle_if(not TEST_CUDA, 'CUDA GPU is needed')
+ @skip_but_pass_in_sandcastle_if(not TEST_CUDA, "CUDA GPU is needed")
def test_empty(self):
expected_dtype = torch.double
tensor_properties = TensorProperties(
@@ -127,10 +153,12 @@ class TestCreateTensorFromParams(TestCase):
layout=torch.strided,
requires_grad=False,
pin_memory=False,
- memory_format=torch.contiguous_format)
- local_device = torch.device('cuda:0')
+ memory_format=torch.contiguous_format,
+ )
+ local_device = torch.device("cuda:0")
local_tensor = _create_tensor_from_params(
- 5, 10, local_device=local_device, tensor_properties=tensor_properties)
+ 5, 10, local_device=local_device, tensor_properties=tensor_properties
+ )
self.assertEqual(local_device, local_tensor.device)
self.assertEqual(expected_dtype, local_tensor.dtype)
self.assertEqual(torch.strided, local_tensor.layout)
@@ -154,7 +182,7 @@ class TestShardParameter(ShardedTensorTestBase):
fc = torch.nn.Linear(12, 12).cuda(self.rank)
weight_og = fc.weight.clone()
- shard_parameter(fc, 'weight', spec)
+ shard_parameter(fc, "weight", spec)
# Verify.
self.assertTrue(isinstance(fc.weight, ShardedTensor))
@@ -163,7 +191,9 @@ class TestShardParameter(ShardedTensorTestBase):
self.assertEqual(torch.Size([3, 12]), local_shards[0].tensor.size())
self.assertEqual(3, local_shards[0].tensor.size(0))
self.assertEqual(12, local_shards[0].tensor.size(1))
- self.assertEqual(torch.narrow(weight_og, 0, 3 * self.rank, 3), local_shards[0].tensor)
+ self.assertEqual(
+ torch.narrow(weight_og, 0, 3 * self.rank, 3), local_shards[0].tensor
+ )
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(4)
@@ -180,20 +210,22 @@ class TestShardParameter(ShardedTensorTestBase):
)
fc = torch.nn.Linear(12, 12).cuda(self.rank)
- with self.assertRaisesRegex(ValueError, 'does not match with src_rank'):
- shard_parameter(fc, 'weight', spec, src_rank=self.rank)
+ with self.assertRaisesRegex(ValueError, "does not match with src_rank"):
+ shard_parameter(fc, "weight", spec, src_rank=self.rank)
- with self.assertRaisesRegex(AttributeError, 'has no attribute'):
- shard_parameter(fc, 'foo', spec)
+ with self.assertRaisesRegex(AttributeError, "has no attribute"):
+ shard_parameter(fc, "foo", spec)
- with self.assertRaisesRegex(ValueError, 'Expected Linear.bias to be a Tensor, but found str'):
+ with self.assertRaisesRegex(
+ ValueError, "Expected Linear.bias to be a Tensor, but found str"
+ ):
del fc.bias
fc.bias = "foo"
- shard_parameter(fc, 'bias', spec)
+ shard_parameter(fc, "bias", spec)
- with self.assertRaisesRegex(ValueError, 'not a contiguous Tensor'):
+ with self.assertRaisesRegex(ValueError, "not a contiguous Tensor"):
fc.bias = torch.rand(10, 10).cuda(self.rank).t()
- shard_parameter(fc, 'bias', spec)
+ shard_parameter(fc, "bias", spec)
spec = ChunkShardingSpec(
dim=0,
@@ -204,23 +236,25 @@ class TestShardParameter(ShardedTensorTestBase):
"rank:3/cuda:3",
],
)
- with self.assertRaisesRegex(ValueError, 'does not match with sharding_spec'):
- shard_parameter(fc, 'weight', spec)
+ with self.assertRaisesRegex(ValueError, "does not match with sharding_spec"):
+ shard_parameter(fc, "weight", spec)
- spec = EnumerableShardingSpec([
- ShardMetadata(
- shard_offsets=[0, 0],
- shard_sizes=[5, 5],
- placement="rank:0/cuda:0",
- ),
- ShardMetadata(
- shard_offsets=[5, 0],
- shard_sizes=[5, 5],
- placement="rank:1/cuda:1",
- ),
- ])
- with self.assertRaisesRegex(NotImplementedError, 'not implemented yet!'):
- shard_parameter(fc, 'weight', spec)
+ spec = EnumerableShardingSpec(
+ [
+ ShardMetadata(
+ shard_offsets=[0, 0],
+ shard_sizes=[5, 5],
+ placement="rank:0/cuda:0",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 0],
+ shard_sizes=[5, 5],
+ placement="rank:1/cuda:1",
+ ),
+ ]
+ )
+ with self.assertRaisesRegex(NotImplementedError, "not implemented yet!"):
+ shard_parameter(fc, "weight", spec)
class TestShardTensor(ShardedTensorTestBase):
@@ -293,10 +327,10 @@ class TestShardTensor(ShardedTensorTestBase):
)
tensor = torch.rand(12, 12).cuda(self.rank)
- with self.assertRaisesRegex(ValueError, 'does not match with src_rank'):
+ with self.assertRaisesRegex(ValueError, "does not match with src_rank"):
_shard_tensor(tensor, spec, src_rank=self.rank)
- with self.assertRaisesRegex(ValueError, 'not a contiguous Tensor'):
+ with self.assertRaisesRegex(ValueError, "not a contiguous Tensor"):
tensor_t = torch.rand(12, 12).cuda(self.rank).t()
_shard_tensor(tensor_t, spec)
@@ -309,24 +343,24 @@ class TestShardTensor(ShardedTensorTestBase):
"rank:3/cuda:3",
],
)
- with self.assertRaisesRegex(ValueError, 'does not match with sharding_spec'):
+ with self.assertRaisesRegex(ValueError, "does not match with sharding_spec"):
_shard_tensor(tensor, spec)
- spec = EnumerableShardingSpec([
- ShardMetadata(
- shard_offsets=[0, 0],
- shard_sizes=[5, 5],
- placement="rank:0/cuda:0",
- ),
- ShardMetadata(
- shard_offsets=[5, 0],
- shard_sizes=[5, 5],
- placement="rank:1/cuda:1",
- ),
- ])
- with self.assertRaisesRegex(
- NotImplementedError, 'not implemented yet!'
- ):
+ spec = EnumerableShardingSpec(
+ [
+ ShardMetadata(
+ shard_offsets=[0, 0],
+ shard_sizes=[5, 5],
+ placement="rank:0/cuda:0",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 0],
+ shard_sizes=[5, 5],
+ placement="rank:1/cuda:1",
+ ),
+ ]
+ )
+ with self.assertRaisesRegex(NotImplementedError, "not implemented yet!"):
_shard_tensor(tensor, spec)
@@ -425,7 +459,6 @@ class TestLocalTensor(ShardedTensorTestBase):
class TestShardedTensorChunked(ShardedTensorTestBase):
-
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
@@ -479,7 +512,6 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_complete_world_size(self):
-
for dim in [0, -2]:
spec = ChunkShardingSpec(
dim=dim,
@@ -513,7 +545,9 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
self.assertEqual([1, 20], shard_metadata.shard_sizes)
else:
self.assertEqual([3, 20], shard_metadata.shard_sizes)
- self.assertEqual(f'rank:{rank}/cuda:{rank}', str(shard_metadata.placement))
+ self.assertEqual(
+ f"rank:{rank}/cuda:{rank}", str(shard_metadata.placement)
+ )
# Validate remote shards.
remote_shards = st.remote_shards()
@@ -524,18 +558,20 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
for remote_shard in shards:
self.assertEqual(rpc_rank, remote_shard.owner().id)
shard = remote_shard.to_here()
- self.assertEqual(f'rank:{rpc_rank}/cuda:{rpc_rank}', str(shard.metadata.placement))
+ self.assertEqual(
+ f"rank:{rpc_rank}/cuda:{rpc_rank}",
+ str(shard.metadata.placement),
+ )
if rpc_rank == 3:
self.assertEqual((1, 20), shard.tensor.size())
else:
self.assertEqual((3, 20), shard.tensor.size())
-
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_create_sharded_tensor_with_ones(self):
- """ Test sharded_tensor.ones(...) """
+ """Test sharded_tensor.ones(...)"""
spec = ChunkShardingSpec(
dim=0,
@@ -563,7 +599,7 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_gather_even(self) -> None:
- """ Test _sharded_tensor.gather(...) with evenly distributed._shards"""
+ """Test _sharded_tensor.gather(...) with evenly distributed._shards"""
spec = ChunkShardingSpec(
dim=0,
@@ -596,7 +632,7 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_gather_uneven(self) -> None:
- """ Test _sharded_tensor.gather(...) with unevenly distributed._shards"""
+ """Test _sharded_tensor.gather(...) with unevenly distributed._shards"""
spec = ChunkShardingSpec(
dim=0,
@@ -630,7 +666,7 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_create_sharded_tensor_with_zeros(self):
- """ Test sharded_tensor.zeros(...) """
+ """Test sharded_tensor.zeros(...)"""
spec = ChunkShardingSpec(
dim=0,
@@ -654,12 +690,11 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
self.assertEqual((expected_h, w), local_shard.size())
self.assertEqual(local_shard, torch.zeros(expected_h, w))
-
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_create_sharded_tensor_with_rand(self):
- """ Test sharded_tensor.rand(...)/randn(...) """
+ """Test sharded_tensor.rand(...)/randn(...)"""
spec = ChunkShardingSpec(
dim=0,
@@ -710,7 +745,7 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_create_sharded_tensor_with_full(self):
- """ Test sharded_tensor.full(...) """
+ """Test sharded_tensor.full(...)"""
spec = ChunkShardingSpec(
dim=0,
@@ -723,7 +758,9 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
)
h, w = 10, 20
fill_value = 1234
- st = sharded_tensor.full(spec, size=(h, w), fill_value=fill_value, dtype=torch.int32)
+ st = sharded_tensor.full(
+ spec, size=(h, w), fill_value=fill_value, dtype=torch.int32
+ )
# Validate local shard is initialized with torch.full
local_shards = st.local_shards()
@@ -733,14 +770,16 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
# The split: for rank!=3 ceil(h/4)=3 for rank=3 1
expected_h = 1 if self.rank == 3 else math.ceil(h / 4)
self.assertEqual((expected_h, w), local_shard.size())
- self.assertEqual(local_shard,
- torch.full(size=(expected_h, w), fill_value=fill_value, dtype=torch.int32))
+ self.assertEqual(
+ local_shard,
+ torch.full(size=(expected_h, w), fill_value=fill_value, dtype=torch.int32),
+ )
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_create_sharded_tensor_like(self):
- """ Test tensor like methods, i.e. torch.zeros_like(...), torch.full_like, etc. """
+ """Test tensor like methods, i.e. torch.zeros_like(...), torch.full_like, etc."""
spec = ChunkShardingSpec(
dim=0,
@@ -763,22 +802,28 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
torch.rand_like: torch.rand,
torch.randn_like: torch.randn,
torch.empty_like: torch.empty,
- torch.full_like: torch.full
+ torch.full_like: torch.full,
}
for op, expect_local_op in tensor_like_ops.items():
if op == torch.full_like:
# special handle full/full_like as it needs to have additional fill_value arg
- expect_tensor = expect_local_op((expected_h, w), 8.8, device=expected_device, dtype=dtype)
+ expect_tensor = expect_local_op(
+ (expected_h, w), 8.8, device=expected_device, dtype=dtype
+ )
new_op_st = op(st, 8.8, dtype=dtype)
self.assertEqual(new_op_st.local_tensor(), expect_tensor)
elif op == torch.empty_like:
# empty/empty_like we only compare the shape
- expect_tensor = expect_local_op(expected_h, w, device=expected_device, dtype=dtype)
+ expect_tensor = expect_local_op(
+ expected_h, w, device=expected_device, dtype=dtype
+ )
new_op_st = op(st, dtype=dtype)
self.assertEqual(new_op_st.local_tensor().shape, expect_tensor.shape)
else:
torch.manual_seed(seed)
- expect_tensor = expect_local_op(expected_h, w, device=expected_device, dtype=dtype)
+ expect_tensor = expect_local_op(
+ expected_h, w, device=expected_device, dtype=dtype
+ )
torch.manual_seed(seed)
new_op_st = op(st, dtype=dtype)
self.assertEqual(new_op_st.local_tensor(), expect_tensor)
@@ -787,7 +832,6 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_partial_world_size(self):
-
spec = ChunkShardingSpec(
dim=0,
placements=[
@@ -815,7 +859,10 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
for shard_rank, shard_metadata in enumerate(shards_metadata):
self.assertEqual([shard_rank * 5, 0], shard_metadata.shard_offsets)
self.assertEqual([5, 20], shard_metadata.shard_sizes)
- self.assertEqual(f'rank:{shard_rank + 2}/cuda:{shard_rank + 2}', str(shard_metadata.placement))
+ self.assertEqual(
+ f"rank:{shard_rank + 2}/cuda:{shard_rank + 2}",
+ str(shard_metadata.placement),
+ )
# Validate remote shards.
remote_shards = st.remote_shards()
@@ -829,14 +876,15 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
for remote_shard in shards:
self.assertEqual(rpc_rank, remote_shard.owner().id)
shard = remote_shard.to_here()
- self.assertEqual(f'rank:{rpc_rank}/cuda:{rpc_rank}', str(shard.metadata.placement))
+ self.assertEqual(
+ f"rank:{rpc_rank}/cuda:{rpc_rank}", str(shard.metadata.placement)
+ )
self.assertEqual((5, 20), shard.tensor.size())
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_new_group(self):
-
spec = ChunkShardingSpec(
dim=0,
placements=[
@@ -866,7 +914,10 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
for shard_rank, shard_metadata in enumerate(shards_metadata):
self.assertEqual([shard_rank * 5, 0], shard_metadata.shard_offsets)
self.assertEqual([5, 20], shard_metadata.shard_sizes)
- self.assertEqual(f'rank:{shard_rank + 2}/cuda:{shard_rank + 2}', str(shard_metadata.placement))
+ self.assertEqual(
+ f"rank:{shard_rank + 2}/cuda:{shard_rank + 2}",
+ str(shard_metadata.placement),
+ )
# Validate remote shards.
remote_shards = st.remote_shards()
@@ -880,7 +931,9 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
for remote_shard in shards:
shard = remote_shard.to_here()
self.assertEqual(rpc_rank, remote_shard.owner().id)
- self.assertEqual(f'rank:{rpc_rank}/cuda:{rpc_rank}', str(shard.metadata.placement))
+ self.assertEqual(
+ f"rank:{rpc_rank}/cuda:{rpc_rank}", str(shard.metadata.placement)
+ )
self.assertEqual((5, 20), shard.tensor.size())
@with_comms
@@ -906,7 +959,9 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
local_shards = st.local_shards()
self.assertEqual(2, len(local_shards))
for local_shard in local_shards:
- self.assertEqual(torch.device(f"cuda:{self.rank}"), local_shard.tensor.device)
+ self.assertEqual(
+ torch.device(f"cuda:{self.rank}"), local_shard.tensor.device
+ )
self.assertEqual((2, 20), local_shard.tensor.size())
# Validate global metadata.
@@ -917,7 +972,10 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
for shard_idx, shard_metadata in enumerate(shards_metadata):
self.assertEqual([shard_idx * 2, 0], shard_metadata.shard_offsets)
self.assertEqual([2, 20], shard_metadata.shard_sizes)
- self.assertEqual(f'rank:{shard_idx % 4}/cuda:{shard_idx % 4}', str(shard_metadata.placement))
+ self.assertEqual(
+ f"rank:{shard_idx % 4}/cuda:{shard_idx % 4}",
+ str(shard_metadata.placement),
+ )
# Validate remote shards.
remote_shards = st.remote_shards()
@@ -930,7 +988,6 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
self.assertEqual((2, 20), shard.tensor.size())
self.assertEqual(rpc_rank, remote_shard.owner().id)
-
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_sharding_columns(self):
@@ -964,58 +1021,74 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
for rank, shard_metadata in enumerate(shards_metadata):
self.assertEqual([0, rank * 8], shard_metadata.shard_offsets)
self.assertEqual([10, 8], shard_metadata.shard_sizes)
- self.assertEqual(f'rank:{rank}/cuda:{rank}', str(shard_metadata.placement))
+ self.assertEqual(
+ f"rank:{rank}/cuda:{rank}", str(shard_metadata.placement)
+ )
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_invalid_sharding(self):
self.init_pg()
- with self.assertRaisesRegex(NotImplementedError, 'does not support named dimension'):
- spec = ChunkShardingSpec(dim='H', placements=["rank:1/cuda:1"])
+ with self.assertRaisesRegex(
+ NotImplementedError, "does not support named dimension"
+ ):
+ spec = ChunkShardingSpec(dim="H", placements=["rank:1/cuda:1"])
sharded_tensor.empty(spec, 10, 20)
for dim in [2, 3, 4, -3, -4, -5]:
spec = ChunkShardingSpec(dim=dim, placements=["rank:1/cuda:1"])
- with self.assertRaisesRegex(ValueError, 'Invalid sharding dim'):
+ with self.assertRaisesRegex(ValueError, "Invalid sharding dim"):
sharded_tensor.empty(spec, 10, 20)
spec = ChunkShardingSpec(dim=0, placements=["rank:5/cuda:1"])
with self.assertRaisesRegex(
- ValueError,
- "Global rank 5 does not exist in input process group"
+ ValueError, "Global rank 5 does not exist in input process group"
):
sharded_tensor.empty(spec, 10, 20)
spec = ChunkShardingSpec(dim=0, placements=["rank:0/cuda:1"])
st = sharded_tensor.empty(spec, 10, 20)
tensor = torch.empty(10, 20)
- with self.assertRaisesRegex(RuntimeError, r".*not supported for ShardedTensor!$"):
+ with self.assertRaisesRegex(
+ RuntimeError, r".*not supported for ShardedTensor!$"
+ ):
torch.add(st, tensor)
spec = ChunkShardingSpec(dim=0, placements=["rank:0/cuda:1"])
- with self.assertRaisesRegex(ValueError, 'Only torch.strided layout is currently supported'):
+ with self.assertRaisesRegex(
+ ValueError, "Only torch.strided layout is currently supported"
+ ):
sharded_tensor.empty(spec, 10, 20, layout=torch.sparse_coo)
spec = ChunkShardingSpec(dim=0, placements=["rank:0/cuda:1"])
- with self.assertRaisesRegex(ValueError, 'Only torch.contiguous_format memory_format is currently supported'):
+ with self.assertRaisesRegex(
+ ValueError,
+ "Only torch.contiguous_format memory_format is currently supported",
+ ):
sharded_tensor.empty(spec, 10, 20, memory_format=torch.channels_last)
spec = ChunkShardingSpec(dim=0, placements=["worker0/cuda:1"])
- with self.assertRaisesRegex(RuntimeError, 'RPC framework needs to be initialized'):
+ with self.assertRaisesRegex(
+ RuntimeError, "RPC framework needs to be initialized"
+ ):
sharded_tensor.empty(spec, 10, 20)
spec = ChunkShardingSpec(dim=0, placements=["rank:0/cuda:1"])
- with self.assertRaisesRegex(RuntimeError, 'RPC Framework needs to be initialized'):
+ with self.assertRaisesRegex(
+ RuntimeError, "RPC Framework needs to be initialized"
+ ):
st = sharded_tensor.empty(spec, 10, 20, init_rrefs=True)
- with self.assertRaisesRegex(RuntimeError, 'ShardedTensor created with init_rrefs=False'):
+ with self.assertRaisesRegex(
+ RuntimeError, "ShardedTensor created with init_rrefs=False"
+ ):
st = sharded_tensor.empty(spec, 10, 20)
st.remote_shards()
self.init_rpc()
spec = ChunkShardingSpec(dim=0, placements=["workerfoo/cuda:1"])
- with self.assertRaisesRegex(ValueError, 'Invalid worker name'):
+ with self.assertRaisesRegex(ValueError, "Invalid worker name"):
sharded_tensor.empty(spec, 10, 20, init_rrefs=True)
@skip_if_lt_x_gpu(4)
@@ -1024,18 +1097,22 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
self.init_pg()
# Init RPC with different ranks.
- rpc_backend_options = rpc.TensorPipeRpcBackendOptions(_transports=tp_transports())
+ rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
+ _transports=tp_transports()
+ )
rpc_backend_options.init_method = f"file://{self.file_name}"
rank = (self.rank + 1) % self.world_size
rpc.init_rpc(
- name=f'worker{rank}',
+ name=f"worker{rank}",
rank=rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
spec = ChunkShardingSpec(dim=0, placements=["rank:1/cuda:1"])
- with self.assertRaisesRegex(ValueError, 'Default ProcessGroup and RPC ranks must be the same'):
+ with self.assertRaisesRegex(
+ ValueError, "Default ProcessGroup and RPC ranks must be the same"
+ ):
sharded_tensor.empty(spec, 10, 20, init_rrefs=True)
@skip_if_lt_x_gpu(4)
@@ -1074,7 +1151,9 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
for shard_rank, shard_metadata in enumerate(shards_metadata):
self.assertEqual([shard_rank, 0], shard_metadata.shard_offsets)
- self.assertEqual(f'rank:{shard_rank}/cuda:{shard_rank}', str(shard_metadata.placement))
+ self.assertEqual(
+ f"rank:{shard_rank}/cuda:{shard_rank}", str(shard_metadata.placement)
+ )
if shard_rank <= 1:
self.assertEqual([1, 20], shard_metadata.shard_sizes)
else:
@@ -1127,13 +1206,13 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
self.assertEqual(st.ndim, 2)
# Test with invalid input
st = sharded_tensor.empty(spec, (10, 20), init_rrefs=True)
- with self.assertRaisesRegex(IndexError, 'Dimension out of range'):
+ with self.assertRaisesRegex(IndexError, "Dimension out of range"):
st.size(-3)
- with self.assertRaisesRegex(IndexError, 'Dimension out of range'):
+ with self.assertRaisesRegex(IndexError, "Dimension out of range"):
st.size(2)
with self.assertRaises(TypeError):
- st = sharded_tensor.empty(spec, 'foo')
+ st = sharded_tensor.empty(spec, "foo")
@with_comms
@skip_if_lt_x_gpu(4)
@@ -1174,7 +1253,11 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
self.assertTrue("submodule.sharded_tensor2" in loaded_dict_keys)
# Verify after load.
self.assertTrue(torch.equal(m.sharded_tensor1, module_load.sharded_tensor1))
- self.assertTrue(torch.equal(m.submodule.sharded_tensor2, module_load.submodule.sharded_tensor2))
+ self.assertTrue(
+ torch.equal(
+ m.submodule.sharded_tensor2, module_load.submodule.sharded_tensor2
+ )
+ )
@with_comms
@skip_if_lt_x_gpu(4)
@@ -1210,7 +1293,11 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
# Verify after load.
self.assertTrue(torch.equal(m.sharded_tensor1, module_load.sharded_tensor1))
- self.assertTrue(torch.equal(m.submodule.sharded_tensor2, module_load.submodule.sharded_tensor2))
+ self.assertTrue(
+ torch.equal(
+ m.submodule.sharded_tensor2, module_load.submodule.sharded_tensor2
+ )
+ )
@with_comms
@skip_if_lt_x_gpu(4)
@@ -1271,17 +1358,21 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
buffer.seek(0)
if self.rank != 0:
- with self.assertRaisesRegex(RuntimeError, 'Local rank at save time was'):
+ with self.assertRaisesRegex(RuntimeError, "Local rank at save time was"):
with load_with_process_group(pg):
state_dict_deser = torch.load(buffer)
else:
- with self.assertRaisesRegex(RuntimeError, 'Local world size at save time was'):
+ with self.assertRaisesRegex(
+ RuntimeError, "Local world size at save time was"
+ ):
with load_with_process_group(pg):
state_dict_deser = torch.load(buffer)
dist.destroy_process_group()
buffer.seek(0)
- with self.assertRaisesRegex(RuntimeError, 'Need to initialize default process group'):
+ with self.assertRaisesRegex(
+ RuntimeError, "Need to initialize default process group"
+ ):
state_dict_deser = torch.load(buffer)
rpc.shutdown()
@@ -1289,7 +1380,6 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_cleanup(self):
-
def create_tensors():
spec = ChunkShardingSpec(
dim=0,
@@ -1308,33 +1398,34 @@ class TestShardedTensorChunked(ShardedTensorTestBase):
class TestShardedTensorEnumerable(ShardedTensorTestBase):
-
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_sharded_tensor_metadata(self):
- spec = EnumerableShardingSpec([
- ShardMetadata(
- shard_offsets=[0, 0],
- shard_sizes=[5, 5],
- placement="rank:0/cuda:0",
- ),
- ShardMetadata(
- shard_offsets=[0, 5],
- shard_sizes=[5, 5],
- placement="rank:1/cuda:1",
- ),
- ShardMetadata(
- shard_offsets=[5, 0],
- shard_sizes=[5, 5],
- placement="rank:2/cuda:2",
- ),
- ShardMetadata(
- shard_offsets=[5, 5],
- shard_sizes=[5, 5],
- placement="rank:3/cuda:3",
- )
- ])
+ spec = EnumerableShardingSpec(
+ [
+ ShardMetadata(
+ shard_offsets=[0, 0],
+ shard_sizes=[5, 5],
+ placement="rank:0/cuda:0",
+ ),
+ ShardMetadata(
+ shard_offsets=[0, 5],
+ shard_sizes=[5, 5],
+ placement="rank:1/cuda:1",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 0],
+ shard_sizes=[5, 5],
+ placement="rank:2/cuda:2",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 5],
+ shard_sizes=[5, 5],
+ placement="rank:3/cuda:3",
+ ),
+ ]
+ )
st = sharded_tensor.empty(spec, 10, 10, init_rrefs=True)
st_metadata = st.metadata()
@@ -1352,28 +1443,30 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
self.assertEqual(torch.double, st.dtype)
# Need CPU for pin_memory
- spec = EnumerableShardingSpec([
- ShardMetadata(
- shard_offsets=[0, 0],
- shard_sizes=[5, 5],
- placement="rank:0/cpu",
- ),
- ShardMetadata(
- shard_offsets=[0, 5],
- shard_sizes=[5, 5],
- placement="rank:1/cpu",
- ),
- ShardMetadata(
- shard_offsets=[5, 0],
- shard_sizes=[5, 5],
- placement="rank:2/cpu",
- ),
- ShardMetadata(
- shard_offsets=[5, 5],
- shard_sizes=[5, 5],
- placement="rank:3/cpu",
- )
- ])
+ spec = EnumerableShardingSpec(
+ [
+ ShardMetadata(
+ shard_offsets=[0, 0],
+ shard_sizes=[5, 5],
+ placement="rank:0/cpu",
+ ),
+ ShardMetadata(
+ shard_offsets=[0, 5],
+ shard_sizes=[5, 5],
+ placement="rank:1/cpu",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 0],
+ shard_sizes=[5, 5],
+ placement="rank:2/cpu",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 5],
+ shard_sizes=[5, 5],
+ placement="rank:3/cpu",
+ ),
+ ]
+ )
st = sharded_tensor.empty(spec, 10, 10, pin_memory=True, init_rrefs=True)
self.assertTrue(st.is_pinned())
@@ -1382,29 +1475,30 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_grid_sharding(self):
-
- spec = EnumerableShardingSpec([
- ShardMetadata(
- shard_offsets=[0, 0],
- shard_sizes=[5, 5],
- placement="rank:0/cuda:0",
- ),
- ShardMetadata(
- shard_offsets=[0, 5],
- shard_sizes=[5, 5],
- placement="rank:1/cuda:1",
- ),
- ShardMetadata(
- shard_offsets=[5, 0],
- shard_sizes=[5, 5],
- placement="rank:2/cuda:2",
- ),
- ShardMetadata(
- shard_offsets=[5, 5],
- shard_sizes=[5, 5],
- placement="rank:3/cuda:3",
- )
- ])
+ spec = EnumerableShardingSpec(
+ [
+ ShardMetadata(
+ shard_offsets=[0, 0],
+ shard_sizes=[5, 5],
+ placement="rank:0/cuda:0",
+ ),
+ ShardMetadata(
+ shard_offsets=[0, 5],
+ shard_sizes=[5, 5],
+ placement="rank:1/cuda:1",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 0],
+ shard_sizes=[5, 5],
+ placement="rank:2/cuda:2",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 5],
+ shard_sizes=[5, 5],
+ placement="rank:3/cuda:3",
+ ),
+ ]
+ )
st = sharded_tensor.empty(spec, 10, 10, init_rrefs=True)
self.assertEqual((10, 10), st.size())
@@ -1412,22 +1506,29 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
# Verify local shard.
local_shard = st.local_shards()[0]
- self.assertEqual(torch.device(f'cuda:{self.rank}'), local_shard.tensor.device)
+ self.assertEqual(torch.device(f"cuda:{self.rank}"), local_shard.tensor.device)
self.assertEqual((5, 5), local_shard.tensor.size())
# Verify local shard metadata.
- self.assertEqual((self.rank // 2 * 5, (self.rank % 2) * 5), local_shard.metadata.shard_offsets)
+ self.assertEqual(
+ (self.rank // 2 * 5, (self.rank % 2) * 5),
+ local_shard.metadata.shard_offsets,
+ )
self.assertEqual((5, 5), local_shard.metadata.shard_sizes)
- self.assertEqual(f'rank:{self.rank}/cuda:{self.rank}', str(local_shard.metadata.placement))
+ self.assertEqual(
+ f"rank:{self.rank}/cuda:{self.rank}", str(local_shard.metadata.placement)
+ )
# Verify global metadata.
st_metadata = st.metadata()
shards_metadata = st_metadata.shards_metadata
self.assertEqual(4, len(shards_metadata))
for rank, shard_metadata in enumerate(shards_metadata):
- self.assertEqual((rank // 2 * 5, (rank % 2) * 5), shard_metadata.shard_offsets)
+ self.assertEqual(
+ (rank // 2 * 5, (rank % 2) * 5), shard_metadata.shard_offsets
+ )
self.assertEqual((5, 5), shard_metadata.shard_sizes)
- self.assertEqual(f'rank:{rank}/cuda:{rank}', str(shard_metadata.placement))
+ self.assertEqual(f"rank:{rank}/cuda:{rank}", str(shard_metadata.placement))
# Validate remote shards.
remote_shards = st.remote_shards()
@@ -1444,30 +1545,32 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_create_sharded_tensor_with_ones(self):
- """ Test sharded_tensor.ones(...) """
+ """Test sharded_tensor.ones(...)"""
- spec = EnumerableShardingSpec([
- ShardMetadata(
- shard_offsets=[0, 0],
- shard_sizes=[5, 5],
- placement="rank:0/cuda:0",
- ),
- ShardMetadata(
- shard_offsets=[0, 5],
- shard_sizes=[5, 5],
- placement="rank:1/cuda:1",
- ),
- ShardMetadata(
- shard_offsets=[5, 0],
- shard_sizes=[5, 5],
- placement="rank:2/cuda:2",
- ),
- ShardMetadata(
- shard_offsets=[5, 5],
- shard_sizes=[5, 5],
- placement="rank:3/cuda:3",
- )
- ])
+ spec = EnumerableShardingSpec(
+ [
+ ShardMetadata(
+ shard_offsets=[0, 0],
+ shard_sizes=[5, 5],
+ placement="rank:0/cuda:0",
+ ),
+ ShardMetadata(
+ shard_offsets=[0, 5],
+ shard_sizes=[5, 5],
+ placement="rank:1/cuda:1",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 0],
+ shard_sizes=[5, 5],
+ placement="rank:2/cuda:2",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 5],
+ shard_sizes=[5, 5],
+ placement="rank:3/cuda:3",
+ ),
+ ]
+ )
st = sharded_tensor.ones(spec, 10, 10, init_rrefs=True)
self.assertEqual((10, 10), st.size())
@@ -1475,7 +1578,7 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
# Verify local shard is initialized with torch.ones
local_shard = st.local_shards()[0]
- self.assertEqual(torch.device(f'cuda:{self.rank}'), local_shard.tensor.device)
+ self.assertEqual(torch.device(f"cuda:{self.rank}"), local_shard.tensor.device)
self.assertEqual((5, 5), local_shard.tensor.size())
self.assertEqual(local_shard.tensor, torch.ones(5, 5))
@@ -1483,30 +1586,32 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_gather_even(self) -> None:
- """ Test _sharded_tensor.gather(...) with evenly distributed._shards"""
+ """Test _sharded_tensor.gather(...) with evenly distributed._shards"""
- spec = EnumerableShardingSpec([
- ShardMetadata(
- shard_offsets=[0, 0],
- shard_sizes=[5, 5],
- placement="rank:0/cuda:0",
- ),
- ShardMetadata(
- shard_offsets=[0, 5],
- shard_sizes=[5, 5],
- placement="rank:1/cuda:1",
- ),
- ShardMetadata(
- shard_offsets=[5, 0],
- shard_sizes=[5, 5],
- placement="rank:2/cuda:2",
- ),
- ShardMetadata(
- shard_offsets=[5, 5],
- shard_sizes=[5, 5],
- placement="rank:3/cuda:3",
- )
- ])
+ spec = EnumerableShardingSpec(
+ [
+ ShardMetadata(
+ shard_offsets=[0, 0],
+ shard_sizes=[5, 5],
+ placement="rank:0/cuda:0",
+ ),
+ ShardMetadata(
+ shard_offsets=[0, 5],
+ shard_sizes=[5, 5],
+ placement="rank:1/cuda:1",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 0],
+ shard_sizes=[5, 5],
+ placement="rank:2/cuda:2",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 5],
+ shard_sizes=[5, 5],
+ placement="rank:3/cuda:3",
+ ),
+ ]
+ )
h, w = 10, 10
st = sharded_tensor.ones(spec, h, w, init_rrefs=True)
@@ -1514,11 +1619,7 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
full_tensor = None
dst = 0
if self.rank == dst:
- full_tensor = torch.zeros(
- h,
- w,
- device=torch.device(f"cuda:{dst}")
- )
+ full_tensor = torch.zeros(h, w, device=torch.device(f"cuda:{dst}"))
st.gather(dst, full_tensor)
if self.rank == dst:
@@ -1530,30 +1631,32 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_gather_uneven(self) -> None:
- """ Test _sharded_tensor.gather(...) with unevenly distributed._shards"""
+ """Test _sharded_tensor.gather(...) with unevenly distributed._shards"""
- spec = EnumerableShardingSpec([
- ShardMetadata(
- shard_offsets=[0, 0],
- shard_sizes=[5, 5],
- placement="rank:0/cuda:0",
- ),
- ShardMetadata(
- shard_offsets=[0, 5],
- shard_sizes=[5, 5],
- placement="rank:1/cuda:1",
- ),
- ShardMetadata(
- shard_offsets=[5, 0],
- shard_sizes=[5, 5],
- placement="rank:0/cuda:0",
- ),
- ShardMetadata(
- shard_offsets=[5, 5],
- shard_sizes=[5, 5],
- placement="rank:3/cuda:3",
- )
- ])
+ spec = EnumerableShardingSpec(
+ [
+ ShardMetadata(
+ shard_offsets=[0, 0],
+ shard_sizes=[5, 5],
+ placement="rank:0/cuda:0",
+ ),
+ ShardMetadata(
+ shard_offsets=[0, 5],
+ shard_sizes=[5, 5],
+ placement="rank:1/cuda:1",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 0],
+ shard_sizes=[5, 5],
+ placement="rank:0/cuda:0",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 5],
+ shard_sizes=[5, 5],
+ placement="rank:3/cuda:3",
+ ),
+ ]
+ )
h, w = 10, 10
st = sharded_tensor.ones(spec, h, w, init_rrefs=True)
@@ -1561,11 +1664,7 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
full_tensor = None
dst = 0
if self.rank == dst:
- full_tensor = torch.zeros(
- h,
- w,
- device=torch.device(f"cuda:{dst}")
- )
+ full_tensor = torch.zeros(h, w, device=torch.device(f"cuda:{dst}"))
st.gather(dst, full_tensor)
if self.rank == dst:
@@ -1616,7 +1715,9 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
self.assertIsInstance(new_st._process_group, distributed_c10d.ProcessGroup)
# test specs before and after the move almost the same except placement device
self.assertEqual(spec_before_move.dim, spec_after_move.dim)
- self.assertEqual(len(spec_before_move.placements), len(spec_after_move.placements))
+ self.assertEqual(
+ len(spec_before_move.placements), len(spec_after_move.placements)
+ )
for i, remote_device_after in enumerate(spec_after_move.placements):
remote_device_before = spec_before_move.placements[i]
self.assertEqual(remote_device_before.rank(), remote_device_after.rank())
@@ -1700,7 +1801,9 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
self.assertIsInstance(spec_after_move, ChunkShardingSpec)
# test specs before and after the move almost the same except placement device
self.assertEqual(spec_before_move.dim, spec_after_move.dim)
- self.assertEqual(len(spec_before_move.placements), len(spec_after_move.placements))
+ self.assertEqual(
+ len(spec_before_move.placements), len(spec_after_move.placements)
+ )
for i, remote_device_after in enumerate(spec_after_move.placements):
remote_device_before = spec_before_move.placements[i]
self.assertEqual(remote_device_before.rank(), remote_device_after.rank())
@@ -1815,28 +1918,30 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
def test_uneven_shards(self):
self.init_pg()
- spec = EnumerableShardingSpec([
- ShardMetadata(
- shard_offsets=[0, 0],
- shard_sizes=[2, 4],
- placement="rank:0/cuda:0",
- ),
- ShardMetadata(
- shard_offsets=[0, 4],
- shard_sizes=[4, 2],
- placement="rank:1/cuda:1",
- ),
- ShardMetadata(
- shard_offsets=[2, 0],
- shard_sizes=[4, 4],
- placement="rank:2/cuda:2",
- ),
- ShardMetadata(
- shard_offsets=[4, 4],
- shard_sizes=[2, 2],
- placement="rank:3/cuda:3",
- ),
- ])
+ spec = EnumerableShardingSpec(
+ [
+ ShardMetadata(
+ shard_offsets=[0, 0],
+ shard_sizes=[2, 4],
+ placement="rank:0/cuda:0",
+ ),
+ ShardMetadata(
+ shard_offsets=[0, 4],
+ shard_sizes=[4, 2],
+ placement="rank:1/cuda:1",
+ ),
+ ShardMetadata(
+ shard_offsets=[2, 0],
+ shard_sizes=[4, 4],
+ placement="rank:2/cuda:2",
+ ),
+ ShardMetadata(
+ shard_offsets=[4, 4],
+ shard_sizes=[2, 2],
+ placement="rank:3/cuda:3",
+ ),
+ ]
+ )
st = sharded_tensor.empty(spec, 6, 6)
self.assertEqual((6, 6), st.size())
@@ -1864,13 +1969,15 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
# Verify local shard.
local_shard = st.local_shards()[0]
- self.assertEqual(torch.device(f'cuda:{self.rank}'), local_shard.tensor.device)
+ self.assertEqual(torch.device(f"cuda:{self.rank}"), local_shard.tensor.device)
verify_size(self.rank, local_shard.tensor.size())
# Verify local shard metadata.
verify_offsets(self.rank, local_shard.metadata.shard_offsets)
verify_size(self.rank, local_shard.metadata.shard_sizes)
- self.assertEqual(f'rank:{self.rank}/cuda:{self.rank}', str(local_shard.metadata.placement))
+ self.assertEqual(
+ f"rank:{self.rank}/cuda:{self.rank}", str(local_shard.metadata.placement)
+ )
# Verify global metadata.
st_metadata = st.metadata()
@@ -1879,24 +1986,26 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
for rank, shard_metadata in enumerate(shards_metadata):
verify_offsets(rank, shard_metadata.shard_offsets)
verify_size(rank, shard_metadata.shard_sizes)
- self.assertEqual(f'rank:{rank}/cuda:{rank}', str(shard_metadata.placement))
+ self.assertEqual(f"rank:{rank}/cuda:{rank}", str(shard_metadata.placement))
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_partial_world_size(self):
- spec = EnumerableShardingSpec([
- ShardMetadata(
- shard_offsets=[0, 0],
- shard_sizes=[5, 5],
- placement="rank:0/cuda:0",
- ),
- ShardMetadata(
- shard_offsets=[5, 0],
- shard_sizes=[5, 5],
- placement="rank:1/cuda:1",
- ),
- ])
+ spec = EnumerableShardingSpec(
+ [
+ ShardMetadata(
+ shard_offsets=[0, 0],
+ shard_sizes=[5, 5],
+ placement="rank:0/cuda:0",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 0],
+ shard_sizes=[5, 5],
+ placement="rank:1/cuda:1",
+ ),
+ ]
+ )
st = sharded_tensor.empty(spec, 10, 5, init_rrefs=True)
self.assertEqual((10, 5), st.size())
@@ -1908,13 +2017,18 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
if self.rank <= 1:
# Verify local shard.
local_shard = st.local_shards()[0]
- self.assertEqual(torch.device(f'cuda:{self.rank}'), local_shard.tensor.device)
+ self.assertEqual(
+ torch.device(f"cuda:{self.rank}"), local_shard.tensor.device
+ )
self.assertEqual((5, 5), local_shard.tensor.size())
# Verify local shard metadata.
self.assertEqual((self.rank * 5, 0), local_shard.metadata.shard_offsets)
self.assertEqual((5, 5), local_shard.metadata.shard_sizes)
- self.assertEqual(f'rank:{self.rank}/cuda:{self.rank}', str(local_shard.metadata.placement))
+ self.assertEqual(
+ f"rank:{self.rank}/cuda:{self.rank}",
+ str(local_shard.metadata.placement),
+ )
# Verify global metadata.
st_metadata = st.metadata()
@@ -1923,7 +2037,7 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
for rank, shard_metadata in enumerate(shards_metadata):
self.assertEqual((rank * 5, 0), shard_metadata.shard_offsets)
self.assertEqual((5, 5), shard_metadata.shard_sizes)
- self.assertEqual(f'rank:{rank}/cuda:{rank}', str(shard_metadata.placement))
+ self.assertEqual(f"rank:{rank}/cuda:{rank}", str(shard_metadata.placement))
# Validate remote shards.
remote_shards = st.remote_shards()
@@ -1944,18 +2058,20 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_new_group(self):
- spec = EnumerableShardingSpec([
- ShardMetadata(
- shard_offsets=[0, 0],
- shard_sizes=[5, 5],
- placement="rank:1/cuda:1",
- ),
- ShardMetadata(
- shard_offsets=[5, 0],
- shard_sizes=[5, 5],
- placement="rank:3/cuda:3",
- ),
- ])
+ spec = EnumerableShardingSpec(
+ [
+ ShardMetadata(
+ shard_offsets=[0, 0],
+ shard_sizes=[5, 5],
+ placement="rank:1/cuda:1",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 0],
+ shard_sizes=[5, 5],
+ placement="rank:3/cuda:3",
+ ),
+ ]
+ )
pg = dist.new_group(ranks=[1, 2, 3])
@@ -1964,13 +2080,20 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
if self.rank == 1 or self.rank == 3:
# Verify local shard.
local_shard = st.local_shards()[0]
- self.assertEqual(torch.device(f'cuda:{self.rank}'), local_shard.tensor.device)
+ self.assertEqual(
+ torch.device(f"cuda:{self.rank}"), local_shard.tensor.device
+ )
self.assertEqual((5, 5), local_shard.tensor.size())
# Verify local shard metadata.
- self.assertEqual((self.rank // 2 * 5, 0), local_shard.metadata.shard_offsets)
+ self.assertEqual(
+ (self.rank // 2 * 5, 0), local_shard.metadata.shard_offsets
+ )
self.assertEqual((5, 5), local_shard.metadata.shard_sizes)
- self.assertEqual(f'rank:{self.rank}/cuda:{self.rank}', str(local_shard.metadata.placement))
+ self.assertEqual(
+ f"rank:{self.rank}/cuda:{self.rank}",
+ str(local_shard.metadata.placement),
+ )
# Verify global metadata.
st_metadata = st.metadata()
@@ -1979,7 +2102,10 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
for rank, shard_metadata in enumerate(shards_metadata):
self.assertEqual((rank * 5, 0), shard_metadata.shard_offsets)
self.assertEqual((5, 5), shard_metadata.shard_sizes)
- self.assertEqual(f'rank:{rank * 2 + 1}/cuda:{rank * 2 + 1}', str(shard_metadata.placement))
+ self.assertEqual(
+ f"rank:{rank * 2 + 1}/cuda:{rank * 2 + 1}",
+ str(shard_metadata.placement),
+ )
# Validate remote shards.
remote_shards = st.remote_shards()
@@ -2000,28 +2126,30 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_multiple_local_shards(self):
- spec = EnumerableShardingSpec([
- ShardMetadata(
- shard_offsets=[0, 0],
- shard_sizes=[5, 5],
- placement="rank:0/cuda:0",
- ),
- ShardMetadata(
- shard_offsets=[0, 5],
- shard_sizes=[5, 5],
- placement="rank:1/cuda:1",
- ),
- ShardMetadata(
- shard_offsets=[5, 0],
- shard_sizes=[5, 5],
- placement="rank:0/cuda:0",
- ),
- ShardMetadata(
- shard_offsets=[5, 5],
- shard_sizes=[5, 5],
- placement="rank:1/cuda:1",
- )
- ])
+ spec = EnumerableShardingSpec(
+ [
+ ShardMetadata(
+ shard_offsets=[0, 0],
+ shard_sizes=[5, 5],
+ placement="rank:0/cuda:0",
+ ),
+ ShardMetadata(
+ shard_offsets=[0, 5],
+ shard_sizes=[5, 5],
+ placement="rank:1/cuda:1",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 0],
+ shard_sizes=[5, 5],
+ placement="rank:0/cuda:0",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 5],
+ shard_sizes=[5, 5],
+ placement="rank:1/cuda:1",
+ ),
+ ]
+ )
st = sharded_tensor.empty(spec, 10, 10, init_rrefs=True)
self.assertEqual((10, 10), st.size())
@@ -2031,13 +2159,20 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
# Verify local shards.
for idx, local_shard in enumerate(st.local_shards()):
- self.assertEqual(torch.device(f'cuda:{self.rank}'), local_shard.tensor.device)
+ self.assertEqual(
+ torch.device(f"cuda:{self.rank}"), local_shard.tensor.device
+ )
self.assertEqual((5, 5), local_shard.tensor.size())
# Verify local shard metadata.
- self.assertEqual((idx * 5, self.rank * 5), local_shard.metadata.shard_offsets)
+ self.assertEqual(
+ (idx * 5, self.rank * 5), local_shard.metadata.shard_offsets
+ )
self.assertEqual((5, 5), local_shard.metadata.shard_sizes)
- self.assertEqual(f'rank:{self.rank}/cuda:{self.rank}', str(local_shard.metadata.placement))
+ self.assertEqual(
+ f"rank:{self.rank}/cuda:{self.rank}",
+ str(local_shard.metadata.placement),
+ )
else:
self.assertEqual(0, len(st.local_shards()))
@@ -2046,9 +2181,15 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
shards_metadata = st_metadata.shards_metadata
self.assertEqual(4, len(shards_metadata))
for shard_rank, shard_metadata in enumerate(shards_metadata):
- self.assertEqual((shard_rank // 2 * 5, (shard_rank % 2) * 5), shard_metadata.shard_offsets)
+ self.assertEqual(
+ (shard_rank // 2 * 5, (shard_rank % 2) * 5),
+ shard_metadata.shard_offsets,
+ )
self.assertEqual((5, 5), shard_metadata.shard_sizes)
- self.assertEqual(f'rank:{shard_rank % 2}/cuda:{shard_rank % 2}', str(shard_metadata.placement))
+ self.assertEqual(
+ f"rank:{shard_rank % 2}/cuda:{shard_rank % 2}",
+ str(shard_metadata.placement),
+ )
# Validate remote shards.
remote_shards = st.remote_shards()
@@ -2069,28 +2210,30 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_with_rpc_names(self):
- spec = EnumerableShardingSpec([
- ShardMetadata(
- shard_offsets=[0, 0],
- shard_sizes=[5, 5],
- placement="worker0/cuda:0",
- ),
- ShardMetadata(
- shard_offsets=[0, 5],
- shard_sizes=[5, 5],
- placement="worker1/cuda:1",
- ),
- ShardMetadata(
- shard_offsets=[5, 0],
- shard_sizes=[5, 5],
- placement="worker2/cuda:2",
- ),
- ShardMetadata(
- shard_offsets=[5, 5],
- shard_sizes=[5, 5],
- placement="worker3/cuda:3",
- )
- ])
+ spec = EnumerableShardingSpec(
+ [
+ ShardMetadata(
+ shard_offsets=[0, 0],
+ shard_sizes=[5, 5],
+ placement="worker0/cuda:0",
+ ),
+ ShardMetadata(
+ shard_offsets=[0, 5],
+ shard_sizes=[5, 5],
+ placement="worker1/cuda:1",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 0],
+ shard_sizes=[5, 5],
+ placement="worker2/cuda:2",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 5],
+ shard_sizes=[5, 5],
+ placement="worker3/cuda:3",
+ ),
+ ]
+ )
st = sharded_tensor.empty(spec, 10, 10, init_rrefs=True)
self.assertEqual((10, 10), st.size())
@@ -2098,22 +2241,29 @@ class TestShardedTensorEnumerable(ShardedTensorTestBase):
# Verify local shard.
local_shard = st.local_shards()[0]
- self.assertEqual(torch.device(f'cuda:{self.rank}'), local_shard.tensor.device)
+ self.assertEqual(torch.device(f"cuda:{self.rank}"), local_shard.tensor.device)
self.assertEqual((5, 5), local_shard.tensor.size())
# Verify local shard metadata.
- self.assertEqual((self.rank // 2 * 5, (self.rank % 2) * 5), local_shard.metadata.shard_offsets)
+ self.assertEqual(
+ (self.rank // 2 * 5, (self.rank % 2) * 5),
+ local_shard.metadata.shard_offsets,
+ )
self.assertEqual((5, 5), local_shard.metadata.shard_sizes)
- self.assertEqual(f'worker{self.rank}/cuda:{self.rank}', str(local_shard.metadata.placement))
+ self.assertEqual(
+ f"worker{self.rank}/cuda:{self.rank}", str(local_shard.metadata.placement)
+ )
# Verify global metadata.
st_metadata = st.metadata()
shards_metadata = st_metadata.shards_metadata
self.assertEqual(4, len(shards_metadata))
for rank, shard_metadata in enumerate(shards_metadata):
- self.assertEqual((rank // 2 * 5, (rank % 2) * 5), shard_metadata.shard_offsets)
+ self.assertEqual(
+ (rank // 2 * 5, (rank % 2) * 5), shard_metadata.shard_offsets
+ )
self.assertEqual((5, 5), shard_metadata.shard_sizes)
- self.assertEqual(f'worker{rank}/cuda:{rank}', str(shard_metadata.placement))
+ self.assertEqual(f"worker{rank}/cuda:{rank}", str(shard_metadata.placement))
# Validate remote shards.
remote_shards = st.remote_shards()
@@ -2136,7 +2286,9 @@ class TestShardedTensorFromLocalTensor(ShardedTensorTestBase):
local_shard_metadata = None
rank_to_metadata = {}
for shard_metadata in tensor_meta.shards_metadata:
- rank, device = _parse_and_validate_remote_device(pg, shard_metadata.placement)
+ rank, device = _parse_and_validate_remote_device(
+ pg, shard_metadata.placement
+ )
rank_to_metadata[rank] = shard_metadata
if rank == self.rank:
local_tensor = torch.rand(shard_metadata.shard_sizes).cuda(device)
@@ -2218,9 +2370,7 @@ class TestShardedTensorFromLocalTensor(ShardedTensorTestBase):
)
st_size = [24, 12]
local_tensor = torch.rand(*st_size).cuda(self.rank)
- with self.assertRaisesRegex(
- ValueError, "do not cover the entire tensor"
- ):
+ with self.assertRaisesRegex(ValueError, "do not cover the entire tensor"):
ShardedTensor._init_from_local_tensor(
local_tensor,
enumerable_sharding_spec,
@@ -2238,7 +2388,6 @@ class TestShardedTensorFromLocalTensor(ShardedTensorTestBase):
class TestShardedTensorFromLocalShards(ShardedTensorTestBase):
-
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(4)
@requires_nccl()
@@ -2247,24 +2396,22 @@ class TestShardedTensorFromLocalShards(ShardedTensorTestBase):
local_shard_metadata = ShardMetadata(
shard_offsets=shard_offsets,
shard_sizes=[5, 5],
- placement=f"rank:{self.rank}/cuda:{self.rank}"
+ placement=f"rank:{self.rank}/cuda:{self.rank}",
)
local_tensor = torch.randn(5, 5, device=f"cuda:{self.rank}")
local_shard = sharded_tensor.Shard(local_tensor, local_shard_metadata)
local_shard_from_offsets = sharded_tensor.Shard.from_tensor_and_offsets(
- local_tensor,
- shard_offsets=shard_offsets,
- rank=self.rank
+ local_tensor, shard_offsets=shard_offsets, rank=self.rank
)
self.assertEqual(local_shard.metadata, local_shard_from_offsets.metadata)
wrong_local_shard_metadata = ShardMetadata(
shard_offsets=shard_offsets,
shard_sizes=[6, 5],
- placement=f"rank:{self.rank}/cuda:{self.rank}"
+ placement=f"rank:{self.rank}/cuda:{self.rank}",
)
- with self.assertRaisesRegex(ValueError, 'Shard tensor size does not match'):
+ with self.assertRaisesRegex(ValueError, "Shard tensor size does not match"):
local_shard_from_wrong_meta = sharded_tensor.Shard(
local_tensor,
metadata=wrong_local_shard_metadata,
@@ -2277,32 +2424,45 @@ class TestShardedTensorFromLocalShards(ShardedTensorTestBase):
local_shard_metadata = ShardMetadata(
shard_offsets=[(self.rank // 2) * 5, (self.rank % 2) * 5],
shard_sizes=[5, 5],
- placement=f"rank:{self.rank}/cuda:{self.rank}"
+ placement=f"rank:{self.rank}/cuda:{self.rank}",
)
- local_shards = [sharded_tensor.Shard(torch.randn(5, 5, device=f"cuda:{self.rank}"), local_shard_metadata)]
+ local_shards = [
+ sharded_tensor.Shard(
+ torch.randn(5, 5, device=f"cuda:{self.rank}"), local_shard_metadata
+ )
+ ]
- st = sharded_tensor.init_from_local_shards(local_shards, [10, 10], init_rrefs=True)
+ st = sharded_tensor.init_from_local_shards(
+ local_shards, [10, 10], init_rrefs=True
+ )
self.assertEqual((10, 10), st.size())
self.assertEqual(1, len(st.local_shards()))
# Verify local shard.
local_shard = st.local_shards()[0]
- self.assertEqual(torch.device(f'cuda:{self.rank}'), local_shard.tensor.device)
+ self.assertEqual(torch.device(f"cuda:{self.rank}"), local_shard.tensor.device)
self.assertEqual((5, 5), local_shard.tensor.size())
# Verify local shard metadata.
- self.assertEqual((self.rank // 2 * 5, (self.rank % 2) * 5), local_shard.metadata.shard_offsets)
+ self.assertEqual(
+ (self.rank // 2 * 5, (self.rank % 2) * 5),
+ local_shard.metadata.shard_offsets,
+ )
self.assertEqual((5, 5), local_shard.metadata.shard_sizes)
- self.assertEqual(f'rank:{self.rank}/cuda:{self.rank}', str(local_shard.metadata.placement))
+ self.assertEqual(
+ f"rank:{self.rank}/cuda:{self.rank}", str(local_shard.metadata.placement)
+ )
# Verify global metadata.
shards_metadata = st.metadata().shards_metadata
self.assertEqual(4, len(shards_metadata))
for rank, shard_metadata in enumerate(shards_metadata):
- self.assertEqual((rank // 2 * 5, (rank % 2) * 5), shard_metadata.shard_offsets)
+ self.assertEqual(
+ (rank // 2 * 5, (rank % 2) * 5), shard_metadata.shard_offsets
+ )
self.assertEqual((5, 5), shard_metadata.shard_sizes)
- self.assertEqual(f'rank:{rank}/cuda:{rank}', str(shard_metadata.placement))
+ self.assertEqual(f"rank:{rank}/cuda:{rank}", str(shard_metadata.placement))
# Validate remote shards.
remote_shards = st.remote_shards()
@@ -2382,7 +2542,7 @@ class TestShardedTensorFromLocalShards(ShardedTensorTestBase):
local_shard_metadata = ShardMetadata(
shard_offsets=[(self.rank // 2) * 5, (self.rank % 2) * 5],
shard_sizes=[5, 5],
- placement=f"rank:{self.rank}/cuda:{self.rank}"
+ placement=f"rank:{self.rank}/cuda:{self.rank}",
)
shards_metadata = []
@@ -2390,13 +2550,19 @@ class TestShardedTensorFromLocalShards(ShardedTensorTestBase):
if r == self.rank:
shards_metadata.append(local_shard_metadata)
else:
- shards_metadata.append(ShardMetadata(
- shard_offsets=[(r // 2) * 5, (r % 2) * 5],
- shard_sizes=[5, 5],
- placement=f"rank:{r}/cuda:{r}"
- ))
+ shards_metadata.append(
+ ShardMetadata(
+ shard_offsets=[(r // 2) * 5, (r % 2) * 5],
+ shard_sizes=[5, 5],
+ placement=f"rank:{r}/cuda:{r}",
+ )
+ )
- local_shards = [sharded_tensor.Shard(torch.randn(5, 5, device=f"cuda:{self.rank}"), local_shard_metadata)]
+ local_shards = [
+ sharded_tensor.Shard(
+ torch.randn(5, 5, device=f"cuda:{self.rank}"), local_shard_metadata
+ )
+ ]
tensor_properties = TensorProperties(
dtype=torch.get_default_dtype(),
@@ -2422,21 +2588,28 @@ class TestShardedTensorFromLocalShards(ShardedTensorTestBase):
# Verify local shard.
local_shard = st.local_shards()[0]
- self.assertEqual(torch.device(f'cuda:{self.rank}'), local_shard.tensor.device)
+ self.assertEqual(torch.device(f"cuda:{self.rank}"), local_shard.tensor.device)
self.assertEqual((5, 5), local_shard.tensor.size())
# Verify local shard metadata.
- self.assertEqual((self.rank // 2 * 5, (self.rank % 2) * 5), local_shard.metadata.shard_offsets)
+ self.assertEqual(
+ (self.rank // 2 * 5, (self.rank % 2) * 5),
+ local_shard.metadata.shard_offsets,
+ )
self.assertEqual((5, 5), local_shard.metadata.shard_sizes)
- self.assertEqual(f'rank:{self.rank}/cuda:{self.rank}', str(local_shard.metadata.placement))
+ self.assertEqual(
+ f"rank:{self.rank}/cuda:{self.rank}", str(local_shard.metadata.placement)
+ )
# Verify global metadata.
shards_metadata = st.metadata().shards_metadata
self.assertEqual(4, len(shards_metadata))
for rank, shard_metadata in enumerate(shards_metadata):
- self.assertEqual((rank // 2 * 5, (rank % 2) * 5), shard_metadata.shard_offsets)
+ self.assertEqual(
+ (rank // 2 * 5, (rank % 2) * 5), shard_metadata.shard_offsets
+ )
self.assertEqual((5, 5), shard_metadata.shard_sizes)
- self.assertEqual(f'rank:{rank}/cuda:{rank}', str(shard_metadata.placement))
+ self.assertEqual(f"rank:{rank}/cuda:{rank}", str(shard_metadata.placement))
# Validate remote shards.
remote_shards = st.remote_shards()
@@ -2459,21 +2632,34 @@ class TestShardedTensorFromLocalShards(ShardedTensorTestBase):
local_shard_metadata = ShardMetadata(
shard_offsets=[5 * (self.rank - 1), 0],
shard_sizes=[5, 5],
- placement=f"rank:{self.rank}/cuda:{self.rank}"
+ placement=f"rank:{self.rank}/cuda:{self.rank}",
)
- local_shards = [sharded_tensor.Shard(torch.randn(5, 5, device=f"cuda:{self.rank}"), local_shard_metadata)]
+ local_shards = [
+ sharded_tensor.Shard(
+ torch.randn(5, 5, device=f"cuda:{self.rank}"), local_shard_metadata
+ )
+ ]
- st = sharded_tensor.init_from_local_shards(local_shards, [15, 5], process_group=new_pg)
+ st = sharded_tensor.init_from_local_shards(
+ local_shards, [15, 5], process_group=new_pg
+ )
# Verify local shard.
local_shard = st.local_shards()[0]
- self.assertEqual(torch.device(f'cuda:{self.rank}'), local_shard.tensor.device)
+ self.assertEqual(
+ torch.device(f"cuda:{self.rank}"), local_shard.tensor.device
+ )
self.assertEqual((5, 5), local_shard.tensor.size())
# Verify local shard metadata.
- self.assertEqual(((self.rank - 1) * 5, 0), local_shard.metadata.shard_offsets)
+ self.assertEqual(
+ ((self.rank - 1) * 5, 0), local_shard.metadata.shard_offsets
+ )
self.assertEqual((5, 5), local_shard.metadata.shard_sizes)
- self.assertEqual(f'rank:{self.rank}/cuda:{self.rank}', str(local_shard.metadata.placement))
+ self.assertEqual(
+ f"rank:{self.rank}/cuda:{self.rank}",
+ str(local_shard.metadata.placement),
+ )
# Verify global metadata.
st_metadata = st.metadata()
@@ -2482,8 +2668,9 @@ class TestShardedTensorFromLocalShards(ShardedTensorTestBase):
for rank, shard_metadata in enumerate(shards_metadata):
self.assertEqual((rank * 5, 0), shard_metadata.shard_offsets)
self.assertEqual((5, 5), shard_metadata.shard_sizes)
- self.assertEqual(f'rank:{rank + 1}/cuda:{rank + 1}', str(shard_metadata.placement))
-
+ self.assertEqual(
+ f"rank:{rank + 1}/cuda:{rank + 1}", str(shard_metadata.placement)
+ )
@with_comms
@skip_if_lt_x_gpu(4)
@@ -2492,36 +2679,57 @@ class TestShardedTensorFromLocalShards(ShardedTensorTestBase):
local_shard_metadata = ShardMetadata(
shard_offsets=[(self.rank // 2) * 5, (self.rank % 2) * 5],
shard_sizes=[5, 5],
- placement=f"rank:{self.rank}/cuda:{self.rank}"
+ placement=f"rank:{self.rank}/cuda:{self.rank}",
)
indices = [[0, 1, 1], [2, 0, 2]]
values = [3.2, 4.5, 5.8]
- sparse_tensor = torch.sparse_coo_tensor(indices, values, (5, 5), device=f"cuda:{self.rank}")
+ sparse_tensor = torch.sparse_coo_tensor(
+ indices, values, (5, 5), device=f"cuda:{self.rank}"
+ )
empty_local_shards = []
- with self.assertRaisesRegex(ValueError, 'have no local shards on all ranks'):
- st = sharded_tensor.init_from_local_shards(empty_local_shards, [10, 10], init_rrefs=True)
+ with self.assertRaisesRegex(ValueError, "have no local shards on all ranks"):
+ st = sharded_tensor.init_from_local_shards(
+ empty_local_shards, [10, 10], init_rrefs=True
+ )
wrong_layout_shards = [
sharded_tensor.Shard(sparse_tensor, local_shard_metadata)
]
- with self.assertRaisesRegex(ValueError, 'Only torch.strided layout is currently supported'):
+ with self.assertRaisesRegex(
+ ValueError, "Only torch.strided layout is currently supported"
+ ):
st = sharded_tensor.init_from_local_shards(
- wrong_layout_shards, [10, 10], init_rrefs=True)
+ wrong_layout_shards, [10, 10], init_rrefs=True
+ )
wrong_memory_format_shards = [
- sharded_tensor.Shard(torch.randn(5, 5, device=f"cuda:{self.rank}").t(), local_shard_metadata)
+ sharded_tensor.Shard(
+ torch.randn(5, 5, device=f"cuda:{self.rank}").t(), local_shard_metadata
+ )
]
- with self.assertRaisesRegex(ValueError, 'Only torch.contiguous_format memory_format is currently supported'):
+ with self.assertRaisesRegex(
+ ValueError,
+ "Only torch.contiguous_format memory_format is currently supported",
+ ):
st = sharded_tensor.init_from_local_shards(
- wrong_memory_format_shards, [10, 10], init_rrefs=True)
+ wrong_memory_format_shards, [10, 10], init_rrefs=True
+ )
- with self.assertRaisesRegex(ValueError, 'Shard tensor size does not match'):
- wrong_size_shards = [sharded_tensor.Shard(torch.randn(2, 3, device=f"cuda:{self.rank}"), local_shard_metadata)]
+ with self.assertRaisesRegex(ValueError, "Shard tensor size does not match"):
+ wrong_size_shards = [
+ sharded_tensor.Shard(
+ torch.randn(2, 3, device=f"cuda:{self.rank}"), local_shard_metadata
+ )
+ ]
- with self.assertRaisesRegex(ValueError, "Local shard tensor device does not match"):
- wrong_device_shards = [sharded_tensor.Shard(torch.randn(5, 5), local_shard_metadata)]
+ with self.assertRaisesRegex(
+ ValueError, "Local shard tensor device does not match"
+ ):
+ wrong_device_shards = [
+ sharded_tensor.Shard(torch.randn(5, 5), local_shard_metadata)
+ ]
@with_comms
@skip_if_lt_x_gpu(4)
@@ -2530,37 +2738,58 @@ class TestShardedTensorFromLocalShards(ShardedTensorTestBase):
local_shard_metadata = ShardMetadata(
shard_offsets=[(self.rank // 2) * 5, (self.rank % 2) * 5],
shard_sizes=[5, 5],
- placement=f"rank:{self.rank}/cuda:{self.rank}"
+ placement=f"rank:{self.rank}/cuda:{self.rank}",
)
tensor_overall_size = [10, 10] if self.rank == 0 else [10, 5]
wrong_dtype_shards = [
- sharded_tensor.Shard(torch.ones(5, 5, device=f"cuda:{self.rank}"), local_shard_metadata)
+ sharded_tensor.Shard(
+ torch.ones(5, 5, device=f"cuda:{self.rank}"), local_shard_metadata
+ )
]
- with self.assertRaisesRegex(ValueError, "ShardedTensor global_size property does not match from different ranks!"):
- st = sharded_tensor.init_from_local_shards(wrong_dtype_shards, tensor_overall_size, init_rrefs=True)
+ with self.assertRaisesRegex(
+ ValueError,
+ "ShardedTensor global_size property does not match from different ranks!",
+ ):
+ st = sharded_tensor.init_from_local_shards(
+ wrong_dtype_shards, tensor_overall_size, init_rrefs=True
+ )
tensor_dtype = torch.int if self.rank == 0 else torch.float32
wrong_dtype_shards = [
- sharded_tensor.Shard(torch.ones(5, 5, device=f"cuda:{self.rank}", dtype=tensor_dtype), local_shard_metadata)
+ sharded_tensor.Shard(
+ torch.ones(5, 5, device=f"cuda:{self.rank}", dtype=tensor_dtype),
+ local_shard_metadata,
+ )
]
- with self.assertRaisesRegex(ValueError, "ShardedTensor dtype property does not match from different ranks!"):
- st = sharded_tensor.init_from_local_shards(wrong_dtype_shards, [10, 10], init_rrefs=True)
+ with self.assertRaisesRegex(
+ ValueError,
+ "ShardedTensor dtype property does not match from different ranks!",
+ ):
+ st = sharded_tensor.init_from_local_shards(
+ wrong_dtype_shards, [10, 10], init_rrefs=True
+ )
tensor_requires_grad = True if self.rank == 0 else False
wrong_requires_grad_shards = [
sharded_tensor.Shard(
- torch.randn(5, 5, device=f"cuda:{self.rank}", requires_grad=tensor_requires_grad),
- local_shard_metadata
+ torch.randn(
+ 5, 5, device=f"cuda:{self.rank}", requires_grad=tensor_requires_grad
+ ),
+ local_shard_metadata,
)
]
- with self.assertRaisesRegex(ValueError, 'ShardedTensor requires_grad property does not match from different ranks!'):
+ with self.assertRaisesRegex(
+ ValueError,
+ "ShardedTensor requires_grad property does not match from different ranks!",
+ ):
st = sharded_tensor.init_from_local_shards(
- wrong_requires_grad_shards, [10, 10], init_rrefs=True)
+ wrong_requires_grad_shards, [10, 10], init_rrefs=True
+ )
local_shard_metadata = ShardMetadata(
shard_offsets=[(self.rank // 2) * 5, (self.rank % 2) * 5],
shard_sizes=[5, 5],
- placement=f"rank:{self.rank}/cpu"
+ placement=f"rank:{self.rank}/cpu",
)
@with_comms(init_rpc=False, backend="gloo")
@@ -2570,24 +2799,36 @@ class TestShardedTensorFromLocalShards(ShardedTensorTestBase):
local_shard_metadata = ShardMetadata(
shard_offsets=[(self.rank // 2) * 5, (self.rank % 2) * 5],
shard_sizes=[5, 5],
- placement=f"rank:{self.rank}/cpu"
+ placement=f"rank:{self.rank}/cpu",
)
wrong_pin_memory_local_shards = [
- sharded_tensor.Shard(torch.randn(5, 5, pin_memory=True), local_shard_metadata),
- sharded_tensor.Shard(torch.randn(5, 5, pin_memory=False), local_shard_metadata)
+ sharded_tensor.Shard(
+ torch.randn(5, 5, pin_memory=True), local_shard_metadata
+ ),
+ sharded_tensor.Shard(
+ torch.randn(5, 5, pin_memory=False), local_shard_metadata
+ ),
]
- with self.assertRaisesRegex(ValueError, "Local shards' tensor pin_memory property need to be the same"):
+ with self.assertRaisesRegex(
+ ValueError, "Local shards' tensor pin_memory property need to be the same"
+ ):
st = sharded_tensor.init_from_local_shards(
- wrong_pin_memory_local_shards, [10, 10], init_rrefs=True)
+ wrong_pin_memory_local_shards, [10, 10], init_rrefs=True
+ )
tensor_pin_memory = True if self.rank == 0 else False
wrong_pin_memory_shards_cross_ranks = [
- sharded_tensor.Shard(torch.randn(5, 5, pin_memory=tensor_pin_memory), local_shard_metadata)
+ sharded_tensor.Shard(
+ torch.randn(5, 5, pin_memory=tensor_pin_memory), local_shard_metadata
+ )
]
- with self.assertRaisesRegex(ValueError, 'ShardedTensor pin_memory property does not match from different ranks!'):
+ with self.assertRaisesRegex(
+ ValueError,
+ "ShardedTensor pin_memory property does not match from different ranks!",
+ ):
st = sharded_tensor.init_from_local_shards(
- wrong_pin_memory_shards_cross_ranks, [10, 10], init_rrefs=True)
-
+ wrong_pin_memory_shards_cross_ranks, [10, 10], init_rrefs=True
+ )
@with_comms
@skip_if_lt_x_gpu(4)
@@ -2597,14 +2838,20 @@ class TestShardedTensorFromLocalShards(ShardedTensorTestBase):
local_shard_metadata = ShardMetadata(
shard_offsets=[(self.rank // 2) * 5, (self.rank % 2) * 5],
shard_sizes=local_shard_size,
- placement=f"rank:{self.rank}/cuda:{self.rank}"
+ placement=f"rank:{self.rank}/cuda:{self.rank}",
)
- local_shards = [sharded_tensor.Shard(torch.randn(local_shard_size, device=f"cuda:{self.rank}"), local_shard_metadata)]
+ local_shards = [
+ sharded_tensor.Shard(
+ torch.randn(local_shard_size, device=f"cuda:{self.rank}"),
+ local_shard_metadata,
+ )
+ ]
with self.assertRaisesRegex(ValueError, "overlap"):
- sharded_tensor.init_from_local_shards(local_shards, [10, 10], init_rrefs=True)
-
+ sharded_tensor.init_from_local_shards(
+ local_shards, [10, 10], init_rrefs=True
+ )
@with_comms
@skip_if_lt_x_gpu(4)
@@ -2614,13 +2861,20 @@ class TestShardedTensorFromLocalShards(ShardedTensorTestBase):
local_shard_metadata = ShardMetadata(
shard_offsets=[(self.rank // 2) * 5, (self.rank % 2) * 5],
shard_sizes=local_shard_size,
- placement=f"rank:{self.rank}/cuda:{self.rank}"
+ placement=f"rank:{self.rank}/cuda:{self.rank}",
)
- local_shards = [sharded_tensor.Shard(torch.randn(local_shard_size, device=f"cuda:{self.rank}"), local_shard_metadata)]
+ local_shards = [
+ sharded_tensor.Shard(
+ torch.randn(local_shard_size, device=f"cuda:{self.rank}"),
+ local_shard_metadata,
+ )
+ ]
with self.assertRaisesRegex(ValueError, "does not match tensor volume"):
- sharded_tensor.init_from_local_shards(local_shards, [10, 10], init_rrefs=True)
+ sharded_tensor.init_from_local_shards(
+ local_shards, [10, 10], init_rrefs=True
+ )
@with_comms
@skip_if_lt_x_gpu(4)
@@ -2629,7 +2883,7 @@ class TestShardedTensorFromLocalShards(ShardedTensorTestBase):
local_shard_metadata = ShardMetadata(
shard_offsets=[(self.rank // 2) * 5, (self.rank % 2) * 5],
shard_sizes=[5, 5],
- placement=f"rank:{self.rank}/cuda:{self.rank}"
+ placement=f"rank:{self.rank}/cuda:{self.rank}",
)
shards_metadata = []
@@ -2637,11 +2891,13 @@ class TestShardedTensorFromLocalShards(ShardedTensorTestBase):
if r == self.rank:
shards_metadata.append(local_shard_metadata)
else:
- shards_metadata.append(ShardMetadata(
- shard_offsets=[(r // 2) * 5, (r % 2) * 5],
- shard_sizes=[5, 5],
- placement=f"rank:{r}/cuda:{r}"
- ))
+ shards_metadata.append(
+ ShardMetadata(
+ shard_offsets=[(r // 2) * 5, (r % 2) * 5],
+ shard_sizes=[5, 5],
+ placement=f"rank:{r}/cuda:{r}",
+ )
+ )
tensor_properties = TensorProperties(
dtype=torch.get_default_dtype(),
@@ -2658,85 +2914,120 @@ class TestShardedTensorFromLocalShards(ShardedTensorTestBase):
)
empty_local_shards = []
- with self.assertRaisesRegex(RuntimeError, 'does not match number of local shards metadata'):
+ with self.assertRaisesRegex(
+ RuntimeError, "does not match number of local shards metadata"
+ ):
ShardedTensor._init_from_local_shards_and_global_metadata(
- empty_local_shards,
- sharded_tensor_metadata
+ empty_local_shards, sharded_tensor_metadata
)
wrong_num_shards = [
- sharded_tensor.Shard(torch.randn(5, 5, device=f"cuda:{self.rank}"), local_shard_metadata),
- sharded_tensor.Shard(torch.randn(5, 5, device=f"cuda:{self.rank}"), local_shard_metadata)
+ sharded_tensor.Shard(
+ torch.randn(5, 5, device=f"cuda:{self.rank}"), local_shard_metadata
+ ),
+ sharded_tensor.Shard(
+ torch.randn(5, 5, device=f"cuda:{self.rank}"), local_shard_metadata
+ ),
]
- with self.assertRaisesRegex(RuntimeError, 'does not match number of local shards metadata'):
+ with self.assertRaisesRegex(
+ RuntimeError, "does not match number of local shards metadata"
+ ):
ShardedTensor._init_from_local_shards_and_global_metadata(
- wrong_num_shards,
- sharded_tensor_metadata
+ wrong_num_shards, sharded_tensor_metadata
)
- with self.assertRaisesRegex(ValueError, 'Shard tensor size does not match with metadata.shard_lengths'):
- wrong_size_shards = [sharded_tensor.Shard(torch.randn(2, 3, device=f"cuda:{self.rank}"), local_shard_metadata)]
+ with self.assertRaisesRegex(
+ ValueError, "Shard tensor size does not match with metadata.shard_lengths"
+ ):
+ wrong_size_shards = [
+ sharded_tensor.Shard(
+ torch.randn(2, 3, device=f"cuda:{self.rank}"), local_shard_metadata
+ )
+ ]
- with self.assertRaisesRegex(ValueError, "Local shard tensor device does not match with local Shard's placement"):
- wrong_device_shards = [sharded_tensor.Shard(torch.randn(5, 5), local_shard_metadata)]
+ with self.assertRaisesRegex(
+ ValueError,
+ "Local shard tensor device does not match with local Shard's placement",
+ ):
+ wrong_device_shards = [
+ sharded_tensor.Shard(torch.randn(5, 5), local_shard_metadata)
+ ]
wrong_dtype_shards = [
- sharded_tensor.Shard(torch.ones(5, 5, device=f"cuda:{self.rank}", dtype=torch.int), local_shard_metadata)
+ sharded_tensor.Shard(
+ torch.ones(5, 5, device=f"cuda:{self.rank}", dtype=torch.int),
+ local_shard_metadata,
+ )
]
- with self.assertRaisesRegex(ValueError, "Local shards' tensor dtype property is incompatible with"):
+ with self.assertRaisesRegex(
+ ValueError, "Local shards' tensor dtype property is incompatible with"
+ ):
ShardedTensor._init_from_local_shards_and_global_metadata(
- wrong_dtype_shards,
- sharded_tensor_metadata
+ wrong_dtype_shards, sharded_tensor_metadata
)
indices = [[0, 1, 1], [2, 0, 2]]
values = [3.2, 4.5, 5.8]
- sparse_tensor = torch.sparse_coo_tensor(indices, values, (5, 5), device=f"cuda:{self.rank}")
+ sparse_tensor = torch.sparse_coo_tensor(
+ indices, values, (5, 5), device=f"cuda:{self.rank}"
+ )
wrong_layout_shards = [
sharded_tensor.Shard(sparse_tensor, local_shard_metadata)
]
- with self.assertRaisesRegex(ValueError, "Local shards' tensor layout property is incompatible with"):
+ with self.assertRaisesRegex(
+ ValueError, "Local shards' tensor layout property is incompatible with"
+ ):
ShardedTensor._init_from_local_shards_and_global_metadata(
- wrong_layout_shards,
- sharded_tensor_metadata
+ wrong_layout_shards, sharded_tensor_metadata
)
wrong_requires_grad_shards = [
- sharded_tensor.Shard(torch.randn(5, 5, device=f"cuda:{self.rank}", requires_grad=True), local_shard_metadata)
+ sharded_tensor.Shard(
+ torch.randn(5, 5, device=f"cuda:{self.rank}", requires_grad=True),
+ local_shard_metadata,
+ )
]
- with self.assertRaisesRegex(ValueError, "Local shards' tensor requires_grad property is incompatible with"):
+ with self.assertRaisesRegex(
+ ValueError,
+ "Local shards' tensor requires_grad property is incompatible with",
+ ):
ShardedTensor._init_from_local_shards_and_global_metadata(
- wrong_requires_grad_shards,
- sharded_tensor_metadata
+ wrong_requires_grad_shards, sharded_tensor_metadata
)
wrong_memory_format_shards = [
- sharded_tensor.Shard(torch.randn(5, 5, device=f"cuda:{self.rank}").t(), local_shard_metadata)
+ sharded_tensor.Shard(
+ torch.randn(5, 5, device=f"cuda:{self.rank}").t(), local_shard_metadata
+ )
]
- with self.assertRaisesRegex(ValueError, 'Only torch.contiguous_format memory_format is currently supported'):
+ with self.assertRaisesRegex(
+ ValueError,
+ "Only torch.contiguous_format memory_format is currently supported",
+ ):
ShardedTensor._init_from_local_shards_and_global_metadata(
- wrong_memory_format_shards,
- sharded_tensor_metadata
+ wrong_memory_format_shards, sharded_tensor_metadata
)
# pin_memory can only be on CPU
local_shard_metadata.placement = _remote_device(f"rank:{self.rank}/cpu")
wrong_pin_memory_shards = [
- sharded_tensor.Shard(torch.randn(5, 5, pin_memory=True), local_shard_metadata)
+ sharded_tensor.Shard(
+ torch.randn(5, 5, pin_memory=True), local_shard_metadata
+ )
]
- with self.assertRaisesRegex(ValueError, "Local shards' tensor pin_memory property is incompatible with"):
+ with self.assertRaisesRegex(
+ ValueError, "Local shards' tensor pin_memory property is incompatible with"
+ ):
ShardedTensor._init_from_local_shards_and_global_metadata(
- wrong_pin_memory_shards,
- sharded_tensor_metadata
+ wrong_pin_memory_shards, sharded_tensor_metadata
)
-class TestShardedTensorCustomOps(ShardedTensorTestBase):
+class TestShardedTensorCustomOps(ShardedTensorTestBase):
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_custom_op(self):
-
@custom_sharded_op_impl(torch.asin)
def my_sharded_asin(types, args, kwargs, process_group):
return torch.asin(args[0].local_shards()[0].tensor)
@@ -2759,7 +3050,6 @@ class TestShardedTensorCustomOps(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_custom_op_override(self):
-
t = torch.rand(10, 10).cuda(self.rank)
from torch.distributed._shard.sharding_spec.api import custom_sharding_spec_op
@@ -2778,7 +3068,7 @@ class TestShardedTensorCustomOps(ShardedTensorTestBase):
],
)
m = torch.nn.Linear(32, 16).cuda(self.rank)
- shard_parameter(m, 'weight', spec)
+ shard_parameter(m, "weight", spec)
result = m(torch.rand(15, 32).cuda(self.rank))
self.assertEqual(t, result)
@@ -2787,17 +3077,19 @@ class TestShardedTensorCustomOps(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_custom_op_errors(self):
+ with self.assertRaisesRegex(TypeError, "expects signature"):
 
-        with self.assertRaisesRegex(TypeError, 'expects signature'):
def my_op1(types, args, kwargs, process_group, random_param):
pass
- with self.assertRaisesRegex(TypeError, 'expects signature'):
+ with self.assertRaisesRegex(TypeError, "expects signature"):
+
@custom_sharded_op_impl(torch.nn.functional.linear)
def my_op2(types):
pass
+
class TestShardMetadata(ShardedTensorTestBase):
@with_comms
@requires_nccl()
@@ -2846,8 +3138,8 @@ class TestShardedTensorSubGroupInit(TestCase):
metadata=ShardMetadata(
shard_offsets=[3 * (rank // sub_group_sz)],
shard_sizes=[3],
- placement=f"rank:{rank}/meta"
- )
+ placement=f"rank:{rank}/meta",
+ ),
)
],
6,
@@ -2871,8 +3163,7 @@ class TestShardedTensorSubGroupInit(TestCase):
for r in sub_pg_ranks:
_parse_and_validate_remote_device(
- sub_pg,
- _remote_device(f"rank:{r}/cuda:{r % sub_group_sz}")
+ sub_pg, _remote_device(f"rank:{r}/cuda:{r % sub_group_sz}")
)
@@ -2925,7 +3216,10 @@ class TestCreateTensorNoProcessGroupMode(TestCase):
sizes = shard_metadata.shard_sizes
st_local_shards.append(
Shard(
- tensor=src[offsets[0]:offsets[0] + sizes[0], offsets[1]:offsets[1] + sizes[1]],
+ tensor=src[
+ offsets[0] : offsets[0] + sizes[0],
+ offsets[1] : offsets[1] + sizes[1],
+ ],
metadata=shard_metadata,
)
)
@@ -2935,5 +3229,6 @@ class TestCreateTensorNoProcessGroupMode(TestCase):
sharded_tensor_metadata=st_metadata,
)
-if __name__ == '__main__':
+
+if __name__ == "__main__":
run_tests()
diff --git a/test/distributed/_shard/sharded_tensor/test_sharded_tensor_reshard.py b/test/distributed/_shard/sharded_tensor/test_sharded_tensor_reshard.py
index ec053c95b4..c3fe5ee681 100644
--- a/test/distributed/_shard/sharded_tensor/test_sharded_tensor_reshard.py
+++ b/test/distributed/_shard/sharded_tensor/test_sharded_tensor_reshard.py
@@ -4,22 +4,10 @@ import sys
from itertools import product
import torch
-from torch.distributed._shard import (
- sharded_tensor,
- _shard_tensor,
-)
-from torch.distributed._shard.sharding_spec import (
- EnumerableShardingSpec,
- ShardMetadata,
-)
-from torch.testing._internal.common_distributed import (
- requires_nccl,
- skip_if_lt_x_gpu,
-)
-from torch.testing._internal.common_utils import (
- TEST_WITH_DEV_DBG_ASAN,
- run_tests,
-)
+from torch.distributed._shard import _shard_tensor, sharded_tensor
+from torch.distributed._shard.sharding_spec import EnumerableShardingSpec, ShardMetadata
+from torch.testing._internal.common_distributed import requires_nccl, skip_if_lt_x_gpu
+from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
from torch.testing._internal.distributed._shard.sharded_tensor import (
ShardedTensorTestBase,
with_comms,
@@ -45,7 +33,9 @@ class TestReshard(ShardedTensorTestBase):
st.reshard(reshard_spec)
self.assertEqual(1, len(st.local_shards()))
self.assertEqual(1, len(st_compare.local_shards()))
- st_compare._metadata.shards_metadata.sort(key=lambda metadata: metadata.placement.rank())
+ st_compare._metadata.shards_metadata.sort(
+ key=lambda metadata: metadata.placement.rank()
+ )
self.assertEqual(st._metadata, st_compare._metadata)
self.assertEqual(st.local_tensor(), st_compare.local_tensor())
self.assertEqual(
diff --git a/test/distributed/_shard/sharding_plan/test_sharding_plan.py b/test/distributed/_shard/sharding_plan/test_sharding_plan.py
index 0536163a18..c1ca7a6c7b 100644
--- a/test/distributed/_shard/sharding_plan/test_sharding_plan.py
+++ b/test/distributed/_shard/sharding_plan/test_sharding_plan.py
@@ -1,26 +1,19 @@
-
# Owner(s): ["oncall: distributed"]
import sys
import torch
-import torch.nn as nn
import torch.distributed as dist
-from torch.testing._internal.common_distributed import (
- requires_nccl,
- skip_if_lt_x_gpu,
-)
+import torch.nn as nn
from torch.distributed._shard import shard_module
+from torch.distributed._shard.sharded_tensor import ShardedTensor
from torch.distributed._shard.sharding_plan import ShardingPlan, ShardingPlanner
from torch.distributed._shard.sharding_spec import ChunkShardingSpec
-from torch.distributed._shard.sharded_tensor import ShardedTensor
+from torch.testing._internal.common_distributed import requires_nccl, skip_if_lt_x_gpu
-from torch.testing._internal.common_utils import (
- TEST_WITH_DEV_DBG_ASAN,
- run_tests,
-)
+from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
from torch.testing._internal.distributed._shard.sharded_tensor import (
- TEST_GPU_NUM,
ShardedTensorTestBase,
+ TEST_GPU_NUM,
with_comms,
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_ops_common import (
@@ -65,9 +58,7 @@ class TestShardingPlan(ShardedTensorTestBase):
plan={
"fc1.weight": torch.randn(3, 4),
},
- output_plan={
- "": rowwise_sharding_spec
- },
+ output_plan={"": rowwise_sharding_spec},
)
megatron_lm = SimpleMegatronLM([[17, 12], [12, 29]]).cuda(self.rank)
@@ -82,9 +73,7 @@ class TestShardingPlan(ShardedTensorTestBase):
plan={
"fc1.weight": rowwise_sharding_spec,
},
- output_plan={
- "": torch.randn(3, 4)
- },
+ output_plan={"": torch.randn(3, 4)},
)
with self.assertRaisesRegex(
@@ -98,9 +87,7 @@ class TestShardingPlan(ShardedTensorTestBase):
"fc3.weight": rowwise_sharding_spec,
},
)
- with self.assertRaisesRegex(
- AttributeError, "has no attribute"
- ):
+ with self.assertRaisesRegex(AttributeError, "has no attribute"):
# shard the module with the provided sharding plan
shard_module(megatron_lm, sharding_plan_wrong_module_path)
@@ -109,9 +96,7 @@ class TestShardingPlan(ShardedTensorTestBase):
"fc1.biass": rowwise_sharding_spec,
},
)
- with self.assertRaisesRegex(
- AttributeError, "has no attribute"
- ):
+ with self.assertRaisesRegex(AttributeError, "has no attribute"):
# shard the module with the provided sharding plan
shard_module(megatron_lm, sharding_plan_wrong_param_path)
@@ -155,7 +140,7 @@ class TestShardingPlan(ShardedTensorTestBase):
sharding_plan = ShardingPlan(
plan={
"fc1.weight": colwise_sharding_spec,
- "fc2.weight": rowwise_sharding_spec
+ "fc2.weight": rowwise_sharding_spec,
}
)
@@ -164,5 +149,6 @@ class TestShardingPlan(ShardedTensorTestBase):
if self.rank >= 2:
shard_module(megatron_lm, sharding_plan, process_group=pg)
+
if __name__ == "__main__":
run_tests()
diff --git a/test/distributed/_shard/sharding_spec/test_sharding_spec.py b/test/distributed/_shard/sharding_spec/test_sharding_spec.py
index 7ff27a0f30..8502a63f25 100644
--- a/test/distributed/_shard/sharding_spec/test_sharding_spec.py
+++ b/test/distributed/_shard/sharding_spec/test_sharding_spec.py
@@ -1,51 +1,48 @@
# Owner(s): ["oncall: distributed"]
-from typing import List, Union
+import copy
from dataclasses import dataclass
+from typing import List, Union
-import copy
import torch
-from torch.testing._internal.common_utils import TestCase
-from torch.testing._internal.common_distributed import (
- requires_nccl,
- skip_if_lt_x_gpu,
+from torch.distributed._shard import _shard_tensor, sharded_tensor
+from torch.distributed._shard.sharded_tensor import (
+ ShardedTensor,
+ ShardedTensorMetadata,
+ TensorProperties,
)
-from torch.distributed._shard import sharded_tensor, _shard_tensor
from torch.distributed._shard.sharding_spec import (
- ShardingSpec,
+ _infer_sharding_spec_from_shards_metadata,
ChunkShardingSpec,
DevicePlacementSpec,
EnumerableShardingSpec,
+ ShardingSpec,
ShardMetadata,
- _infer_sharding_spec_from_shards_metadata,
-)
-from torch.distributed._shard.sharded_tensor import (
- TensorProperties,
- ShardedTensor,
- ShardedTensorMetadata,
)
from torch.distributed._shard.sharding_spec._internals import (
check_tensor,
- get_split_size,
- get_chunked_dim_size,
get_chunk_sharding_params,
+ get_chunked_dim_size,
+ get_split_size,
validate_non_overlapping_shards_metadata,
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU
+from torch.testing._internal.common_distributed import requires_nccl, skip_if_lt_x_gpu
from torch.testing._internal.common_utils import (
run_tests,
skip_but_pass_in_sandcastle_if,
-)
-from torch.testing._internal.distributed._shard.sharded_tensor._test_st_common import (
- _chunk_sharding_specs_list_for_test,
+ TestCase,
)
from torch.testing._internal.distributed._shard.sharded_tensor import (
ShardedTensorTestBase,
with_comms,
)
+from torch.testing._internal.distributed._shard.sharded_tensor._test_st_common import (
+ _chunk_sharding_specs_list_for_test,
+)
-class TestShardingSpec(TestCase):
- @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, '2 CUDA GPUs are needed')
+class TestShardingSpec(TestCase):
+ @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "2 CUDA GPUs are needed")
def test_device_placement(self):
# valid devices
DevicePlacementSpec("cuda:0")
@@ -65,7 +62,7 @@ class TestShardingSpec(TestCase):
with self.assertRaisesRegex(RuntimeError, "Invalid device string"):
DevicePlacementSpec("rank:0/cpu2")
- @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, '2 CUDA GPUs are needed')
+ @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "2 CUDA GPUs are needed")
def test_chunked_sharding_spec(self):
# Test valid specs.
ChunkShardingSpec(0, [torch.device(0), torch.device(1)])
@@ -98,165 +95,173 @@ class TestShardingSpec(TestCase):
with self.assertRaisesRegex(RuntimeError, "Invalid device string"):
ChunkShardingSpec(0, ["rank:0/cuda:foo", "cuda:1"])
- @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, '2 CUDA GPUs are needed')
+ @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "2 CUDA GPUs are needed")
def test_enumerable_sharding_spec(self):
# test valid specs
# test row-wise sharding
- spec = EnumerableShardingSpec([
- ShardMetadata(
- shard_offsets=[0, 0],
- shard_sizes=[5, 5],
- placement="cuda:0",
- ),
- ShardMetadata(
- shard_offsets=[5, 0],
- shard_sizes=[5, 5],
- placement="cuda:1",
- )
- ])
+ spec = EnumerableShardingSpec(
+ [
+ ShardMetadata(
+ shard_offsets=[0, 0],
+ shard_sizes=[5, 5],
+ placement="cuda:0",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 0],
+ shard_sizes=[5, 5],
+ placement="cuda:1",
+ ),
+ ]
+ )
check_tensor(spec.shards, torch.rand(10, 5).size())
# test row and column sharding
- spec = EnumerableShardingSpec([
- ShardMetadata(
- shard_offsets=[0, 0],
- shard_sizes=[3, 3],
- placement="cuda:0",
- ),
- ShardMetadata(
- shard_offsets=[0, 3],
- shard_sizes=[3, 3],
- placement="cuda:1",
- ),
- ShardMetadata(
- shard_offsets=[3, 0],
- shard_sizes=[3, 3],
- placement="cuda:2",
- ),
- ShardMetadata(
- shard_offsets=[3, 3],
- shard_sizes=[3, 3],
- placement="cuda:3",
- ),
- ])
+ spec = EnumerableShardingSpec(
+ [
+ ShardMetadata(
+ shard_offsets=[0, 0],
+ shard_sizes=[3, 3],
+ placement="cuda:0",
+ ),
+ ShardMetadata(
+ shard_offsets=[0, 3],
+ shard_sizes=[3, 3],
+ placement="cuda:1",
+ ),
+ ShardMetadata(
+ shard_offsets=[3, 0],
+ shard_sizes=[3, 3],
+ placement="cuda:2",
+ ),
+ ShardMetadata(
+ shard_offsets=[3, 3],
+ shard_sizes=[3, 3],
+ placement="cuda:3",
+ ),
+ ]
+ )
check_tensor(spec.shards, torch.rand(6, 6).size())
# test uneven shard sizes.
- spec = EnumerableShardingSpec([
- ShardMetadata(
- shard_offsets=[0, 0],
- shard_sizes=[2, 4],
- placement="cuda:0",
- ),
- ShardMetadata(
- shard_offsets=[0, 4],
- shard_sizes=[4, 2],
- placement="cuda:1",
- ),
- ShardMetadata(
- shard_offsets=[2, 0],
- shard_sizes=[4, 4],
- placement="cuda:2",
- ),
- ShardMetadata(
- shard_offsets=[4, 4],
- shard_sizes=[2, 2],
- placement="cuda:3",
- ),
- ])
+ spec = EnumerableShardingSpec(
+ [
+ ShardMetadata(
+ shard_offsets=[0, 0],
+ shard_sizes=[2, 4],
+ placement="cuda:0",
+ ),
+ ShardMetadata(
+ shard_offsets=[0, 4],
+ shard_sizes=[4, 2],
+ placement="cuda:1",
+ ),
+ ShardMetadata(
+ shard_offsets=[2, 0],
+ shard_sizes=[4, 4],
+ placement="cuda:2",
+ ),
+ ShardMetadata(
+ shard_offsets=[4, 4],
+ shard_sizes=[2, 2],
+ placement="cuda:3",
+ ),
+ ]
+ )
check_tensor(spec.shards, torch.rand(6, 6).size())
# test invalid sharding
- with self.assertRaisesRegex(ValueError, 'Could not parse remote_device'):
+ with self.assertRaisesRegex(ValueError, "Could not parse remote_device"):
ShardMetadata(shard_offsets=[0], shard_sizes=[1], placement="cuda:foo")
- with self.assertRaisesRegex(ValueError, 'same number of elements'):
+ with self.assertRaisesRegex(ValueError, "same number of elements"):
ShardMetadata(shard_offsets=[0, 0], shard_sizes=[1], placement="cuda:0")
- with self.assertRaisesRegex(ValueError, 'shard_offsets should be >=0'):
+ with self.assertRaisesRegex(ValueError, "shard_offsets should be >=0"):
ShardMetadata(shard_offsets=[-1, 0], shard_sizes=[1, 1], placement="cuda:0")
- with self.assertRaisesRegex(ValueError, 'shard_sizes should be >= 0'):
+ with self.assertRaisesRegex(ValueError, "shard_sizes should be >= 0"):
ShardMetadata(shard_offsets=[0, 0], shard_sizes=[-1, 1], placement="cuda:0")
- with self.assertRaisesRegex(ValueError, 'Empty shard list provided'):
+ with self.assertRaisesRegex(ValueError, "Empty shard list provided"):
EnumerableShardingSpec([])
- with self.assertRaisesRegex(ValueError, 'Found inconsistent ranks for shards'):
- EnumerableShardingSpec([
+ with self.assertRaisesRegex(ValueError, "Found inconsistent ranks for shards"):
+ EnumerableShardingSpec(
+ [
+ ShardMetadata(
+ shard_offsets=[0, 0], shard_sizes=[1, 1], placement="cpu"
+ ),
+ ShardMetadata(
+ shard_offsets=[0, 0, 0], shard_sizes=[1, 1, 1], placement="cpu"
+ ),
+ ]
+ )
+
+ with self.assertRaisesRegex(ValueError, "Shards.*overlap"):
+ EnumerableShardingSpec(
+ [
+ ShardMetadata(
+ shard_offsets=[0, 0], shard_sizes=[3, 3], placement="cpu"
+ ),
+ ShardMetadata(
+ shard_offsets=[2, 0], shard_sizes=[3, 3], placement="cpu"
+ ),
+ ]
+ )
+
+ spec = EnumerableShardingSpec(
+ [
ShardMetadata(
shard_offsets=[0, 0],
- shard_sizes=[1, 1],
- placement="cpu"
+ shard_sizes=[5, 5],
+ placement="cuda:0",
),
ShardMetadata(
- shard_offsets=[0, 0, 0],
- shard_sizes=[1, 1, 1],
- placement="cpu"
+ shard_offsets=[5, 0],
+ shard_sizes=[5, 5],
+ placement="cuda:1",
),
- ])
+ ]
+ )
- with self.assertRaisesRegex(ValueError, 'Shards.*overlap'):
- EnumerableShardingSpec([
+ with self.assertRaisesRegex(ValueError, "Rank of tensor is.*but shards rank"):
+ check_tensor(spec.shards, torch.rand(10, 10, 10).size())
+
+ spec = EnumerableShardingSpec(
+ [
ShardMetadata(
shard_offsets=[0, 0],
- shard_sizes=[3, 3],
- placement="cpu"
+ shard_sizes=[5, 5],
+ placement="cuda:0",
),
ShardMetadata(
- shard_offsets=[2, 0],
- shard_sizes=[3, 3],
- placement="cpu"
+ shard_offsets=[5, 0],
+ shard_sizes=[5, 5],
+ placement="cuda:1",
),
- ])
-
- spec = EnumerableShardingSpec([
- ShardMetadata(
- shard_offsets=[0, 0],
- shard_sizes=[5, 5],
- placement="cuda:0",
- ),
- ShardMetadata(
- shard_offsets=[5, 0],
- shard_sizes=[5, 5],
- placement="cuda:1",
- )
- ])
-
- with self.assertRaisesRegex(ValueError, 'Rank of tensor is.*but shards rank'):
- check_tensor(spec.shards, torch.rand(10, 10, 10).size())
-
- spec = EnumerableShardingSpec([
- ShardMetadata(
- shard_offsets=[0, 0],
- shard_sizes=[5, 5],
- placement="cuda:0",
- ),
- ShardMetadata(
- shard_offsets=[5, 0],
- shard_sizes=[5, 5],
- placement="cuda:1",
- )
- ])
+ ]
+ )
- with self.assertRaisesRegex(ValueError, 'exceeds tensor dim'):
+ with self.assertRaisesRegex(ValueError, "exceeds tensor dim"):
check_tensor(spec.shards, torch.rand(10, 3).size())
- spec = EnumerableShardingSpec([
- ShardMetadata(
- shard_offsets=[0, 0],
- shard_sizes=[5, 5],
- placement="cuda:0",
- ),
- ShardMetadata(
- shard_offsets=[5, 5],
- shard_sizes=[5, 5],
- placement="cuda:1",
- )
- ])
+ spec = EnumerableShardingSpec(
+ [
+ ShardMetadata(
+ shard_offsets=[0, 0],
+ shard_sizes=[5, 5],
+ placement="cuda:0",
+ ),
+ ShardMetadata(
+ shard_offsets=[5, 5],
+ shard_sizes=[5, 5],
+ placement="cuda:1",
+ ),
+ ]
+ )
- with self.assertRaisesRegex(ValueError, 'does not match tensor volume'):
+ with self.assertRaisesRegex(ValueError, "does not match tensor volume"):
check_tensor(spec.shards, torch.rand(10, 10).size())
def test_get_split_size(self):
@@ -313,7 +318,7 @@ class TestShardingSpec(TestCase):
shard_offsets=[5, 0],
shard_sizes=[10, 5],
placement="cuda:1",
- )
+ ),
]
spec = _infer_sharding_spec_from_shards_metadata(shards_metadata)
self.assertTrue(isinstance(spec, EnumerableShardingSpec))
@@ -329,7 +334,7 @@ class TestShardingSpec(TestCase):
shard_offsets=[16],
shard_sizes=[9],
placement="cuda:1",
- )
+ ),
]
spec = _infer_sharding_spec_from_shards_metadata(shards_metadata)
self.assertTrue(isinstance(spec, EnumerableShardingSpec))
@@ -369,7 +374,9 @@ class TestShardingSpec(TestCase):
shard_size = copy.deepcopy(st_size)
offsets = [0] * len(st_size)
offsets[sharding_dim] = split_size * idx
- shard_size[sharding_dim] = get_chunked_dim_size(st_size[sharding_dim], split_size, idx)
+ shard_size[sharding_dim] = get_chunked_dim_size(
+ st_size[sharding_dim], split_size, idx
+ )
shards_metadata[placement.rank()] = ShardMetadata(
shard_offsets=offsets,
shard_sizes=shard_size,
@@ -390,73 +397,100 @@ class TestShardingSpec(TestCase):
self._infer_chunk_sharding_spec_case(spec.placements, 1, [12, 16])
self._infer_chunk_sharding_spec_case(spec.placements, 2, [4, 18, 15])
self._infer_chunk_sharding_spec_case(spec.placements, 3, [7, 12, 16, 37])
- self._infer_chunk_sharding_spec_case(spec.placements, 4, [50, 4, 18, 15, 77])
+ self._infer_chunk_sharding_spec_case(
+ spec.placements, 4, [50, 4, 18, 15, 77]
+ )
def test_check_overlapping(self):
shards = [
ShardMetadata(
- shard_offsets=[0, 0], shard_sizes=[5, 5], placement="cuda:0",
+ shard_offsets=[0, 0],
+ shard_sizes=[5, 5],
+ placement="cuda:0",
),
ShardMetadata(
- shard_offsets=[5, 0], shard_sizes=[5, 5], placement="cuda:1",
- )
+ shard_offsets=[5, 0],
+ shard_sizes=[5, 5],
+ placement="cuda:1",
+ ),
]
validate_non_overlapping_shards_metadata(shards)
shards = [
ShardMetadata(
- shard_offsets=[0, 0], shard_sizes=[5, 5], placement="cuda:0",
+ shard_offsets=[0, 0],
+ shard_sizes=[5, 5],
+ placement="cuda:0",
),
ShardMetadata(
- shard_offsets=[4, 0], shard_sizes=[5, 5], placement="cuda:1",
- )
+ shard_offsets=[4, 0],
+ shard_sizes=[5, 5],
+ placement="cuda:1",
+ ),
]
with self.assertRaisesRegex(ValueError, "overlap"):
validate_non_overlapping_shards_metadata(shards)
shards = [
ShardMetadata(
- shard_offsets=[0, 0], shard_sizes=[5, 5], placement="cuda:0",
+ shard_offsets=[0, 0],
+ shard_sizes=[5, 5],
+ placement="cuda:0",
),
ShardMetadata(
- shard_offsets=[0, 4], shard_sizes=[5, 5], placement="cuda:1",
- )
+ shard_offsets=[0, 4],
+ shard_sizes=[5, 5],
+ placement="cuda:1",
+ ),
]
with self.assertRaisesRegex(ValueError, "overlap"):
validate_non_overlapping_shards_metadata(shards)
shards = [
ShardMetadata(
- shard_offsets=[5, 0, 5], shard_sizes=[5, 5, 5], placement="cuda:0",
+ shard_offsets=[5, 0, 5],
+ shard_sizes=[5, 5, 5],
+ placement="cuda:0",
),
ShardMetadata(
- shard_offsets=[5, 5, 5], shard_sizes=[5, 5, 5], placement="cuda:1",
- )
+ shard_offsets=[5, 5, 5],
+ shard_sizes=[5, 5, 5],
+ placement="cuda:1",
+ ),
]
validate_non_overlapping_shards_metadata(shards)
shards = [
ShardMetadata(
- shard_offsets=[5, 0, 5], shard_sizes=[5, 5, 5], placement="cuda:0",
+ shard_offsets=[5, 0, 5],
+ shard_sizes=[5, 5, 5],
+ placement="cuda:0",
),
ShardMetadata(
- shard_offsets=[5, 4, 5], shard_sizes=[5, 5, 5], placement="cuda:1",
- )
+ shard_offsets=[5, 4, 5],
+ shard_sizes=[5, 5, 5],
+ placement="cuda:1",
+ ),
]
with self.assertRaisesRegex(ValueError, "overlap"):
validate_non_overlapping_shards_metadata(shards)
shards = [
ShardMetadata(
- shard_offsets=[5, 0, 5], shard_sizes=[5, 5, 5], placement="cuda:0",
+ shard_offsets=[5, 0, 5],
+ shard_sizes=[5, 5, 5],
+ placement="cuda:0",
),
ShardMetadata(
- shard_offsets=[5, 4, 9], shard_sizes=[5, 5, 5], placement="cuda:1",
- )
+ shard_offsets=[5, 4, 9],
+ shard_sizes=[5, 5, 5],
+ placement="cuda:1",
+ ),
]
with self.assertRaisesRegex(ValueError, "overlap"):
validate_non_overlapping_shards_metadata(shards)
+
# Custom ShardingSpec, an simple example to do grid sharding
@dataclass
class GridShardingSpec(ShardingSpec):
@@ -468,10 +502,11 @@ class GridShardingSpec(ShardingSpec):
if not isinstance(remote_device, torch.distributed._remote_device):
self.placements[i] = torch.distributed._remote_device(remote_device)
- def build_metadata(self,
- tensor_sizes: torch.Size,
- tensor_properties: TensorProperties,
- ) -> ShardedTensorMetadata:
+ def build_metadata(
+ self,
+ tensor_sizes: torch.Size,
+ tensor_properties: TensorProperties,
+ ) -> ShardedTensorMetadata:
tensor_num_dim = len(tensor_sizes)
assert tensor_num_dim == 2, "only support 2-dim tensor for grid sharding"
shards_metadata = []
@@ -488,25 +523,26 @@ class GridShardingSpec(ShardingSpec):
for col_idx in range(col_chunks):
shards_metadata.append(
ShardMetadata(
- shard_offsets=[row_idx * self.grid_size, col_idx * self.grid_size],
+ shard_offsets=[
+ row_idx * self.grid_size,
+ col_idx * self.grid_size,
+ ],
shard_sizes=[self.grid_size, self.grid_size],
- placement=self.placements[row_idx * row_chunks + col_idx]
+ placement=self.placements[row_idx * row_chunks + col_idx],
)
)
return ShardedTensorMetadata(
shards_metadata=shards_metadata,
size=tensor_sizes,
- tensor_properties=tensor_properties
+ tensor_properties=tensor_properties,
)
-
- def shard(self,
- tensor: torch.Tensor,
- src_rank: int = 0,
- process_group=None) -> ShardedTensor:
-
+ def shard(
+ self, tensor: torch.Tensor, src_rank: int = 0, process_group=None
+ ) -> ShardedTensor:
raise NotImplementedError("GridShardingSpec.shard not implemented yet!")
+
class TestCustomShardingSpec(ShardedTensorTestBase):
def test_custom_sharding_spec(self):
ranks = [
@@ -516,10 +552,7 @@ class TestCustomShardingSpec(ShardedTensorTestBase):
"rank:3/cuda:3",
]
- grid_spec = GridShardingSpec(
- grid_size=4,
- placements=ranks
- )
+ grid_spec = GridShardingSpec(grid_size=4, placements=ranks)
tensor_properties = TensorProperties(
dtype=torch.get_default_dtype(),
@@ -536,8 +569,8 @@ class TestCustomShardingSpec(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_custom_sharding_spec_tensor_ctor(self):
- """ Test sharded_tensor.ones(...) with the custom
- grid sharding spec.
+ """Test sharded_tensor.ones(...) with the custom
+ grid sharding spec.
"""
ranks = [
@@ -547,10 +580,7 @@ class TestCustomShardingSpec(ShardedTensorTestBase):
"rank:3/cuda:3",
]
- grid_spec = GridShardingSpec(
- grid_size=2,
- placements=ranks
- )
+ grid_spec = GridShardingSpec(grid_size=2, placements=ranks)
st = sharded_tensor.ones(grid_spec, 4, 4)
@@ -566,8 +596,8 @@ class TestCustomShardingSpec(ShardedTensorTestBase):
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_custom_sharding_spec_shard_tensor(self):
- """ Test custom spec can be invoked from the
- _shard_tensor callsite.
+ """Test custom spec can be invoked from the
+ _shard_tensor callsite.
"""
ranks = [
@@ -577,14 +607,11 @@ class TestCustomShardingSpec(ShardedTensorTestBase):
"rank:3/cuda:3",
]
- grid_spec = GridShardingSpec(
- grid_size=2,
- placements=ranks
- )
+ grid_spec = GridShardingSpec(grid_size=2, placements=ranks)
- with self.assertRaisesRegex(NotImplementedError, 'not implemented'):
+ with self.assertRaisesRegex(NotImplementedError, "not implemented"):
_shard_tensor(torch.randn(8, 8), grid_spec)
-if __name__ == '__main__':
+if __name__ == "__main__":
run_tests()
diff --git a/test/distributed/_shard/test_sharder.py b/test/distributed/_shard/test_sharder.py
index 79bcfe56f3..9a59f891bc 100644
--- a/test/distributed/_shard/test_sharder.py
+++ b/test/distributed/_shard/test_sharder.py
@@ -1,24 +1,20 @@
-
# Owner(s): ["oncall: distributed"]
-import sys
import copy
+import sys
import torch
import torch.nn as nn
-from torch.testing._internal.common_distributed import (
- requires_nccl,
- skip_if_lt_x_gpu,
-)
from torch.distributed._shard import shard_module
-from torch.distributed._shard.sharding_plan import ShardingPlan
+from torch.distributed._shard.sharded_tensor import ShardedTensor
from torch.distributed._shard.sharder import Sharder
+from torch.distributed._shard.sharding_plan import ShardingPlan
from torch.distributed._shard.sharding_spec import ChunkShardingSpec
-from torch.distributed._shard.sharded_tensor import ShardedTensor
+from torch.testing._internal.common_distributed import requires_nccl, skip_if_lt_x_gpu
-from torch.testing._internal.common_utils import TEST_WITH_DEV_DBG_ASAN, run_tests
+from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
from torch.testing._internal.distributed._shard.sharded_tensor import (
- TEST_GPU_NUM,
ShardedTensorTestBase,
+ TEST_GPU_NUM,
with_comms,
)
@@ -29,6 +25,7 @@ if TEST_WITH_DEV_DBG_ASAN:
)
sys.exit(0)
+
# a simple collection of embedding bag implementation
class CustomEmbeddingBagCollection(nn.Module):
def __init__(self, num_bags, num_embeddings_per_bag, num_dims):
@@ -38,9 +35,8 @@ class CustomEmbeddingBagCollection(nn.Module):
for i in range(num_bags):
self.embedding_bags[f"embedding_bag_{i}"] = nn.EmbeddingBag(
- num_embeddings_per_bag,
- num_dims,
- mode="sum")
+ num_embeddings_per_bag, num_dims, mode="sum"
+ )
def forward(self, inputs):
outputs = []
@@ -48,6 +44,7 @@ class CustomEmbeddingBagCollection(nn.Module):
outputs.append(bag(inputs))
return torch.cat(outputs)
+
# a simple sharded version of EBC
class CustomShardedEBC(nn.Module):
def __init__(self, ebc, split_idx, specs):
@@ -62,9 +59,19 @@ class CustomShardedEBC(nn.Module):
for i in range(ebc.num_bags):
bag_key = f"embedding_bag_{i}"
if i < self.split_idx:
- shard_module(ebc, plan=ShardingPlan(plan={f"embedding_bags.{bag_key}.weight": row_spec}))
+ shard_module(
+ ebc,
+ plan=ShardingPlan(
+ plan={f"embedding_bags.{bag_key}.weight": row_spec}
+ ),
+ )
else:
- shard_module(ebc, plan=ShardingPlan(plan={f"embedding_bags.{bag_key}.weight": col_spec}))
+ shard_module(
+ ebc,
+ plan=ShardingPlan(
+ plan={f"embedding_bags.{bag_key}.weight": col_spec}
+ ),
+ )
self.embedding_bags[bag_key] = ebc.embedding_bags[bag_key]
@@ -78,13 +85,16 @@ class CustomSharder(Sharder):
def shard(self, ebc: nn.Module) -> nn.Module:
if not isinstance(ebc, CustomEmbeddingBagCollection):
- raise RuntimeError("The custom sharder only supports CustomEmbeddingBagCollection")
+ raise RuntimeError(
+ "The custom sharder only supports CustomEmbeddingBagCollection"
+ )
- return CustomShardedEBC(ebc, self.split_sharding_idx, (self.rowwise_spec, self.colwise_spec))
+ return CustomShardedEBC(
+ ebc, self.split_sharding_idx, (self.rowwise_spec, self.colwise_spec)
+ )
class TestCustomSharder(ShardedTensorTestBase):
-
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
@@ -99,13 +109,14 @@ class TestCustomSharder(ShardedTensorTestBase):
custom_sharder = CustomSharder(
devices=[f"rank:{i}/cuda:{i}" for i in range(TEST_GPU_NUM)],
- split_sharding_idx=TEST_GPU_NUM // 2
+ split_sharding_idx=TEST_GPU_NUM // 2,
)
sharding_plan = ShardingPlan(
plan={
"ebc": custom_sharder,
- })
+ }
+ )
local_model = MyModule().cuda(self.rank)
sharded_model = copy.deepcopy(local_model)
@@ -117,8 +128,14 @@ class TestCustomSharder(ShardedTensorTestBase):
emb_bags = sharded_model.ebc.embedding_bags
self.assertTrue(isinstance(emb_bags["embedding_bag_0"].weight, ShardedTensor))
self.assertTrue(isinstance(emb_bags["embedding_bag_9"].weight, ShardedTensor))
- self.assertEqual(emb_bags["embedding_bag_0"].weight.sharding_spec(), custom_sharder.rowwise_spec)
- self.assertEqual(emb_bags["embedding_bag_9"].weight.sharding_spec(), custom_sharder.colwise_spec)
+ self.assertEqual(
+ emb_bags["embedding_bag_0"].weight.sharding_spec(),
+ custom_sharder.rowwise_spec,
+ )
+ self.assertEqual(
+ emb_bags["embedding_bag_9"].weight.sharding_spec(),
+ custom_sharder.colwise_spec,
+ )
# make sure we can run sharded computation and compare outputs
# with the local model version
@@ -134,13 +151,14 @@ class TestCustomSharder(ShardedTensorTestBase):
def test_custom_sharder_errors(self):
custom_sharder = CustomSharder(
devices=[f"rank:{i}/cuda:{i}" for i in range(TEST_GPU_NUM)],
- split_sharding_idx=TEST_GPU_NUM // 2
+ split_sharding_idx=TEST_GPU_NUM // 2,
)
sharding_plan = ShardingPlan(
plan={
"": custom_sharder,
- })
+ }
+ )
sharded_model = CustomEmbeddingBagCollection(10, 10, 8).cuda(self.rank)
@@ -156,7 +174,8 @@ class TestCustomSharder(ShardedTensorTestBase):
plan={
"embedding_bags.embedding_bag_0.weight": spec,
"embedding_bags": custom_sharder,
- })
+ }
+ )
with self.assertRaisesRegex(
RuntimeError, "should not conflict with the submodule tree"
diff --git a/test/distributed/_tools/test_memory_tracker.py b/test/distributed/_tools/test_memory_tracker.py
index 90dded6797..3523e51e36 100644
--- a/test/distributed/_tools/test_memory_tracker.py
+++ b/test/distributed/_tools/test_memory_tracker.py
@@ -1,17 +1,14 @@
# Owner(s): ["oncall: distributed"]
import os
-from torch.testing._internal.common_cuda import TEST_CUDA
-from torch.testing._internal.common_utils import (
- TestCase,
- run_tests,
-)
+
+import unittest
import torch
import torch.nn as nn
from torch.distributed._tools import MemoryTracker
-
-import unittest
+from torch.testing._internal.common_cuda import TEST_CUDA
+from torch.testing._internal.common_utils import run_tests, TestCase
class TestMemoryTracker(TestCase):
diff --git a/test/distributed/algorithms/ddp_comm_hooks/test_ddp_hooks.py b/test/distributed/algorithms/ddp_comm_hooks/test_ddp_hooks.py
index 2d6a17bf8d..80cb52a7e4 100644
--- a/test/distributed/algorithms/ddp_comm_hooks/test_ddp_hooks.py
+++ b/test/distributed/algorithms/ddp_comm_hooks/test_ddp_hooks.py
@@ -4,8 +4,8 @@ import os
import sys
import torch
-from torch import nn
import torch.distributed as dist
+from torch import nn
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
@@ -21,15 +21,13 @@ from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
-from torch.testing._internal.common_utils import (
- run_tests,
- TEST_WITH_DEV_DBG_ASAN,
-)
+from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
if TEST_WITH_DEV_DBG_ASAN:
print("Multiprocessing spawn is not compatible with dev/dbg asan", file=sys.stderr)
sys.exit(0)
+
def gpus_for_rank(world_size):
visible_devices = list(range(torch.cuda.device_count()))
gpus_per_process = torch.cuda.device_count() // world_size
@@ -185,7 +183,6 @@ class DistributedDataParallelCommHookTest(MultiProcessTestCase):
torch.testing.assert_close(hook_grads, reference_grads, rtol=1e-5, atol=1e-4)
-
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_noop_hook(self):
@@ -220,7 +217,7 @@ class DistributedDataParallelCommHookTest(MultiProcessTestCase):
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = nn.Sequential(
nn.Linear(2, 4000, bias=False),
- *[nn.Linear(4000, 4000, bias=False) for _ in range(10)]
+ *[nn.Linear(4000, 4000, bias=False) for _ in range(10)],
)
gpu_model = DistributedDataParallel(
model.to(device_id),
diff --git a/test/distributed/algorithms/quantization/test_quantization.py b/test/distributed/algorithms/quantization/test_quantization.py
index d0b6656ec9..daa8e9ffca 100644
--- a/test/distributed/algorithms/quantization/test_quantization.py
+++ b/test/distributed/algorithms/quantization/test_quantization.py
@@ -1,25 +1,26 @@
# Owner(s): ["oncall: distributed"]
-import torch
import os
-import torch.cuda
import sys
+
+import torch
+import torch.cuda
import torch.distributed as dist
import torch.distributed.algorithms._quantization.quantization as quant
from torch.distributed.algorithms._quantization.quantization import DQuantType
from torch.testing._internal.common_distributed import (
- MultiProcessTestCase,
init_multigpu_helper,
+ MultiProcessTestCase,
requires_gloo,
- skip_if_rocm,
- skip_if_lt_x_gpu,
requires_nccl,
+ skip_if_lt_x_gpu,
+ skip_if_rocm,
)
from torch.testing._internal.common_utils import (
- skip_but_pass_in_sandcastle_if,
+ NO_MULTIPROCESSING_SPAWN,
run_tests,
+ skip_but_pass_in_sandcastle_if,
TEST_WITH_DEV_DBG_ASAN,
- NO_MULTIPROCESSING_SPAWN,
)
torch.backends.cuda.matmul.allow_tf32 = False
diff --git a/test/distributed/algorithms/test_join.py b/test/distributed/algorithms/test_join.py
index 77ac3de4fb..89a8e9e04a 100644
--- a/test/distributed/algorithms/test_join.py
+++ b/test/distributed/algorithms/test_join.py
@@ -20,7 +20,10 @@ from torch.testing._internal.common_distributed import (
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
if TEST_WITH_DEV_DBG_ASAN:
- print("Skip dev-asan as torch + multiprocessing spawn have known issues", file=sys.stderr)
+ print(
+ "Skip dev-asan as torch + multiprocessing spawn have known issues",
+ file=sys.stderr,
+ )
sys.exit(0)
BACKEND = dist.Backend.NCCL if torch.cuda.is_available() else dist.Backend.GLOO
@@ -42,12 +45,8 @@ class AllReducerJoinHook(JoinHook):
iteration.
run_post_hook (bool): a flag enabling the post-hook logic.
"""
- def __init__(
- self,
- allreducer,
- num_allreduces,
- run_post_hook
- ):
+
+ def __init__(self, allreducer, num_allreduces, run_post_hook):
self.allreducer = allreducer
self.num_allreduces = num_allreduces
self.run_post_hook = run_post_hook
@@ -73,7 +72,9 @@ class AllReducerJoinHook(JoinHook):
common_rank = self.allreducer.find_common_rank(rank, is_last_joiner)
device = self.allreducer.device
if rank == common_rank:
- self.allreducer.post_hook_tensor = torch.tensor([AFTER_CONSTANT], device=device)
+ self.allreducer.post_hook_tensor = torch.tensor(
+ [AFTER_CONSTANT], device=device
+ )
dist.broadcast(self.allreducer.post_hook_tensor, src=common_rank)
@@ -82,6 +83,7 @@ class AllReducer(Joinable):
Example :class:`Joinable` that performs some number of all-reduces as its
per-iteration collective communication.
"""
+
def __init__(self, device, process_group):
super().__init__()
self.device = device
@@ -109,11 +111,7 @@ class AllReducer(Joinable):
"""
num_allreduces = kwargs.get("num_allreduces", 1)
run_post_hook = kwargs.get("run_post_hooks", False)
- return AllReducerJoinHook(
- self,
- num_allreduces,
- run_post_hook
- )
+ return AllReducerJoinHook(self, num_allreduces, run_post_hook)
@property
def join_device(self) -> torch.device:
@@ -127,17 +125,16 @@ class AllReducer(Joinable):
r"""
Returns the max rank of the ones to consider over the process group.
"""
- common_rank = torch.tensor(
- [rank if to_consider else -1],
- device=self.device
- )
+ common_rank = torch.tensor([rank if to_consider else -1], device=self.device)
dist.all_reduce(common_rank, op=dist.ReduceOp.MAX, group=self.process_group)
common_rank = common_rank.item()
assert common_rank >= 0
return common_rank
+
class TestJoin(MultiProcessTestCase):
r"""Test cases for the generic join context."""
+
def setUp(self):
super().setUp()
os.environ["WORLD_SIZE"] = str(self.world_size)
@@ -146,8 +143,11 @@ class TestJoin(MultiProcessTestCase):
@property
def device(self):
- return torch.device(self.rank) if BACKEND == dist.Backend.NCCL \
+ return (
+ torch.device(self.rank)
+ if BACKEND == dist.Backend.NCCL
else torch.device("cpu")
+ )
@property
def world_size(self):
@@ -170,10 +170,7 @@ class TestJoin(MultiProcessTestCase):
def dist_init(self, rank, world_size, backend=BACKEND):
store = dist.FileStore(self.file_name, world_size)
return dist.init_process_group(
- backend=backend,
- store=store,
- rank=rank,
- world_size=world_size
+ backend=backend, store=store, rank=rank, world_size=world_size
)
def construct_uneven_inputs(self, base, offset, device=None):
@@ -231,32 +228,35 @@ class TestJoin(MultiProcessTestCase):
self.dist_init(self.rank, self.world_size)
allreducers = [
- AllReducer(self.device, self.process_group)
- for _ in range(num_joinables)
+ AllReducer(self.device, self.process_group) for _ in range(num_joinables)
]
for allreducer in allreducers:
self.assertEqual(allreducer.post_hook_tensor.item(), BEFORE_CONSTANT)
- inputs = self.construct_uneven_inputs(self.base_num_inputs, self.offset) \
- if uneven_inputs \
+ inputs = (
+ self.construct_uneven_inputs(self.base_num_inputs, self.offset)
+ if uneven_inputs
else self.construct_even_inputs(self.base_num_inputs)
+ )
allreduce_total = 0
# Expect a `RuntimeError` if `throw_on_early_termination=True`
# Rank 0 exhausts its inputs first
- expected_msg = "Rank 0 exhausted all inputs." if self.rank == 0 \
- else "Detected at least one rank that exhausted inputs. " \
+ expected_msg = (
+ "Rank 0 exhausted all inputs."
+ if self.rank == 0
+ else "Detected at least one rank that exhausted inputs. "
"Throwing across all ranks."
+ )
with self.assertRaisesRegex(
- RuntimeError,
- expected_msg
+ RuntimeError, expected_msg
) if throw_on_early_termination else contextlib.nullcontext():
with Join(
allreducers,
enable=enable,
throw_on_early_termination=throw_on_early_termination,
num_allreduces=num_allreduces,
- run_post_hooks=run_post_hooks
+ run_post_hooks=run_post_hooks,
):
for _ in inputs:
for allreducer in allreducers:
@@ -275,9 +275,7 @@ class TestJoin(MultiProcessTestCase):
for allreducer in allreducers:
self.assertEqual(allreducer.post_hook_tensor.item(), AFTER_CONSTANT)
- @require_n_gpus_for_nccl_backend(
- WORLD_SIZE, BACKEND
- )
+ @require_n_gpus_for_nccl_backend(WORLD_SIZE, BACKEND)
def test_single_joinable_main_hooks(self):
r"""Tests the main hooks of a single :class:`Joinable`."""
num_joinables = 1
@@ -298,12 +296,10 @@ class TestJoin(MultiProcessTestCase):
throw_on_early_termination=False,
num_allreduces=num_allreduces,
run_post_hooks=run_post_hooks,
- expected_total=expected_total
+ expected_total=expected_total,
)
- @require_n_gpus_for_nccl_backend(
- WORLD_SIZE, BACKEND
- )
+ @require_n_gpus_for_nccl_backend(WORLD_SIZE, BACKEND)
def test_single_joinable_post_hooks(self):
r"""Tests the post-hooks of a single :class:`Joinable`."""
num_joinables = 1
@@ -317,12 +313,10 @@ class TestJoin(MultiProcessTestCase):
throw_on_early_termination=False,
num_allreduces=num_allreduces,
run_post_hooks=run_post_hooks,
- expected_total=None
+ expected_total=None,
)
- @require_n_gpus_for_nccl_backend(
- WORLD_SIZE, BACKEND
- )
+ @require_n_gpus_for_nccl_backend(WORLD_SIZE, BACKEND)
def test_single_joinable(self):
r"""
Tests the main hooks and post-hooks of a single :class:`Joinable`
@@ -347,12 +341,10 @@ class TestJoin(MultiProcessTestCase):
throw_on_early_termination=False,
num_allreduces=num_allreduces,
run_post_hooks=run_post_hooks,
- expected_total=expected_total
+ expected_total=expected_total,
)
- @require_n_gpus_for_nccl_backend(
- WORLD_SIZE, BACKEND
- )
+ @require_n_gpus_for_nccl_backend(WORLD_SIZE, BACKEND)
def test_multiple_joinables(self):
r"""
Tests the main hooks and post-hooks of multiple :class:`Joinable` s
@@ -378,12 +370,10 @@ class TestJoin(MultiProcessTestCase):
throw_on_early_termination=False,
num_allreduces=num_allreduces,
run_post_hooks=run_post_hooks,
- expected_total=expected_total
+ expected_total=expected_total,
)
- @require_n_gpus_for_nccl_backend(
- WORLD_SIZE, BACKEND
- )
+ @require_n_gpus_for_nccl_backend(WORLD_SIZE, BACKEND)
def test_single_joinable_disable(self):
r"""Tests ``enable=False`` for a single :class:`Joinable`."""
num_joinables = 1
@@ -401,12 +391,10 @@ class TestJoin(MultiProcessTestCase):
throw_on_early_termination=False,
num_allreduces=num_allreduces,
run_post_hooks=run_post_hooks,
- expected_total=expected_total
+ expected_total=expected_total,
)
- @require_n_gpus_for_nccl_backend(
- WORLD_SIZE, BACKEND
- )
+ @require_n_gpus_for_nccl_backend(WORLD_SIZE, BACKEND)
def test_multiple_joinable_disable(self):
r"""
Tests ``enable=False`` for multiple :class:`Joinable` s.
@@ -429,12 +417,10 @@ class TestJoin(MultiProcessTestCase):
throw_on_early_termination=False,
num_allreduces=num_allreduces,
run_post_hooks=run_post_hooks,
- expected_total=expected_total
+ expected_total=expected_total,
)
- @require_n_gpus_for_nccl_backend(
- WORLD_SIZE, BACKEND
- )
+ @require_n_gpus_for_nccl_backend(WORLD_SIZE, BACKEND)
def test_single_joinable_throw(self):
r"""
Tests ``throw_on_early_termination=True`` for a single
@@ -452,12 +438,10 @@ class TestJoin(MultiProcessTestCase):
throw_on_early_termination=throw_on_early_termination,
num_allreduces=num_allreduces,
run_post_hooks=run_post_hooks,
- expected_total=None
+ expected_total=None,
)
- @require_n_gpus_for_nccl_backend(
- WORLD_SIZE, BACKEND
- )
+ @require_n_gpus_for_nccl_backend(WORLD_SIZE, BACKEND)
def test_multiple_joinables_throw(self):
r"""
Tests ``throw_on_early_termination=True`` for multiple
@@ -478,12 +462,10 @@ class TestJoin(MultiProcessTestCase):
throw_on_early_termination=throw_on_early_termination,
num_allreduces=num_allreduces,
run_post_hooks=run_post_hooks,
- expected_total=None
+ expected_total=None,
)
- @require_n_gpus_for_nccl_backend(
- WORLD_SIZE, BACKEND
- )
+ @require_n_gpus_for_nccl_backend(WORLD_SIZE, BACKEND)
def test_join_kwargs(self):
r"""
Tests passing keyword arguments to the context manager.
@@ -505,8 +487,9 @@ class TestJoin(MultiProcessTestCase):
throw_on_early_termination=False,
num_allreduces=num_allreduces,
run_post_hooks=run_post_hooks,
- expected_total=expected_total
+ expected_total=expected_total,
)
+
if __name__ == "__main__":
run_tests()
diff --git a/test/distributed/elastic/agent/server/test/local_elastic_agent_test.py b/test/distributed/elastic/agent/server/test/local_elastic_agent_test.py
index 038f4cdcec..2d64b90515 100644
--- a/test/distributed/elastic/agent/server/test/local_elastic_agent_test.py
+++ b/test/distributed/elastic/agent/server/test/local_elastic_agent_test.py
@@ -413,8 +413,7 @@ class LocalElasticAgentTest(unittest.TestCase):
"max_restarts": 0,
"exit_barrier_timeout": exit_barrier_timeout,
"is_host": node_idx == 0,
- "log_line_prefix_template": log_line_prefix_template
-
+ "log_line_prefix_template": log_line_prefix_template,
}
p = mp.Process(target=self.run_agent, kwargs=run_agent_args)
procs.append(p)
@@ -454,15 +453,21 @@ class LocalElasticAgentTest(unittest.TestCase):
self.assertIsInstance(return_value, torch.Tensor)
self.assertEqual((100, 100), return_value.shape)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_dummy_compute_c10d(self):
self.run_test_with_backend(backend="c10d", test_to_run=self.dummy_compute)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_dummy_compute_etcd(self):
self.run_test_with_backend(backend="etcd", test_to_run=self.dummy_compute)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_dummy_compute_etcd_v2(self):
self.run_test_with_backend(backend="etcd-v2", test_to_run=self.dummy_compute)
@@ -472,15 +477,21 @@ class LocalElasticAgentTest(unittest.TestCase):
self.assertIsNone(res.return_values[0])
self.assertIsNone(res.return_values[1])
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_run_happy_function_c10d(self):
self.run_test_with_backend(backend="c10d", test_to_run=self.run_happy_function)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_run_happy_function_etcd(self):
self.run_test_with_backend(backend="etcd", test_to_run=self.run_happy_function)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_run_happy_function_etcd_v2(self):
self.run_test_with_backend(
backend="etcd-v2", test_to_run=self.run_happy_function
@@ -501,13 +512,17 @@ class LocalElasticAgentTest(unittest.TestCase):
self.assertFalse(res.is_failed())
self.assertIsNone(res.return_values[0])
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_check_master_addr_port_override_etcd(self):
self.run_test_with_backend(
backend="etcd", test_to_run=self.check_master_addr_port_override
)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_check_master_addr_port_override_etcd_v2(self):
self.run_test_with_backend(
backend="etcd-v2", test_to_run=self.check_master_addr_port_override
@@ -548,7 +563,11 @@ class LocalElasticAgentTest(unittest.TestCase):
watchdog_file_path = "/tmp/watchdog_timer_" + str(uuid.uuid4())
os.environ[watchdog_env_name] = watchdog_file_path
# Run the agent
- node_conf = Conf(entrypoint=_check_local_watchdog_setup, local_world_size=1, args=(TORCHELASTIC_TIMER_FILE, True))
+ node_conf = Conf(
+ entrypoint=_check_local_watchdog_setup,
+ local_world_size=1,
+ args=(TORCHELASTIC_TIMER_FILE, True),
+ )
spec = self.get_worker_spec(node_conf, max_restarts=2)
agent = self.get_agent(spec, node_config=node_conf)
res = agent.run()
@@ -560,31 +579,43 @@ class LocalElasticAgentTest(unittest.TestCase):
if watchdog_env_name in os.environ:
del os.environ[watchdog_env_name]
# Run the agent
- node_conf = Conf(entrypoint=_check_local_watchdog_setup, local_world_size=1, args=(TORCHELASTIC_TIMER_FILE, False))
+ node_conf = Conf(
+ entrypoint=_check_local_watchdog_setup,
+ local_world_size=1,
+ args=(TORCHELASTIC_TIMER_FILE, False),
+ )
spec = self.get_worker_spec(node_conf, max_restarts=2)
agent = self.get_agent(spec, node_config=node_conf)
res = agent.run()
self.assertFalse(res.is_failed())
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_run_agent_local_watchdog_setup_enabled_etcd(self):
self.run_test_with_backend(
backend="etcd", test_to_run=self.run_agent_local_watchdog_setup_enabled
)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_run_agent_local_watchdog_setup_enabled_c10d(self):
self.run_test_with_backend(
backend="c10d", test_to_run=self.run_agent_local_watchdog_setup_enabled
)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_run_agent_local_watchdog_setup_disabled_etcd(self):
self.run_test_with_backend(
backend="etcd", test_to_run=self.run_agent_local_watchdog_setup_disabled
)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_run_agent_local_watchdog_setup_disabled_c10d(self):
self.run_test_with_backend(
backend="c10d", test_to_run=self.run_agent_local_watchdog_setup_disabled
@@ -595,7 +626,11 @@ class LocalElasticAgentTest(unittest.TestCase):
healthcheck_port_env_name = TORCHELASTIC_HEALTH_CHECK_PORT
os.environ[healthcheck_port_env_name] = "12345"
# Run the agent
- node_conf = Conf(entrypoint=_check_local_watchdog_setup, local_world_size=1, args=(TORCHELASTIC_HEALTH_CHECK_PORT, True))
+ node_conf = Conf(
+ entrypoint=_check_local_watchdog_setup,
+ local_world_size=1,
+ args=(TORCHELASTIC_HEALTH_CHECK_PORT, True),
+ )
spec = self.get_worker_spec(node_conf, max_restarts=2)
agent = self.get_agent(spec, node_config=node_conf)
res = agent.run()
@@ -607,49 +642,67 @@ class LocalElasticAgentTest(unittest.TestCase):
if healthcheck_port_env_name in os.environ:
del os.environ[healthcheck_port_env_name]
# Run the agent
- node_conf = Conf(entrypoint=_check_local_watchdog_setup, local_world_size=1, args=(TORCHELASTIC_HEALTH_CHECK_PORT, False))
+ node_conf = Conf(
+ entrypoint=_check_local_watchdog_setup,
+ local_world_size=1,
+ args=(TORCHELASTIC_HEALTH_CHECK_PORT, False),
+ )
spec = self.get_worker_spec(node_conf, max_restarts=2)
agent = self.get_agent(spec, node_config=node_conf)
res = agent.run()
self.assertFalse(res.is_failed())
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_run_agent_healthcheck_setup_enabled_etcd(self):
self.run_test_with_backend(
backend="etcd", test_to_run=self.run_agent_healthcheck_setup_enabled
)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_run_agent_healthcheck_setup_enabled_c10d(self):
self.run_test_with_backend(
backend="c10d", test_to_run=self.run_agent_healthcheck_setup_enabled
)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_run_agent_healthcheck_setup_disabled_etcd(self):
self.run_test_with_backend(
backend="etcd", test_to_run=self.run_agent_healthcheck_setup_disabled
)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_run_agent_healthcheck_setup_disabled_c10d(self):
self.run_test_with_backend(
backend="c10d", test_to_run=self.run_agent_healthcheck_setup_disabled
)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_run_check_env_function_etcd(self):
self.run_test_with_backend(
backend="etcd", test_to_run=self.run_check_env_function
)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_run_check_nccl_async_error_handling_env_c10d(self):
self.run_test_with_backend(
backend="c10d", test_to_run=self.run_check_nccl_async_error_handling_env
)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_run_check_nccl_async_error_handling_env_default_c10d(self):
self.run_test_with_backend(
backend="c10d",
@@ -662,19 +715,25 @@ class LocalElasticAgentTest(unittest.TestCase):
self.assertEqual("foo", res.return_values[0])
self.assertEqual("foo", res.return_values[1])
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_run_function_with_return_value_c10d(self):
self.run_test_with_backend(
backend="c10d", test_to_run=self.run_function_with_return_value
)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_run_function_with_return_value_etcd(self):
self.run_test_with_backend(
backend="etcd", test_to_run=self.run_function_with_return_value
)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_run_function_with_return_value_etcd_v2(self):
self.run_test_with_backend(
backend="etcd-v2", test_to_run=self.run_function_with_return_value
@@ -685,19 +744,27 @@ class LocalElasticAgentTest(unittest.TestCase):
self.assertFalse(res.is_failed())
# _dist_sum internally checks that the sum computed is valid
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_simple_dist_sum_c10d(self):
self.run_test_with_backend(backend="c10d", test_to_run=self.simple_dist_sum)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_simple_dist_sum_etcd(self):
self.run_test_with_backend(backend="etcd", test_to_run=self.simple_dist_sum)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_simple_dist_sum_etcd_v2(self):
self.run_test_with_backend(backend="etcd-v2", test_to_run=self.simple_dist_sum)
- def run_distributed_sum_homogeneous(self, log_line_prefix_template: Optional[str] = None):
+ def run_distributed_sum_homogeneous(
+ self, log_line_prefix_template: Optional[str] = None
+ ):
node_configs = [
Conf(role="sum", entrypoint=_dist_sum, local_world_size=4, tee=Std.ALL),
Conf(role="sum", entrypoint=_dist_sum, local_world_size=4, tee=Std.ALL),
@@ -706,7 +773,9 @@ class LocalElasticAgentTest(unittest.TestCase):
# due to getting stuck on the _dist_sum in waiting for TCPStore workers
# to join the cluster
# TODO(aivanou): t83447589 come up with the proper fix
- res = self.run_job(node_configs, log_line_prefix_template=log_line_prefix_template)
+ res = self.run_job(
+ node_configs, log_line_prefix_template=log_line_prefix_template
+ )
self.assertEqual(2, len(res["sum"]))
ranks = set()
for run_results in res["sum"]:
@@ -727,10 +796,11 @@ class LocalElasticAgentTest(unittest.TestCase):
log_line_prefix_template = "[${role_name}-${local_rank}:${rank}]:"
self.run_test_with_backend(
backend="c10d",
- test_to_run=lambda: self.run_distributed_sum_homogeneous(log_line_prefix_template)
+ test_to_run=lambda: self.run_distributed_sum_homogeneous(
+ log_line_prefix_template
+ ),
)
-
@unittest.skipIf(
TEST_WITH_DEV_DBG_ASAN or TEST_WITH_TSAN,
"test incompatible with dev/dbg asan or tsan",
@@ -770,19 +840,25 @@ class LocalElasticAgentTest(unittest.TestCase):
ranks.update(run_results.return_values.keys())
self.assertSetEqual(set(range(1 + 2 + 3)), ranks)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_run_distributed_sum_heterogeneous_c10d(self):
self.run_test_with_backend(
backend="c10d", test_to_run=self.run_distributed_sum_heterogeneous
)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_run_distributed_sum_heterogeneous_etcd(self):
self.run_test_with_backend(
backend="etcd", test_to_run=self.run_distributed_sum_heterogeneous
)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_run_distributed_sum_heterogeneous_etcd_v2(self):
self.run_test_with_backend(
backend="etcd-v2", test_to_run=self.run_distributed_sum_heterogeneous
@@ -809,15 +885,21 @@ class LocalElasticAgentTest(unittest.TestCase):
self.assertEqual(data["message"], failure_data["message"])
self.assertEqual(int(data["extraInfo"]["timestamp"]), failure.timestamp)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_run_sad_function_c10d(self):
self.run_test_with_backend(backend="c10d", test_to_run=self.run_sad_function)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_run_sad_function_etcd(self):
self.run_test_with_backend(backend="etcd", test_to_run=self.run_sad_function)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_run_sad_function_etcd_v2(self):
self.run_test_with_backend(backend="etcd-v2", test_to_run=self.run_sad_function)
@@ -834,19 +916,25 @@ class LocalElasticAgentTest(unittest.TestCase):
self.assertEqual(WorkerState.FAILED, agent.get_worker_group().state)
self.assertTrue(agent._total_execution_time > 0)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_run_bipolar_function_c10d(self):
self.run_test_with_backend(
backend="c10d", test_to_run=self.run_bipolar_function
)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_run_bipolar_function_etcd(self):
self.run_test_with_backend(
backend="etcd", test_to_run=self.run_bipolar_function
)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_run_bipolar_function_etcd_v2(self):
self.run_test_with_backend(
backend="etcd-v2", test_to_run=self.run_bipolar_function
@@ -1302,15 +1390,21 @@ class LocalElasticAgentTest(unittest.TestCase):
self.assertFalse(res.is_failed())
barrier_mock.assert_called_once()
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_barrier_failed_c10d(self):
self.run_test_with_backend(backend="c10d", test_to_run=self.barrier_failed)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_barrier_failed_etcd(self):
self.run_test_with_backend(backend="etcd", test_to_run=self.barrier_failed)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_barrier_failed_etcd_v2(self):
self.run_test_with_backend(backend="etcd-v2", test_to_run=self.barrier_failed)
@@ -1329,14 +1423,20 @@ class LocalElasticAgentTest(unittest.TestCase):
agent.run("worker")
pcontext_mock.close.assert_called_once()
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_shutdown_called_c10d(self):
self.run_test_with_backend(backend="c10d", test_to_run=self.shutdown_called)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_shutdown_called_etcd(self):
self.run_test_with_backend(backend="etcd", test_to_run=self.shutdown_called)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_shutdown_called_etcd_v2(self):
self.run_test_with_backend(backend="etcd-v2", test_to_run=self.shutdown_called)
diff --git a/test/distributed/elastic/events/lib_test.py b/test/distributed/elastic/events/lib_test.py
index 3a5fb694bf..63836c48c1 100644
--- a/test/distributed/elastic/events/lib_test.py
+++ b/test/distributed/elastic/events/lib_test.py
@@ -13,12 +13,12 @@ from dataclasses import asdict
from unittest.mock import patch
from torch.distributed.elastic.events import (
+ _get_or_create_logger,
+ construct_and_record_rdzv_event,
Event,
EventSource,
NodeState,
RdzvEvent,
- _get_or_create_logger,
- construct_and_record_rdzv_event,
)
from torch.testing._internal.common_utils import run_tests, TestCase
@@ -58,6 +58,7 @@ class EventLibTest(TestCase):
deser_event = Event.deserialize(json_event)
self.assert_event(event, deser_event)
+
class RdzvEventLibTest(TestCase):
@patch("torch.distributed.elastic.events.record_rdzv_event")
@patch("torch.distributed.elastic.events.get_logging_handler")
@@ -72,7 +73,9 @@ class RdzvEventLibTest(TestCase):
@patch("torch.distributed.elastic.events.record_rdzv_event")
@patch("torch.distributed.elastic.events.get_logging_handler")
- def test_construct_and_record_rdzv_event_does_not_run_if_invalid_dest(self, get_mock, record_mock):
+ def test_construct_and_record_rdzv_event_does_not_run_if_invalid_dest(
+ self, get_mock, record_mock
+ ):
get_mock.return_value = logging.NullHandler()
construct_and_record_rdzv_event(
run_id="test_run_id",
@@ -120,7 +123,6 @@ class RdzvEventLibTest(TestCase):
self.assertEqual(event.local_id, 4)
self.assertEqual(event.error_trace, "test_error_trace")
-
def test_rdzv_event_deserialize(self):
event = self.get_test_rdzv_event()
json_event = event.serialize()
diff --git a/test/distributed/elastic/metrics/api_test.py b/test/distributed/elastic/metrics/api_test.py
index 279a1b951f..e548cfb6f4 100644
--- a/test/distributed/elastic/metrics/api_test.py
+++ b/test/distributed/elastic/metrics/api_test.py
@@ -10,10 +10,10 @@ import abc
import unittest.mock as mock
from torch.distributed.elastic.metrics.api import (
+ _get_metric_name,
MetricData,
MetricHandler,
MetricStream,
- _get_metric_name,
prof,
)
from torch.testing._internal.common_utils import run_tests, TestCase
diff --git a/test/distributed/elastic/multiprocessing/api_test.py b/test/distributed/elastic/multiprocessing/api_test.py
index 6851db05c0..9658ed087a 100644
--- a/test/distributed/elastic/multiprocessing/api_test.py
+++ b/test/distributed/elastic/multiprocessing/api_test.py
@@ -22,13 +22,13 @@ import torch
import torch.multiprocessing as mp
from torch.distributed.elastic.multiprocessing import ProcessFailure, start_processes
from torch.distributed.elastic.multiprocessing.api import (
+ _validate_full_rank,
+ _wrap,
DefaultLogsSpecs,
MultiprocessContext,
RunProcsResult,
SignalException,
Std,
- _validate_full_rank,
- _wrap,
to_map,
)
from torch.distributed.elastic.multiprocessing.errors import ErrorHandler
@@ -37,13 +37,13 @@ from torch.testing._internal.common_utils import (
IS_MACOS,
IS_WINDOWS,
NO_MULTIPROCESSING_SPAWN,
+ run_tests,
+ skip_but_pass_in_sandcastle_if,
+ skip_if_pytest,
TEST_WITH_ASAN,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_TSAN,
TestCase,
- run_tests,
- skip_but_pass_in_sandcastle_if,
- skip_if_pytest,
)
@@ -67,7 +67,6 @@ class RunProcResultsTest(TestCase):
self.assertTrue(pr_fail.is_failed())
def test_get_failures(self):
-
error_file0 = os.path.join(self.test_dir, "error0.json")
error_file1 = os.path.join(self.test_dir, "error1.json")
eh = ErrorHandler()
@@ -276,7 +275,7 @@ if not (TEST_WITH_DEV_DBG_ASAN or IS_WINDOWS or IS_MACOS):
not_a_dir.name: NotADirectoryError,
}
- for (log_dir, expected_error) in cases.items():
+ for log_dir, expected_error in cases.items():
with self.subTest(log_dir=log_dir, expected_error=expected_error):
with self.assertRaises(expected_error):
pc = None
@@ -292,7 +291,6 @@ if not (TEST_WITH_DEV_DBG_ASAN or IS_WINDOWS or IS_MACOS):
if pc:
pc.close()
-
def test_args_env_len_mismatch(self):
cases = [
# 1 x args; 2 x envs
@@ -396,7 +394,9 @@ if not (TEST_WITH_DEV_DBG_ASAN or IS_WINDOWS or IS_MACOS):
results = pc.wait(period=0.1)
self.assertEqual({0: None, 1: None}, results.return_values)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "tests incompatible with asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "tests incompatible with asan"
+ )
def test_function_large_ret_val(self):
# python multiprocessing.queue module uses pipes and actually PipedQueues
# This means that if a single object is greater than a pipe size
@@ -435,7 +435,8 @@ if not (TEST_WITH_DEV_DBG_ASAN or IS_WINDOWS or IS_MACOS):
args={0: ("hello", RAISE), 1: ("world",)},
envs={
0: {"TORCHELASTIC_RUN_ID": "run_id"},
- 1: {"TORCHELASTIC_RUN_ID": "run_id"}},
+ 1: {"TORCHELASTIC_RUN_ID": "run_id"},
+ },
logs_specs=DefaultLogsSpecs(log_dir=log_dir),
start_method=start_method,
)
@@ -453,7 +454,9 @@ if not (TEST_WITH_DEV_DBG_ASAN or IS_WINDOWS or IS_MACOS):
self.assertEqual(1, failure.exitcode)
self.assertEqual("<N/A>", failure.signal_name())
self.assertEqual(pc.pids()[0], failure.pid)
- self.assertTrue(error_file.startswith(os.path.join(log_dir, "run_id_")))
+ self.assertTrue(
+ error_file.startswith(os.path.join(log_dir, "run_id_"))
+ )
self.assertTrue(error_file.endswith("attempt_0/0/error.json"))
self.assertEqual(
int(error_file_data["message"]["extraInfo"]["timestamp"]),
@@ -541,9 +544,7 @@ if not (TEST_WITH_DEV_DBG_ASAN or IS_WINDOWS or IS_MACOS):
args={0: (0, 1)},
envs={0: {}},
logs_specs=DefaultLogsSpecs(
- log_dir=self.log_dir(),
- redirects=Std.ALL,
- tee=Std.ALL
+ log_dir=self.log_dir(), redirects=Std.ALL, tee=Std.ALL
),
start_method="spawn",
)
diff --git a/test/distributed/elastic/multiprocessing/bin/echo3.py b/test/distributed/elastic/multiprocessing/bin/echo3.py
index b07f4714b2..ebad725468 100755
--- a/test/distributed/elastic/multiprocessing/bin/echo3.py
+++ b/test/distributed/elastic/multiprocessing/bin/echo3.py
@@ -24,5 +24,4 @@ if __name__ == "__main__":
if args.segfault:
ctypes.string_at(0)
else:
-
print(f"{args.msg} from {rank}")
diff --git a/test/distributed/elastic/multiprocessing/tail_log_test.py b/test/distributed/elastic/multiprocessing/tail_log_test.py
index 2f4a4cc87f..6ead06dbe0 100644
--- a/test/distributed/elastic/multiprocessing/tail_log_test.py
+++ b/test/distributed/elastic/multiprocessing/tail_log_test.py
@@ -53,7 +53,9 @@ class TailLogTest(unittest.TestCase):
}
dst = io.StringIO()
- tail = TailLog(name="writer", log_files=log_files, dst=dst, interval_sec=interval_sec).start()
+ tail = TailLog(
+ name="writer", log_files=log_files, dst=dst, interval_sec=interval_sec
+ ).start()
# sleep here is intentional to ensure that the log tail
# can gracefully handle and wait for non-existent log files
time.sleep(interval_sec * 10)
@@ -130,7 +132,6 @@ class TailLogTest(unittest.TestCase):
self.assertIn(f"[worker{i}][{i}]", headers)
self.assertTrue(tail.stopped())
-
def test_tail_no_files(self):
"""
Ensures that the log tail can gracefully handle no log files
diff --git a/test/distributed/elastic/rendezvous/api_test.py b/test/distributed/elastic/rendezvous/api_test.py
index 40567857df..b9287546b3 100644
--- a/test/distributed/elastic/rendezvous/api_test.py
+++ b/test/distributed/elastic/rendezvous/api_test.py
@@ -6,7 +6,7 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
-from typing import Any, Dict, SupportsInt, Tuple, cast
+from typing import Any, cast, Dict, SupportsInt, Tuple
from unittest import TestCase
from torch.distributed import Store
@@ -170,7 +170,9 @@ class RendezvousParametersTest(TestCase):
params = self._create_params()
- self.assertEqual(params.get_as_int("dummy_param"), int(cast(SupportsInt, value)))
+ self.assertEqual(
+ params.get_as_int("dummy_param"), int(cast(SupportsInt, value))
+ )
def test_get_as_int_raises_error_if_value_is_invalid(self) -> None:
for value in ["a", "0a", "3b", "abc"]:
@@ -233,7 +235,9 @@ class RendezvousHandlerRegistryTest(TestCase):
self._registry.register("dummy_backend", self._create_handler)
self._registry.register("dummy_backend", self._create_handler)
- def test_register_raises_error_if_called_twice_with_different_creators(self) -> None:
+ def test_register_raises_error_if_called_twice_with_different_creators(
+ self,
+ ) -> None:
self._registry.register("dummy_backend", self._create_handler)
other_create_handler = lambda p: _DummyRendezvousHandler(p) # noqa: E731
diff --git a/test/distributed/elastic/rendezvous/c10d_rendezvous_backend_test.py b/test/distributed/elastic/rendezvous/c10d_rendezvous_backend_test.py
index e31b0f9439..5ebeb00b2f 100644
--- a/test/distributed/elastic/rendezvous/c10d_rendezvous_backend_test.py
+++ b/test/distributed/elastic/rendezvous/c10d_rendezvous_backend_test.py
@@ -11,22 +11,23 @@ import tempfile
from base64 import b64encode
from datetime import timedelta
-from typing import ClassVar, cast, Callable
-from unittest import TestCase, mock
+from typing import Callable, cast, ClassVar
+from unittest import mock, TestCase
-from torch.distributed import TCPStore, FileStore
+from rendezvous_backend_test import RendezvousBackendTestMixin
+
+from torch.distributed import FileStore, TCPStore
from torch.distributed.elastic.rendezvous import (
RendezvousConnectionError,
+ RendezvousError,
RendezvousParameters,
- RendezvousError)
+)
from torch.distributed.elastic.rendezvous.c10d_rendezvous_backend import (
C10dRendezvousBackend,
create_backend,
)
-from rendezvous_backend_test import RendezvousBackendTestMixin
-
class TCPStoreBackendTest(TestCase, RendezvousBackendTestMixin):
_store: ClassVar[TCPStore]
@@ -44,6 +45,7 @@ class TCPStoreBackendTest(TestCase, RendezvousBackendTestMixin):
def _corrupt_state(self) -> None:
self._store.set("torch.rendezvous.dummy_run_id", "non_base64")
+
class FileStoreBackendTest(TestCase, RendezvousBackendTestMixin):
_store: ClassVar[FileStore]
@@ -102,7 +104,6 @@ class CreateBackendTest(TestCase):
def tearDown(self) -> None:
os.remove(self._expected_endpoint_file)
-
def _run_test_with_store(self, store_type: str, test_to_run: Callable):
"""
Use this function to specify the store type to use in a test. If
@@ -124,10 +125,10 @@ class CreateBackendTest(TestCase):
typecast_store = cast(self._expected_store_type, store)
self.assertEqual(typecast_store.timeout, self._expected_read_timeout) # type: ignore[attr-defined]
- if (self._expected_store_type == TCPStore):
+ if self._expected_store_type == TCPStore:
self.assertEqual(typecast_store.host, self._expected_endpoint_host) # type: ignore[attr-defined]
self.assertEqual(typecast_store.port, self._expected_endpoint_port) # type: ignore[attr-defined]
- if (self._expected_store_type == FileStore):
+ if self._expected_store_type == FileStore:
if self._params.endpoint:
self.assertEqual(typecast_store.path, self._expected_endpoint_file) # type: ignore[attr-defined]
else:
@@ -142,7 +143,9 @@ class CreateBackendTest(TestCase):
def test_create_backend_returns_backend(self) -> None:
for store_type in ["tcp", "file"]:
with self.subTest(store_type=store_type):
- self._run_test_with_store(store_type, self._assert_create_backend_returns_backend)
+ self._run_test_with_store(
+ store_type, self._assert_create_backend_returns_backend
+ )
def test_create_backend_returns_backend_if_is_host_is_false(self) -> None:
store = TCPStore( # type: ignore[call-arg] # noqa: F841
@@ -169,28 +172,36 @@ class CreateBackendTest(TestCase):
self._assert_create_backend_returns_backend()
- def test_create_backend_returns_backend_if_endpoint_port_is_not_specified(self) -> None:
+ def test_create_backend_returns_backend_if_endpoint_port_is_not_specified(
+ self,
+ ) -> None:
self._params.endpoint = self._expected_endpoint_host
self._expected_endpoint_port = 29400
self._assert_create_backend_returns_backend()
- def test_create_backend_returns_backend_if_endpoint_file_is_not_specified(self) -> None:
+ def test_create_backend_returns_backend_if_endpoint_file_is_not_specified(
+ self,
+ ) -> None:
self._params_filestore.endpoint = ""
self._run_test_with_store("file", self._assert_create_backend_returns_backend)
- def test_create_backend_returns_backend_if_store_type_is_not_specified(self) -> None:
+ def test_create_backend_returns_backend_if_store_type_is_not_specified(
+ self,
+ ) -> None:
del self._params.config["store_type"]
self._expected_store_type = TCPStore
- if (not self._params.get("read_timeout")):
+ if not self._params.get("read_timeout"):
self._expected_read_timeout = timedelta(seconds=60)
self._assert_create_backend_returns_backend()
- def test_create_backend_returns_backend_if_read_timeout_is_not_specified(self) -> None:
+ def test_create_backend_returns_backend_if_read_timeout_is_not_specified(
+ self,
+ ) -> None:
del self._params.config["read_timeout"]
self._expected_read_timeout = timedelta(seconds=60)
@@ -198,13 +209,11 @@ class CreateBackendTest(TestCase):
self._assert_create_backend_returns_backend()
def test_create_backend_returns_backend_with_libuv(self) -> None:
-
self._params.config["use_libuv"] = "true"
self._assert_create_backend_returns_backend()
def test_create_backend_returns_backend_without_libuv(self) -> None:
-
self._params.config["use_libuv"] = "false"
self._assert_create_backend_returns_backend()
@@ -237,7 +246,8 @@ class CreateBackendTest(TestCase):
self._params.config["store_type"] = "dummy_store_type"
with self.assertRaisesRegex(
- ValueError, r"^Invalid store type given. Currently only supports file and tcp.$"
+ ValueError,
+ r"^Invalid store type given. Currently only supports file and tcp.$",
):
create_backend(self._params)
@@ -252,18 +262,24 @@ class CreateBackendTest(TestCase):
create_backend(self._params)
@mock.patch("tempfile.mkstemp")
- def test_create_backend_raises_error_if_tempfile_creation_fails(self, tempfile_mock) -> None:
+ def test_create_backend_raises_error_if_tempfile_creation_fails(
+ self, tempfile_mock
+ ) -> None:
tempfile_mock.side_effect = OSError("test error")
# Set the endpoint to empty so it defaults to creating a temp file
self._params_filestore.endpoint = ""
with self.assertRaisesRegex(
RendezvousError,
- r"The file creation for C10d store has failed. See inner exception for details."
+ r"The file creation for C10d store has failed. See inner exception for details.",
):
create_backend(self._params_filestore)
- @mock.patch("torch.distributed.elastic.rendezvous.c10d_rendezvous_backend.FileStore")
- def test_create_backend_raises_error_if_file_path_is_invalid(self, filestore_mock) -> None:
+ @mock.patch(
+ "torch.distributed.elastic.rendezvous.c10d_rendezvous_backend.FileStore"
+ )
+ def test_create_backend_raises_error_if_file_path_is_invalid(
+ self, filestore_mock
+ ) -> None:
filestore_mock.side_effect = RuntimeError("test error")
self._params_filestore.endpoint = "bad file path"
with self.assertRaisesRegex(
diff --git a/test/distributed/elastic/rendezvous/dynamic_rendezvous_test.py b/test/distributed/elastic/rendezvous/dynamic_rendezvous_test.py
index 3713290e48..0772ca5135 100644
--- a/test/distributed/elastic/rendezvous/dynamic_rendezvous_test.py
+++ b/test/distributed/elastic/rendezvous/dynamic_rendezvous_test.py
@@ -52,7 +52,9 @@ from torch.distributed.elastic.rendezvous.dynamic_rendezvous import (
class CustomAssertMixin:
assertDictEqual: Callable
- def assert_state_equal(self, actual: _RendezvousState, expected: _RendezvousState) -> None:
+ def assert_state_equal(
+ self, actual: _RendezvousState, expected: _RendezvousState
+ ) -> None:
self.assertDictEqual(vars(actual), vars(expected))
def assert_state_empty(self, actual: _RendezvousState) -> None:
@@ -87,7 +89,8 @@ class RendezvousTimeoutTest(TestCase):
for join_timeout in join_timeouts:
with self.subTest(join_timeout=join_timeout):
with self.assertRaisesRegex(
- ValueError, rf"^The join timeout \({join_timeout}\) must be positive.$"
+ ValueError,
+ rf"^The join timeout \({join_timeout}\) must be positive.$",
):
timeout = RendezvousTimeout(join_timeout)
@@ -143,8 +146,12 @@ class RendezvousStateTest(TestCase):
for num_nodes, max_byte_size in expected_max_sizes:
with self.subTest(num_nodes=num_nodes, max_byte_size=max_byte_size):
for i in range(num_nodes):
- node_running = _NodeDesc(f"dummy{i}.dummy1-dummy1-dummy1-dummy1.com", 12345, i)
- node_waiting = _NodeDesc(f"dummy{i}.dummy2-dummy2-dummy2-dummy2.com", 67890, i)
+ node_running = _NodeDesc(
+ f"dummy{i}.dummy1-dummy1-dummy1-dummy1.com", 12345, i
+ )
+ node_waiting = _NodeDesc(
+ f"dummy{i}.dummy2-dummy2-dummy2-dummy2.com", 67890, i
+ )
state.participants[node_running] = i
@@ -269,7 +276,9 @@ class BackendRendezvousStateHolderTest(TestCase, CustomAssertMixin):
return state
def _create_state_holder(self) -> _BackendRendezvousStateHolder:
- return _BackendRendezvousStateHolder(self._backend, self._settings, self._cache_duration)
+ return _BackendRendezvousStateHolder(
+ self._backend, self._settings, self._cache_duration
+ )
def test_init_initializes_state_holder(self) -> None:
state_holder = self._create_state_holder()
@@ -361,7 +370,9 @@ class BackendRendezvousStateHolderTest(TestCase, CustomAssertMixin):
self._backend.set_state_internal(state)
- with patch("torch.distributed.elastic.rendezvous.dynamic_rendezvous.time") as mock_time:
+ with patch(
+ "torch.distributed.elastic.rendezvous.dynamic_rendezvous.time"
+ ) as mock_time:
for cache_duration in [1, 5, 10]:
with self.subTest(cache_duration=cache_duration):
self._cache_duration = cache_duration
@@ -397,7 +408,9 @@ class BackendRendezvousStateHolderTest(TestCase, CustomAssertMixin):
self._backend.set_state_internal(state)
- with patch("torch.distributed.elastic.rendezvous.dynamic_rendezvous.time") as mock_time:
+ with patch(
+ "torch.distributed.elastic.rendezvous.dynamic_rendezvous.time"
+ ) as mock_time:
self._cache_duration = 1
state_holder = self._create_state_holder()
@@ -568,7 +581,9 @@ class DistributedRendezvousOpExecutorTest(TestCase, CustomAssertMixin):
if settings is None:
settings = self._create_settings()
- return _DistributedRendezvousOpExecutor(self._node, self._state_holder, settings)
+ return _DistributedRendezvousOpExecutor(
+ self._node, self._state_holder, settings
+ )
def _run_action(self, action: _Action) -> None:
op_executor = self._create_op_executor()
@@ -644,14 +659,18 @@ class DistributedRendezvousOpExecutorTest(TestCase, CustomAssertMixin):
node = _NodeDesc(f"dummy{i}", 1, 1)
rank = i
else:
- node = _NodeDesc(f"dummy{num_participants - i - 1}", 1, 1) # Add in reverse.
+ node = _NodeDesc(
+ f"dummy{num_participants - i - 1}", 1, 1
+ ) # Add in reverse.
rank = 0
state.participants[node] = rank
state.last_heartbeats[node] = self._now
- def test_run_adds_to_participants_and_starts_last_call_if_min_nodes_is_reached(self) -> None:
+ def test_run_adds_to_participants_and_starts_last_call_if_min_nodes_is_reached(
+ self,
+ ) -> None:
for num_participants in range(3):
self._state = _RendezvousState()
@@ -817,12 +836,16 @@ class DistributedRendezvousOpExecutorTest(TestCase, CustomAssertMixin):
self.assertListEqual(self._mock_state_holder.mock_calls, [call.sync()])
def test_run_delays_execution_if_sync_requested(self) -> None:
- with patch("torch.distributed.elastic.rendezvous.dynamic_rendezvous._delay") as mock_delay:
+ with patch(
+ "torch.distributed.elastic.rendezvous.dynamic_rendezvous._delay"
+ ) as mock_delay:
self._run_action(_Action.SYNC)
mock_delay.assert_called_once_with(seconds=1)
- self.assertListEqual(self._mock_state_holder.mock_calls, [call.sync(), call.sync()])
+ self.assertListEqual(
+ self._mock_state_holder.mock_calls, [call.sync(), call.sync()]
+ )
class AbstractTestRendezvousOp(ABC):
@@ -850,7 +873,9 @@ class AbstractTestRendezvousOp(ABC):
mock_datetime = self._datetime_patch.start()
mock_datetime.utcnow.return_value = self._now
- self._time_patch = patch("torch.distributed.elastic.rendezvous.dynamic_rendezvous.time")
+ self._time_patch = patch(
+ "torch.distributed.elastic.rendezvous.dynamic_rendezvous.time"
+ )
mock_time = self._time_patch.start()
mock_time.monotonic.return_value = self._deadline
@@ -932,14 +957,18 @@ class TestRendezvousJoinOp(AbstractTestRendezvousOp, TestCase):
self._assert_action(expected_action)
- def test_treat_as_redundancy_for_next_rendezvous_if_rendezvous_is_complete(self) -> None:
+ def test_treat_as_redundancy_for_next_rendezvous_if_rendezvous_is_complete(
+ self,
+ ) -> None:
self._max_nodes = 1
self._state.complete = True
self._assert_action(_Action.ADD_TO_REDUNDANCY_LIST)
- def test_waits_next_round_if_rendezvous_is_complete_and_node_is_redundant(self) -> None:
+ def test_waits_next_round_if_rendezvous_is_complete_and_node_is_redundant(
+ self,
+ ) -> None:
self._state.redundancy_list.add(self._node)
self._max_nodes = 1
@@ -957,7 +986,9 @@ class TestRendezvousJoinOp(AbstractTestRendezvousOp, TestCase):
self._assert_action(_Action.REMOVE_FROM_REDUNDANCY_LIST)
- def test_waits_next_round_if_rendezvous_is_complete_and_node_is_in_wait_list(self) -> None:
+ def test_waits_next_round_if_rendezvous_is_complete_and_node_is_in_wait_list(
+ self,
+ ) -> None:
self._state.wait_list.add(self._node)
self._state.complete = True
@@ -999,14 +1030,18 @@ class TestRendezvousJoinOp(AbstractTestRendezvousOp, TestCase):
self._assert_action(_Action.ERROR_TIMEOUT)
- def test_raises_timeout_if_rollback_deadline_exceeded_and_node_is_participant(self) -> None:
+ def test_raises_timeout_if_rollback_deadline_exceeded_and_node_is_participant(
+ self,
+ ) -> None:
self._deadline = 0
self._state.participants[self._node] = 0
self._assert_action(_Action.ERROR_TIMEOUT)
- def test_raises_timeout_if_rollback_deadline_exceeded_and_node_is_in_wait_list(self) -> None:
+ def test_raises_timeout_if_rollback_deadline_exceeded_and_node_is_in_wait_list(
+ self,
+ ) -> None:
self._deadline = 0
self._state.wait_list.add(self._node)
@@ -1022,7 +1057,9 @@ class TestRendezvousJoinOp(AbstractTestRendezvousOp, TestCase):
self._assert_action(_Action.REMOVE_FROM_PARTICIPANTS)
- def test_removes_from_wait_list_if_timed_out_but_rollback_deadline_is_not_reached(self) -> None:
+ def test_removes_from_wait_list_if_timed_out_but_rollback_deadline_is_not_reached(
+ self,
+ ) -> None:
self._deadline = 5
self._state.wait_list.add(self._node)
@@ -1091,7 +1128,9 @@ class TestRendezvousKeepAliveOp(AbstractTestRendezvousOp, TestCase):
def test_finishes_if_no_keep_alive_update_is_needed(self) -> None:
delta = timedelta(seconds=1)
- self._state.last_heartbeats[self._node] = self._now - self._keep_alive_interval + delta
+ self._state.last_heartbeats[self._node] = (
+ self._now - self._keep_alive_interval + delta
+ )
self._assert_action(_Action.FINISH)
@@ -1176,7 +1215,9 @@ class DynamicRendezvousHandlerTest(TestCase):
_ = store.get("dummy_key")
- self._mock_store_get.assert_called_once_with("torch.rendezvous.dummy_run_id.0/dummy_key")
+ self._mock_store_get.assert_called_once_with(
+ "torch.rendezvous.dummy_run_id.0/dummy_key"
+ )
def test_next_rendezvous_respects_the_requested_timeout(self) -> None:
self._mock_sync.side_effect = lambda: time.sleep(0.3)
@@ -1489,7 +1530,9 @@ class CreateHandlerTest(TestCase):
self.assertEqual(handler.settings.min_nodes, self._params.min_nodes)
self.assertEqual(handler.settings.max_nodes, self._params.max_nodes)
self.assertEqual(handler.settings.timeout.join, self._expected_timeout.join)
- self.assertEqual(handler.settings.timeout.last_call, self._expected_timeout.last_call)
+ self.assertEqual(
+ handler.settings.timeout.last_call, self._expected_timeout.last_call
+ )
self.assertEqual(handler.settings.timeout.close, self._expected_timeout.close)
def test_create_handler_returns_handler_if_timeout_is_not_specified(self) -> None:
@@ -1516,6 +1559,7 @@ def _ignore_exception(exception_type: Exception, fn: Callable):
except exception_type as e:
pass
+
def _wait_for(condition, timeout=10, interval=1, name=None):
def _wait_while():
while True:
@@ -1523,18 +1567,21 @@ def _wait_for(condition, timeout=10, interval=1, name=None):
break
else:
time.sleep(interval)
+
wait_thread = threading.Thread(target=_wait_while, name=name)
wait_thread.start()
wait_thread.join(timeout=timeout)
-class _CapturingThread(threading.Thread):
+class _CapturingThread(threading.Thread):
def __init__(self, target=None, name=None, args=None, kwargs=None):
if args is None:
args = ()
if kwargs is None:
kwargs = {}
- threading.Thread.__init__(self, target=target, args=args, kwargs=kwargs, name=name)
+ threading.Thread.__init__(
+ self, target=target, args=args, kwargs=kwargs, name=name
+ )
self._result = None
def run(self):
@@ -1545,6 +1592,7 @@ class _CapturingThread(threading.Thread):
threading.Thread.join(self, *args)
return self._result
+
class IntegrationTest(TestCase):
def setUp(self) -> None:
self._store = DummyStore()
@@ -1602,7 +1650,8 @@ class IntegrationTest(TestCase):
handler2_thread = _CapturingThread(target=handler2.next_rendezvous)
handler3_thread = _CapturingThread(
target=_ignore_exception,
- args=(RendezvousTimeoutError, lambda: handler3.next_rendezvous()))
+ args=(RendezvousTimeoutError, lambda: handler3.next_rendezvous()),
+ )
handler1_thread.start()
handler2_thread.start()
@@ -1642,7 +1691,8 @@ class IntegrationTest(TestCase):
handler3_thread = _CapturingThread(
target=_ignore_exception,
- args=(RendezvousTimeoutError, lambda: handler3.next_rendezvous()))
+ args=(RendezvousTimeoutError, lambda: handler3.next_rendezvous()),
+ )
handler1_thread.start()
handler2_thread.start()
@@ -1657,8 +1707,12 @@ class IntegrationTest(TestCase):
handler2._stop_heartbeats()
- _wait_for(lambda: len(pickle.loads(self._backend.get_state()[0]).participants) == 1)
- _wait_for(lambda: len(pickle.loads(self._backend.get_state()[0]).wait_list) == 1)
+ _wait_for(
+ lambda: len(pickle.loads(self._backend.get_state()[0]).participants) == 1
+ )
+ _wait_for(
+ lambda: len(pickle.loads(self._backend.get_state()[0]).wait_list) == 1
+ )
class _InMemoryRendezvousBackend(RendezvousBackend):
diff --git a/test/distributed/elastic/rendezvous/etcd_rendezvous_backend_test.py b/test/distributed/elastic/rendezvous/etcd_rendezvous_backend_test.py
index a972ef01b2..55343bd080 100644
--- a/test/distributed/elastic/rendezvous/etcd_rendezvous_backend_test.py
+++ b/test/distributed/elastic/rendezvous/etcd_rendezvous_backend_test.py
@@ -8,21 +8,24 @@
import subprocess
from base64 import b64encode
-from typing import ClassVar, cast
+from typing import cast, ClassVar
from unittest import TestCase
from etcd import EtcdKeyNotFound # type: ignore[import]
-from torch.distributed.elastic.rendezvous import RendezvousConnectionError, RendezvousParameters
+from rendezvous_backend_test import RendezvousBackendTestMixin
+
+from torch.distributed.elastic.rendezvous import (
+ RendezvousConnectionError,
+ RendezvousParameters,
+)
from torch.distributed.elastic.rendezvous.etcd_rendezvous_backend import (
- EtcdRendezvousBackend,
create_backend,
+ EtcdRendezvousBackend,
)
from torch.distributed.elastic.rendezvous.etcd_server import EtcdServer
from torch.distributed.elastic.rendezvous.etcd_store import EtcdStore
-from rendezvous_backend_test import RendezvousBackendTestMixin
-
class EtcdRendezvousBackendTest(TestCase, RendezvousBackendTestMixin):
_server: ClassVar[EtcdServer]
@@ -45,7 +48,9 @@ class EtcdRendezvousBackendTest(TestCase, RendezvousBackendTestMixin):
except EtcdKeyNotFound:
pass
- self._backend = EtcdRendezvousBackend(self._client, "dummy_run_id", "/dummy_prefix")
+ self._backend = EtcdRendezvousBackend(
+ self._client, "dummy_run_id", "/dummy_prefix"
+ )
def _corrupt_state(self) -> None:
self._client.write("/dummy_prefix/dummy_run_id", "non_base64")
@@ -107,7 +112,9 @@ class CreateBackendTest(TestCase):
self.test_create_backend_returns_backend()
- def test_create_backend_returns_backend_if_read_timeout_is_not_specified(self) -> None:
+ def test_create_backend_returns_backend_if_read_timeout_is_not_specified(
+ self,
+ ) -> None:
del self._params.config["read_timeout"]
self._expected_read_timeout = 60
@@ -126,7 +133,9 @@ class CreateBackendTest(TestCase):
def test_create_backend_raises_error_if_protocol_is_invalid(self) -> None:
self._params.config["protocol"] = "dummy"
- with self.assertRaisesRegex(ValueError, r"^The protocol must be HTTP or HTTPS.$"):
+ with self.assertRaisesRegex(
+ ValueError, r"^The protocol must be HTTP or HTTPS.$"
+ ):
create_backend(self._params)
def test_create_backend_raises_error_if_read_timeout_is_invalid(self) -> None:
diff --git a/test/distributed/elastic/rendezvous/etcd_server_test.py b/test/distributed/elastic/rendezvous/etcd_server_test.py
index 08fe2e14a6..88726dddae 100644
--- a/test/distributed/elastic/rendezvous/etcd_server_test.py
+++ b/test/distributed/elastic/rendezvous/etcd_server_test.py
@@ -6,8 +6,8 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
-import unittest
import sys
+import unittest
import etcd
from torch.distributed.elastic.rendezvous.etcd_rendezvous import (
@@ -20,6 +20,7 @@ if os.getenv("CIRCLECI"):
print("T85992919 temporarily disabling in circle ci", file=sys.stderr)
sys.exit(0)
+
class EtcdServerTest(unittest.TestCase):
def test_etcd_server_start_stop(self):
server = EtcdServer()
diff --git a/test/distributed/elastic/rendezvous/rendezvous_backend_test.py b/test/distributed/elastic/rendezvous/rendezvous_backend_test.py
index b64254bf56..fa2c6ae9c2 100644
--- a/test/distributed/elastic/rendezvous/rendezvous_backend_test.py
+++ b/test/distributed/elastic/rendezvous/rendezvous_backend_test.py
@@ -7,10 +7,13 @@
# LICENSE file in the root directory of this source tree.
from abc import ABC, abstractmethod
-from typing import Any, Callable, Optional, Tuple, cast
+from typing import Any, Callable, cast, Optional, Tuple
from torch.distributed.elastic.rendezvous import RendezvousStateError
-from torch.distributed.elastic.rendezvous.dynamic_rendezvous import RendezvousBackend, Token
+from torch.distributed.elastic.rendezvous.dynamic_rendezvous import (
+ RendezvousBackend,
+ Token,
+)
class RendezvousBackendTestMixin(ABC):
@@ -28,7 +31,9 @@ class RendezvousBackendTestMixin(ABC):
"""Corrupts the state stored in the backend."""
pass
- def _set_state(self, state: bytes, token: Optional[Any] = None) -> Tuple[bytes, Token, bool]:
+ def _set_state(
+ self, state: bytes, token: Optional[Any] = None
+ ) -> Tuple[bytes, Token, bool]:
result = self._backend.set_state(state, token)
self.assertIsNotNone(result)
diff --git a/test/distributed/elastic/rendezvous/utils_test.py b/test/distributed/elastic/rendezvous/utils_test.py
index c180924ba5..b876f458ab 100644
--- a/test/distributed/elastic/rendezvous/utils_test.py
+++ b/test/distributed/elastic/rendezvous/utils_test.py
@@ -6,19 +6,19 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
+import socket
import threading
import time
-import socket
from datetime import timedelta
from typing import List
from unittest import TestCase
from unittest.mock import patch
from torch.distributed.elastic.rendezvous.utils import (
- _PeriodicTimer,
_delay,
_matches_machine_hostname,
_parse_rendezvous_config,
+ _PeriodicTimer,
_try_parse_port,
parse_rendezvous_endpoint,
)
@@ -229,7 +229,9 @@ class UtilsTest(TestCase):
def test_matches_machine_hostname_returns_true_if_hostname_is_machine_address(
self,
) -> None:
- addr_list = socket.getaddrinfo(socket.gethostname(), None, proto=socket.IPPROTO_TCP)
+ addr_list = socket.getaddrinfo(
+ socket.gethostname(), None, proto=socket.IPPROTO_TCP
+ )
for addr in (addr_info[4][0] for addr_info in addr_list):
with self.subTest(addr=addr):
@@ -255,20 +257,26 @@ class UtilsTest(TestCase):
self.assertGreaterEqual(time2 - time1, 0.2)
-
- @patch('socket.getaddrinfo', side_effect=[
- [(None, None, 0, 'a_host', ('1.2.3.4', 0))],
- [(None, None, 0, 'a_different_host', ('1.2.3.4', 0))]])
+ @patch(
+ "socket.getaddrinfo",
+ side_effect=[
+ [(None, None, 0, "a_host", ("1.2.3.4", 0))],
+ [(None, None, 0, "a_different_host", ("1.2.3.4", 0))],
+ ],
+ )
def test_matches_machine_hostname_returns_true_if_ip_address_match_between_hosts(
self,
_0,
) -> None:
self.assertTrue(_matches_machine_hostname("a_host"))
-
- @patch('socket.getaddrinfo', side_effect=[
- [(None, None, 0, 'a_host', ('1.2.3.4', 0))],
- [(None, None, 0, 'another_host_with_different_ip', ('1.2.3.5', 0))]])
+ @patch(
+ "socket.getaddrinfo",
+ side_effect=[
+ [(None, None, 0, "a_host", ("1.2.3.4", 0))],
+ [(None, None, 0, "another_host_with_different_ip", ("1.2.3.5", 0))],
+ ],
+ )
def test_matches_machine_hostname_returns_false_if_ip_address_not_match_between_hosts(
self,
_0,
diff --git a/test/distributed/elastic/timer/file_based_local_timer_test.py b/test/distributed/elastic/timer/file_based_local_timer_test.py
index 198c57f34b..6c7a92c35a 100644
--- a/test/distributed/elastic/timer/file_based_local_timer_test.py
+++ b/test/distributed/elastic/timer/file_based_local_timer_test.py
@@ -111,14 +111,18 @@ if not (IS_WINDOWS or IS_MACOS):
num_requests_per_client = 10
processes = []
for i in range(num_clients):
- p = mp.Process(target=func, args=(num_requests_per_client, self.file_path))
+ p = mp.Process(
+ target=func, args=(num_requests_per_client, self.file_path)
+ )
processes.append(p)
p.start()
for p in processes:
p.join()
self.server.run_once() # Allows the server to process all requests
- self.assertEqual(2 * num_clients * num_requests_per_client, self.server._request_count)
+ self.assertEqual(
+ 2 * num_clients * num_requests_per_client, self.server._request_count
+ )
@mock.patch("torch.distributed.elastic.timer.FileTimerServer._reap_worker")
def test_exit_before_release(self, mock_reap):
@@ -139,7 +143,9 @@ if not (IS_WINDOWS or IS_MACOS):
self.assertEqual(0, len(self.server._timers))
@mock.patch("torch.distributed.elastic.timer.FileTimerServer._reap_worker")
- @mock.patch("torch.distributed.elastic.timer.FileTimerServer.is_process_running")
+ @mock.patch(
+ "torch.distributed.elastic.timer.FileTimerServer.is_process_running"
+ )
def test_exit_before_release_reap(self, mock_pid_exists, mock_reap):
def func1(file_path):
client = timer.FileTimerClient(file_path)
@@ -185,7 +191,6 @@ if not (IS_WINDOWS or IS_MACOS):
client.acquire("test_scope", 0)
time.sleep(interval)
-
class FileTimerClientTest(TestCase):
def test_send_request_without_server(self):
client = timer.FileTimerClient("test_file")
@@ -194,7 +199,6 @@ if not (IS_WINDOWS or IS_MACOS):
with timer.expires(after=0.1):
time.sleep(0.1)
-
class FileTimerServerTest(TestCase):
def setUp(self):
super().setUp()
@@ -234,14 +238,26 @@ if not (IS_WINDOWS or IS_MACOS):
def _expired_timer(self, pid, scope):
expired = time.time() - 60
- return timer.FileTimerRequest(worker_pid=pid, scope_id=scope, expiration_time=expired, signal=signal.SIGKILL)
+ return timer.FileTimerRequest(
+ worker_pid=pid,
+ scope_id=scope,
+ expiration_time=expired,
+ signal=signal.SIGKILL,
+ )
def _valid_timer(self, pid, scope):
valid = time.time() + 60
- return timer.FileTimerRequest(worker_pid=pid, scope_id=scope, expiration_time=valid, signal=signal.SIGKILL)
+ return timer.FileTimerRequest(
+ worker_pid=pid,
+ scope_id=scope,
+ expiration_time=valid,
+ signal=signal.SIGKILL,
+ )
def _release_timer(self, pid, scope):
- return timer.FileTimerRequest(worker_pid=pid, scope_id=scope, expiration_time=-1)
+ return timer.FileTimerRequest(
+ worker_pid=pid, scope_id=scope, expiration_time=-1
+ )
@mock.patch("os.kill")
def test_expired_timers(self, mock_os_kill):
@@ -278,7 +294,9 @@ if not (IS_WINDOWS or IS_MACOS):
self.assertEqual(0, len(self.server._timers))
mock_os_kill.assert_not_called()
- @mock.patch("torch.distributed.elastic.timer.FileTimerServer.is_process_running")
+ @mock.patch(
+ "torch.distributed.elastic.timer.FileTimerServer.is_process_running"
+ )
@mock.patch("os.kill")
def test_valid_timers(self, mock_os_kill, mock_pid_exists):
"""
diff --git a/test/distributed/elastic/timer/local_timer_example.py b/test/distributed/elastic/timer/local_timer_example.py
index 71204d8350..48907bca7b 100644
--- a/test/distributed/elastic/timer/local_timer_example.py
+++ b/test/distributed/elastic/timer/local_timer_example.py
@@ -14,12 +14,12 @@ import time
import torch.distributed.elastic.timer as timer
import torch.multiprocessing as torch_mp
from torch.testing._internal.common_utils import (
- TEST_WITH_DEV_DBG_ASAN,
- run_tests,
- IS_WINDOWS,
IS_MACOS,
+ IS_WINDOWS,
+ run_tests,
skip_but_pass_in_sandcastle_if,
- TestCase
+ TEST_WITH_DEV_DBG_ASAN,
+ TestCase,
)
@@ -42,6 +42,7 @@ def _stuck_function(rank, mp_queue):
# timer is not supported on macos or windows
if not (IS_WINDOWS or IS_MACOS):
+
class LocalTimerExample(TestCase):
"""
Demonstrates how to use LocalTimerServer and LocalTimerClient
@@ -55,7 +56,9 @@ if not (IS_WINDOWS or IS_MACOS):
unittest. As of now this will SIGSEGV.
"""
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test is asan incompatible")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test is asan incompatible"
+ )
def test_torch_mp_example(self):
# in practice set the max_interval to a larger value (e.g. 60 seconds)
mp_queue = mp.get_context("spawn").Queue()
@@ -80,7 +83,9 @@ if not (IS_WINDOWS or IS_MACOS):
server.stop()
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test is asan incompatible")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test is asan incompatible"
+ )
def test_example_start_method_spawn(self):
self._run_example_with(start_method="spawn")
diff --git a/test/distributed/elastic/timer/local_timer_test.py b/test/distributed/elastic/timer/local_timer_test.py
index 386b6e9be9..6111f326d2 100644
--- a/test/distributed/elastic/timer/local_timer_test.py
+++ b/test/distributed/elastic/timer/local_timer_test.py
@@ -15,12 +15,12 @@ import torch.distributed.elastic.timer as timer
from torch.distributed.elastic.timer.api import TimerRequest
from torch.distributed.elastic.timer.local_timer import MultiprocessingRequestQueue
from torch.testing._internal.common_utils import (
- run_tests,
- IS_WINDOWS,
IS_MACOS,
+ IS_WINDOWS,
+ run_tests,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_TSAN,
- TestCase
+ TestCase,
)
diff --git a/test/distributed/elastic/utils/distributed_test.py b/test/distributed/elastic/utils/distributed_test.py
index 4e5fa8d7e0..65ebd4b6e7 100644
--- a/test/distributed/elastic/utils/distributed_test.py
+++ b/test/distributed/elastic/utils/distributed_test.py
@@ -24,12 +24,19 @@ from torch.testing._internal.common_utils import (
IS_WINDOWS,
run_tests,
TEST_WITH_TSAN,
- TestCase
+ TestCase,
)
def _create_c10d_store_mp(is_server, server_addr, port, world_size, wait_for_workers):
- store = create_c10d_store(is_server, server_addr, port, world_size, wait_for_workers=wait_for_workers, timeout=2)
+ store = create_c10d_store(
+ is_server,
+ server_addr,
+ port,
+ world_size,
+ wait_for_workers=wait_for_workers,
+ timeout=2,
+ )
if store is None:
raise AssertionError()
diff --git a/test/distributed/launcher/api_test.py b/test/distributed/launcher/api_test.py
index 6a4b46272e..81e9320d1f 100644
--- a/test/distributed/launcher/api_test.py
+++ b/test/distributed/launcher/api_test.py
@@ -23,9 +23,7 @@ from unittest.mock import MagicMock, Mock, patch
import torch
import torch.distributed as dist
from torch.distributed.elastic.agent.server.api import RunResult, WorkerState
-from torch.distributed.elastic.multiprocessing.api import (
- SignalException,
-)
+from torch.distributed.elastic.multiprocessing.api import SignalException
from torch.distributed.elastic.multiprocessing.errors import ChildFailedError
from torch.distributed.elastic.rendezvous.etcd_server import EtcdServer
from torch.distributed.elastic.utils import get_socket_with_port
@@ -157,7 +155,9 @@ class ElasticLaunchTest(unittest.TestCase):
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_launch_script_python(self):
nnodes = 1
nproc_per_node = 4
@@ -172,7 +172,9 @@ class ElasticLaunchTest(unittest.TestCase):
world_size = nnodes * nproc_per_node
self.check_works_ran(world_size)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_launch_script_python_local_rank_transfer(self):
nnodes = 1
nproc_per_node = 4
@@ -187,7 +189,9 @@ class ElasticLaunchTest(unittest.TestCase):
world_size = nnodes * nproc_per_node
self.check_works_ran(world_size)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_launch_script_bash(self):
nnodes = 1
nproc_per_node = 4
@@ -200,7 +204,9 @@ class ElasticLaunchTest(unittest.TestCase):
world_size = nnodes * nproc_per_node
self.check_works_ran(world_size)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_launch_function(self):
nnodes = 1
nproc_per_node = 4
@@ -214,7 +220,9 @@ class ElasticLaunchTest(unittest.TestCase):
actual_res = sorted(value for value in res.values())
self.assertEqual(expected_res, actual_res)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_launch_dist_sum_with_static_rdzv(self):
nnodes = 1
nproc_per_node = 4
@@ -243,7 +251,9 @@ class ElasticLaunchTest(unittest.TestCase):
actual_res = sorted(value for value in res.values())
self.assertEqual(expected_res, actual_res)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_launch_elastic(self):
nproc_per_node = 4
@@ -288,7 +298,9 @@ class ElasticLaunchTest(unittest.TestCase):
)("-u", path("bin/test_script.py"), f"--touch-file-dir={self.test_dir}")
record_mock.assert_called_once()
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_launch_elastic_multiple_agents(self):
min_nodes = 1
max_nodes = 2
diff --git a/test/distributed/launcher/launch_test.py b/test/distributed/launcher/launch_test.py
index 4140e55c6c..b8312de37f 100644
--- a/test/distributed/launcher/launch_test.py
+++ b/test/distributed/launcher/launch_test.py
@@ -15,8 +15,8 @@ from contextlib import closing
import torch.distributed.launch as launch
from torch.distributed.elastic.utils import get_socket_with_port
from torch.testing._internal.common_utils import (
- TEST_WITH_DEV_DBG_ASAN,
skip_but_pass_in_sandcastle_if,
+ TEST_WITH_DEV_DBG_ASAN,
)
diff --git a/test/distributed/launcher/run_test.py b/test/distributed/launcher/run_test.py
index f33d075d8a..c816042e3e 100644
--- a/test/distributed/launcher/run_test.py
+++ b/test/distributed/launcher/run_test.py
@@ -145,7 +145,9 @@ class ElasticLaunchTest(unittest.TestCase):
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_launch_user_script_bash(self):
run_id = str(uuid.uuid4().int)
nnodes = 1
@@ -176,7 +178,9 @@ class ElasticLaunchTest(unittest.TestCase):
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_launch_user_script_default_nproc(self):
run_id = str(uuid.uuid4().int)
nnodes = 1
@@ -205,7 +209,9 @@ class ElasticLaunchTest(unittest.TestCase):
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_launch_with_env_vars(self):
run_id = str(uuid.uuid4().int)
nnodes = 1
@@ -263,27 +269,37 @@ class ElasticLaunchTest(unittest.TestCase):
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_nproc_launch_auto_configurations(self):
self._test_nproc_launch_configuration("auto", os.cpu_count())
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_nproc_launch_number_configurations(self):
self._test_nproc_launch_configuration("4", 4)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_nproc_launch_unknown_configurations(self):
with self.assertRaises(ValueError):
self._test_nproc_launch_configuration("unknown", 4)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
@patch("torch.cuda.is_available", return_value=True)
@patch("torch.cuda.device_count", return_value=3)
def test_nproc_gpu_launch_configurations(self, _mock1, _mock2):
self._test_nproc_launch_configuration("auto", 3)
self._test_nproc_launch_configuration("gpu", 3)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_launch_elastic(self):
run_id = str(uuid.uuid4().int)
min_nodes = 1
@@ -311,7 +327,9 @@ class ElasticLaunchTest(unittest.TestCase):
)
@mock.patch("torch.distributed.elastic.events.record")
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_launch_elastic_worker_raise_exception(self, record_mock):
"""
        Asserts that when the worker program fails and launcher raises exception
@@ -339,7 +357,9 @@ class ElasticLaunchTest(unittest.TestCase):
record_mock.assert_called_once()
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
@mock.patch(
"torch.distributed.elastic.agent.server.local_elastic_agent.LocalElasticAgent.run"
)
@@ -371,7 +391,9 @@ class ElasticLaunchTest(unittest.TestCase):
launch.main(args)
record_mock.assert_called_once()
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_launch_standalone(self):
nnodes = 1
nproc_per_node = 4
@@ -393,7 +415,9 @@ class ElasticLaunchTest(unittest.TestCase):
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_launch_run_path(self):
nnodes = 1
nproc_per_node = 4
@@ -415,7 +439,9 @@ class ElasticLaunchTest(unittest.TestCase):
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_launch_elastic_multiple_agents(self):
run_id = str(uuid.uuid4().int)
min_nodes = 1
@@ -484,7 +510,9 @@ class ElasticLaunchTest(unittest.TestCase):
launch.main(args)
rdzv_handler_mock.shutdown.assert_called_once()
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_is_torchelastic_launched(self):
# launch test script with torchelastic and validate that
# torch.distributed.is_torchelastic_launched() returns True
@@ -506,7 +534,9 @@ class ElasticLaunchTest(unittest.TestCase):
self.assertEqual("True", is_torchelastic_launched)
@patch("torch.distributed.run.metadata")
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_is_torchelastic_launched_with_logs_spec_defined(self, metadata_mock):
# mock the entrypoint API to avoid version issues.
entrypoints = MagicMock()
@@ -518,7 +548,7 @@ class ElasticLaunchTest(unittest.TestCase):
ep = MagicMock()
ep.load.return_value = DefaultLogsSpecs
- group.select.return_value = (ep)
+ group.select.return_value = ep
group.__getitem__.return_value = ep
out_file = f"{os.path.join(self.test_dir, 'out')}"
@@ -540,7 +570,9 @@ class ElasticLaunchTest(unittest.TestCase):
is_torchelastic_launched = fp.readline()
self.assertEqual("True", is_torchelastic_launched)
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_logs_logs_spec_entrypoint_must_be_defined(self):
with self.assertRaises(ValueError):
launch.main(
@@ -591,7 +623,9 @@ class ElasticLaunchTest(unittest.TestCase):
runpy.run_path(sys.argv[0], run_name="__main__")
# nothing to validate, just make sure it runs
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_init_method_tcp_with_torchelastic(self):
port = get_free_port()
launch.main(
@@ -629,7 +663,9 @@ class ElasticLaunchTest(unittest.TestCase):
runpy.run_path(sys.argv[0], run_name="__main__")
# nothing to validate, just make sure it runs
- @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ @skip_but_pass_in_sandcastle_if(
+ TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
+ )
def test_init_method_env_with_torchelastic(self):
port = get_free_port()
launch.main(
diff --git a/test/distributed/nn/jit/test_instantiator.py b/test/distributed/nn/jit/test_instantiator.py
index 8a7026f964..03d3a6f050 100644
--- a/test/distributed/nn/jit/test_instantiator.py
+++ b/test/distributed/nn/jit/test_instantiator.py
@@ -6,15 +6,15 @@ import sys
from typing import Tuple
import torch
-from torch import Tensor, nn
import torch.distributed as dist
+from torch import nn, Tensor
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
from torch.distributed.nn.jit import instantiator
-from torch.testing._internal.common_utils import TestCase, run_tests
+from torch.testing._internal.common_utils import run_tests, TestCase
@torch.jit.interface
diff --git a/test/distributed/optim/test_zero_redundancy_optimizer.py b/test/distributed/optim/test_zero_redundancy_optimizer.py
index b84d96cb0f..485df8f5b5 100644
--- a/test/distributed/optim/test_zero_redundancy_optimizer.py
+++ b/test/distributed/optim/test_zero_redundancy_optimizer.py
@@ -47,6 +47,7 @@ try:
except ImportError:
HAS_TORCHVISION = False
+
# Use GLOO on GPU when running CUDA + Windows
def _get_backend_for_tests():
return (
@@ -725,7 +726,8 @@ class TestZeroRedundancyOptimizerDistributed(TestZeroRedundancyOptimizer):
common_distributed.logger.info(
"Skipping `test_nondefault_process_group()` since world size "
"of %s is less than %s",
- self.world_size, MIN_WORLD_SIZE
+ self.world_size,
+ MIN_WORLD_SIZE,
)
return
BACKEND = dist.Backend.GLOO
@@ -1275,7 +1277,7 @@ class TestZeroRedundancyOptimizerDistributed(TestZeroRedundancyOptimizer):
[torch.randn(1, 3, 3, 1000).to(device) for _ in range(NUM_INPUTS)],
)
)
- for (model, inputs) in models_to_test:
+ for model, inputs in models_to_test:
# Enable determinism in cudnn operators
with torch.backends.cudnn.flags(
enabled=True, deterministic=True, benchmark=False
diff --git a/test/distributed/pipeline/sync/conftest.py b/test/distributed/pipeline/sync/conftest.py
index 78f7d3a8f1..4f2479b27b 100644
--- a/test/distributed/pipeline/sync/conftest.py
+++ b/test/distributed/pipeline/sync/conftest.py
@@ -5,11 +5,13 @@
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import tempfile
+
import pytest
import torch
import torch.distributed as dist
+
@pytest.fixture(autouse=True)
def manual_seed_zero():
torch.manual_seed(0)
@@ -38,6 +40,7 @@ def cuda_sleep():
def pytest_report_header():
return f"torch: {torch.__version__}"
+
@pytest.fixture
def setup_rpc(scope="session"):
file = tempfile.NamedTemporaryFile()
@@ -47,11 +50,12 @@ def setup_rpc(scope="session"):
world_size=1,
rpc_backend_options=dist.rpc.TensorPipeRpcBackendOptions(
init_method=f"file://{file.name}",
- )
+ ),
)
yield
dist.rpc.shutdown()
+
def pytest_ignore_collect(path, config):
"Skip this directory if distributed modules are not enabled."
return not dist.is_available()
diff --git a/test/distributed/pipeline/sync/skip/test_gpipe.py b/test/distributed/pipeline/sync/skip/test_gpipe.py
index 21731d452d..e002d65db7 100644
--- a/test/distributed/pipeline/sync/skip/test_gpipe.py
+++ b/test/distributed/pipeline/sync/skip/test_gpipe.py
@@ -12,13 +12,19 @@ from torch import nn
from torch.distributed.pipeline.sync import Pipe
from torch.distributed.pipeline.sync.skip import pop, skippable, stash
-from torch.distributed.pipeline.sync.skip.portal import PortalBlue, PortalCopy, PortalOrange
+from torch.distributed.pipeline.sync.skip.portal import (
+ PortalBlue,
+ PortalCopy,
+ PortalOrange,
+)
from torch.distributed.pipeline.sync.utils import partition_model
from torch.testing._internal.common_utils import run_tests
@pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
-@pytest.mark.parametrize("balance", [[3], [1, 2], [2, 1], [1, 1, 1]], ids=["3", "1:2", "2:1", "1:1:1"])
+@pytest.mark.parametrize(
+ "balance", [[3], [1, 2], [2, 1], [1, 1, 1]], ids=["3", "1:2", "2:1", "1:1:1"]
+)
@pytest.mark.parametrize("checkpoint", ["never", "always", "except_last"])
def test_1to3(balance, checkpoint, setup_rpc):
if torch.cuda.device_count() < len(balance):
@@ -67,8 +73,12 @@ def test_1to3(balance, checkpoint, setup_rpc):
loss = output.local_value().mean()
loss.backward()
- assert torch.allclose(output.local_value().norm(), torch.tensor(1039.0, device=out_device), atol=6e-1)
- assert torch.allclose(input.grad.norm(), torch.tensor(0.0004533053, device=in_device))
+ assert torch.allclose(
+ output.local_value().norm(), torch.tensor(1039.0, device=out_device), atol=6e-1
+ )
+ assert torch.allclose(
+ input.grad.norm(), torch.tensor(0.0004533053, device=in_device)
+ )
def test_none_skip(setup_rpc):
diff --git a/test/distributed/pipeline/sync/skip/test_leak.py b/test/distributed/pipeline/sync/skip/test_leak.py
index f0e82f7bba..2bf797dae5 100644
--- a/test/distributed/pipeline/sync/skip/test_leak.py
+++ b/test/distributed/pipeline/sync/skip/test_leak.py
@@ -10,7 +10,7 @@ import pytest
import torch
from torch import nn
-from torch.distributed.pipeline.sync import Pipe, is_checkpointing, is_recomputing
+from torch.distributed.pipeline.sync import is_checkpointing, is_recomputing, Pipe
from torch.distributed.pipeline.sync.skip import pop, skippable, stash
from torch.distributed.pipeline.sync.skip.tracker import current_skip_tracker
from torch.testing._internal.common_utils import run_tests
@@ -113,7 +113,9 @@ def test_no_portal_without_pipe(train, monkeypatch, setup_rpc):
def deny(*args, **kwargs):
raise AssertionError("tried to create Portal without Pipe")
- monkeypatch.setattr("torch.distributed.pipeline.sync.skip.portal.Portal.__init__", deny)
+ monkeypatch.setattr(
+ "torch.distributed.pipeline.sync.skip.portal.Portal.__init__", deny
+ )
model = nn.Sequential(Stash(), Pop())
diff --git a/test/distributed/pipeline/sync/skip/test_tracker.py b/test/distributed/pipeline/sync/skip/test_tracker.py
index 5810cab976..007a5a963a 100644
--- a/test/distributed/pipeline/sync/skip/test_tracker.py
+++ b/test/distributed/pipeline/sync/skip/test_tracker.py
@@ -6,18 +6,25 @@
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
-from queue import Queue
import threading
+from queue import Queue
import pytest
import torch
from torch import nn
-from torch.distributed.pipeline.sync.checkpoint import enable_checkpointing, enable_recomputing
+from torch.distributed.pipeline.sync.checkpoint import (
+ enable_checkpointing,
+ enable_recomputing,
+)
from torch.distributed.pipeline.sync.microbatch import Batch
from torch.distributed.pipeline.sync.skip import pop, skippable, stash
from torch.distributed.pipeline.sync.skip.layout import SkipLayout
-from torch.distributed.pipeline.sync.skip.tracker import SkipTracker, SkipTrackerThroughPotals, current_skip_tracker
+from torch.distributed.pipeline.sync.skip.tracker import (
+ current_skip_tracker,
+ SkipTracker,
+ SkipTrackerThroughPotals,
+)
from torch.testing._internal.common_utils import run_tests
@@ -76,7 +83,10 @@ def test_reuse_portal():
def test_no_copy_no_portal():
- skip_layout = SkipLayout(num_partitions=2, skip_routes={(None, "copy"): (0, 1), (None, "not_copy"): (0, 0)})
+ skip_layout = SkipLayout(
+ num_partitions=2,
+ skip_routes={(None, "copy"): (0, 1), (None, "not_copy"): (0, 0)},
+ )
skip_tracker = SkipTrackerThroughPotals(skip_layout)
batch = Batch(torch.tensor([1.0]))
diff --git a/test/distributed/pipeline/sync/skip/test_verify_skippables.py b/test/distributed/pipeline/sync/skip/test_verify_skippables.py
index 6de439ec88..265c3fee85 100644
--- a/test/distributed/pipeline/sync/skip/test_verify_skippables.py
+++ b/test/distributed/pipeline/sync/skip/test_verify_skippables.py
@@ -151,7 +151,12 @@ def test_double_stash_pop_but_isolated():
ns2 = Namespace()
verify_skippables(
- nn.Sequential(Layer1().isolate(ns1), Layer2().isolate(ns1), Layer3().isolate(ns2), Layer4().isolate(ns2),)
+ nn.Sequential(
+ Layer1().isolate(ns1),
+ Layer2().isolate(ns1),
+ Layer3().isolate(ns2),
+ Layer4().isolate(ns2),
+ )
)
diff --git a/test/distributed/pipeline/sync/test_balance.py b/test/distributed/pipeline/sync/test_balance.py
index b8a81aabb7..82af7545bb 100644
--- a/test/distributed/pipeline/sync/test_balance.py
+++ b/test/distributed/pipeline/sync/test_balance.py
@@ -12,11 +12,17 @@ import pytest
import torch
from torch import nn
-from torch.distributed.pipeline.sync._balance import balance_by_size, balance_by_time, blockpartition
+from torch.distributed.pipeline.sync._balance import (
+ balance_by_size,
+ balance_by_time,
+ blockpartition,
+)
from torch.distributed.pipeline.sync._balance.profile import layerwise_sandbox
from torch.testing._internal.common_utils import run_tests
-skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
+skip_if_no_cuda = pytest.mark.skipif(
+ not torch.cuda.is_available(), reason="cuda required"
+)
devices = ["cpu"]
if torch.cuda.is_available():
@@ -24,7 +30,10 @@ if torch.cuda.is_available():
def test_blockpartition():
- assert blockpartition.solve([1, 2, 3, 4, 5, 6], partitions=2) == [[1, 2, 3, 4], [5, 6]]
+ assert blockpartition.solve([1, 2, 3, 4, 5, 6], partitions=2) == [
+ [1, 2, 3, 4],
+ [5, 6],
+ ]
def test_blockpartition_zeros():
diff --git a/test/distributed/pipeline/sync/test_bugs.py b/test/distributed/pipeline/sync/test_bugs.py
index f9860cb0f2..c3dc716a64 100644
--- a/test/distributed/pipeline/sync/test_bugs.py
+++ b/test/distributed/pipeline/sync/test_bugs.py
@@ -8,12 +8,12 @@
# LICENSE file in the root directory of this source tree.
import pytest
import torch
-from torch import nn
import torch.nn.functional as F
+from torch import nn
from torch.distributed.pipeline.sync import Pipe
-from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.common_cuda import TEST_MULTIGPU
+from torch.testing._internal.common_utils import run_tests
def test_python_autograd_function(setup_rpc):
diff --git a/test/distributed/pipeline/sync/test_checkpoint.py b/test/distributed/pipeline/sync/test_checkpoint.py
index f3d57c218c..e1ae6f6754 100644
--- a/test/distributed/pipeline/sync/test_checkpoint.py
+++ b/test/distributed/pipeline/sync/test_checkpoint.py
@@ -10,10 +10,15 @@ from functools import partial
import pytest
import torch
-from torch import nn
import torch.cuda
+from torch import nn
-from torch.distributed.pipeline.sync.checkpoint import Checkpointing, checkpoint, is_checkpointing, is_recomputing
+from torch.distributed.pipeline.sync.checkpoint import (
+ checkpoint,
+ Checkpointing,
+ is_checkpointing,
+ is_recomputing,
+)
from torch.distributed.pipeline.sync.dependency import fork, join
from torch.distributed.pipeline.sync.microbatch import Batch
from torch.testing._internal.common_utils import run_tests
@@ -63,7 +68,14 @@ def test_serial_checkpoints(device):
# +--> {b} --Checkpoint(Log)--> {b} --First--> {b}
out.backward()
- assert timeline == ["a:forward", "b:forward", "b:forward", "b:backward", "a:forward", "a:backward"]
+ assert timeline == [
+ "a:forward",
+ "b:forward",
+ "b:forward",
+ "b:backward",
+ "a:forward",
+ "a:backward",
+ ]
# |----------------------| |-----------------------| |-----------------------|
# forward pass Checkpoint(Log[b]) Checkpoint(Log[a])
diff --git a/test/distributed/pipeline/sync/test_copy.py b/test/distributed/pipeline/sync/test_copy.py
index 171b7ffbb8..22a3a37805 100644
--- a/test/distributed/pipeline/sync/test_copy.py
+++ b/test/distributed/pipeline/sync/test_copy.py
@@ -10,10 +10,19 @@ import pytest
import torch
from torch.distributed.pipeline.sync.copy import Copy, Wait
-from torch.distributed.pipeline.sync.stream import CPUStream, current_stream, get_device, is_cuda, new_stream, use_stream
+from torch.distributed.pipeline.sync.stream import (
+ CPUStream,
+ current_stream,
+ get_device,
+ is_cuda,
+ new_stream,
+ use_stream,
+)
from torch.testing._internal.common_utils import run_tests
-skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
+skip_if_no_cuda = pytest.mark.skipif(
+ not torch.cuda.is_available(), reason="cuda required"
+)
def _test_copy_wait(prev_stream, next_stream, cuda_sleep=None):
diff --git a/test/distributed/pipeline/sync/test_deferred_batch_norm.py b/test/distributed/pipeline/sync/test_deferred_batch_norm.py
index 4e2578da94..2fea96e7dd 100644
--- a/test/distributed/pipeline/sync/test_deferred_batch_norm.py
+++ b/test/distributed/pipeline/sync/test_deferred_batch_norm.py
@@ -28,7 +28,7 @@ def tilt_dist(input):
# Tilt mean by single batch.
for i, single in enumerate(input):
- single += 2 ** i
+ single += 2**i
return input
@@ -140,7 +140,7 @@ def test_optimize():
dbn.eval()
with torch.no_grad():
- assert torch.allclose(bn(input), dbn(input), atol=1e-1 * (10 ** i))
+ assert torch.allclose(bn(input), dbn(input), atol=1e-1 * (10**i))
def test_conv_bn():
diff --git a/test/distributed/pipeline/sync/test_dependency.py b/test/distributed/pipeline/sync/test_dependency.py
index cff4082759..73283b88ab 100644
--- a/test/distributed/pipeline/sync/test_dependency.py
+++ b/test/distributed/pipeline/sync/test_dependency.py
@@ -11,7 +11,7 @@ import weakref
import pytest
import torch
-from torch.distributed.pipeline.sync.dependency import Fork, Join, fork, join
+from torch.distributed.pipeline.sync.dependency import Fork, fork, Join, join
from torch.testing._internal.common_utils import run_tests
diff --git a/test/distributed/pipeline/sync/test_pipe.py b/test/distributed/pipeline/sync/test_pipe.py
index 7fc8d8b7c5..b0237f8427 100644
--- a/test/distributed/pipeline/sync/test_pipe.py
+++ b/test/distributed/pipeline/sync/test_pipe.py
@@ -6,27 +6,26 @@
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
+import random
+import time
from collections import OrderedDict
from copy import deepcopy
-import time
import pytest
-import random
import torch
-from torch import nn
-from torch import Tensor
+from torch import nn, Tensor
-from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
+from torch.distributed.pipeline.sync import NoChunk, Pipe, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
-from torch.testing._internal.common_utils import run_tests, TEST_CUDA
from torch.testing._internal.common_cuda import TEST_MULTIGPU
+from torch.testing._internal.common_utils import run_tests, TEST_CUDA
skip_if_no_cuda = pytest.mark.skipif(not TEST_CUDA, reason="cuda required")
def test_pipe_without_rpc():
model = nn.Sequential(nn.Linear(1, 1))
- with pytest.raises(RuntimeError, match='Please initialize RPC framework'):
+ with pytest.raises(RuntimeError, match="Please initialize RPC framework"):
pipe = Pipe(model, chunks=1)
@@ -135,14 +134,19 @@ def test_checkpoint_mode(setup_rpc):
never_output = never(input)
assert count_grad_fn(always_output.local_value().grad_fn, "CheckpointBackward") == 2
- assert count_grad_fn(except_last_output.local_value().grad_fn, "CheckpointBackward") == 1
+ assert (
+ count_grad_fn(except_last_output.local_value().grad_fn, "CheckpointBackward")
+ == 1
+ )
assert count_grad_fn(never_output.local_value().grad_fn, "CheckpointBackward") == 0
def test_checkpoint_mode_invalid(setup_rpc):
model = nn.Sequential(nn.Linear(1, 1))
- with pytest.raises(ValueError, match="checkpoint is not one of 'always', 'except_last', or 'never'"):
+ with pytest.raises(
+ ValueError, match="checkpoint is not one of 'always', 'except_last', or 'never'"
+ ):
Pipe(model, chunks=2, checkpoint="INVALID_CHECKPOINT")
@@ -329,10 +333,7 @@ def test_multi_sequence_input(setup_rpc):
model = Pipe(nn.Sequential(MultiSeq()))
with pytest.raises(TypeError):
- model(
- [torch.rand(10), torch.rand(10)],
- [torch.rand(10), torch.rand(10)]
- )
+ model([torch.rand(10), torch.rand(10)], [torch.rand(10), torch.rand(10)])
def test_input_singleton(setup_rpc):
@@ -427,7 +428,9 @@ def test_valid_non_tensor(checkpoint, setup_rpc):
res += d
return c, res, a, d + f if f is not None else d, b, e, f
- model = Pipe(nn.Sequential(NonTensor1(), NonTensor2()), chunks=5, checkpoint=checkpoint)
+ model = Pipe(
+ nn.Sequential(NonTensor1(), NonTensor2()), chunks=5, checkpoint=checkpoint
+ )
a = random.randint(0, 10)
b = torch.rand(10, 10)
c = random.randint(0, 1) == 0
@@ -507,7 +510,7 @@ def test_uneven_batch_size(checkpoint, setup_rpc):
b = random.randint(0, 10)
c = torch.rand(4, 10)
- with pytest.raises(RuntimeError, match='Found different number of chunks'):
+ with pytest.raises(RuntimeError, match="Found different number of chunks"):
model(a, b, c)
@@ -529,7 +532,7 @@ def test_no_chunk(checkpoint, setup_rpc):
assert torch.allclose(torch.cat((c, c, c, c, c)), res[2])
# Test invalid type for NoChunk
- with pytest.raises(TypeError, match='NoChunk only supported for tensors'):
+ with pytest.raises(TypeError, match="NoChunk only supported for tensors"):
NoChunk(b)
@@ -538,7 +541,10 @@ def test_deferred_batch_norm(checkpoint, setup_rpc):
bn = nn.BatchNorm2d(3)
pipe_bn = deepcopy(bn)
pipe = Pipe(
- nn.Sequential(pipe_bn), chunks=2, checkpoint=checkpoint, deferred_batch_norm=True
+ nn.Sequential(pipe_bn),
+ chunks=2,
+ checkpoint=checkpoint,
+ deferred_batch_norm=True,
)
x = torch.rand(4, 3, 10, 10)
@@ -554,7 +560,10 @@ def test_deferred_batch_norm_params(checkpoint, setup_rpc):
bn = nn.BatchNorm2d(3)
pipe_bn = deepcopy(bn)
pipe = Pipe(
- nn.Sequential(pipe_bn), chunks=1, checkpoint=checkpoint, deferred_batch_norm=True
+ nn.Sequential(pipe_bn),
+ chunks=1,
+ checkpoint=checkpoint,
+ deferred_batch_norm=True,
)
x = torch.rand(4, 3, 10, 10)
@@ -682,7 +691,9 @@ def test_named_children(setup_rpc):
def test_verify_module_non_sequential(setup_rpc):
- with pytest.raises(TypeError, match="module must be nn.Sequential to be partitioned"):
+ with pytest.raises(
+ TypeError, match="module must be nn.Sequential to be partitioned"
+ ):
Pipe(nn.Module())
@@ -690,7 +701,9 @@ def test_verify_module_duplicate_children(setup_rpc):
conv = nn.Conv2d(3, 3, 1)
model = nn.Sequential(conv, conv)
- with pytest.raises(ValueError, match="module with duplicate children is not supported"):
+ with pytest.raises(
+ ValueError, match="module with duplicate children is not supported"
+ ):
Pipe(model)
@@ -708,22 +721,17 @@ def test_verify_module_params_on_same_device(setup_rpc):
with pytest.raises(
ValueError,
- match=r'should have all parameters on a single device, please use .to\(\)'
- ' to place the module on a single device'):
+ match=r"should have all parameters on a single device, please use .to\(\)"
+ " to place the module on a single device",
+ ):
Pipe(model)
@pytest.mark.skipif(not TEST_MULTIGPU, reason="Need atleast two GPUs")
def test_verify_nested_modules(setup_rpc):
model = nn.Sequential(
- nn.Sequential(
- nn.Linear(32, 16).cuda(0),
- nn.Linear(16, 8).cuda(0)
- ),
- nn.Sequential(
- nn.Linear(8, 4).cuda(1),
- nn.Linear(4, 2).cuda(1)
- ),
+ nn.Sequential(nn.Linear(32, 16).cuda(0), nn.Linear(16, 8).cuda(0)),
+ nn.Sequential(nn.Linear(8, 4).cuda(1), nn.Linear(4, 2).cuda(1)),
)
pipe = Pipe(model)
@@ -785,7 +793,11 @@ def test_multiple_inputs(checkpoint, setup_rpc):
def forward(self, a, b):
return a + b
- model = Pipe(nn.Sequential(Module1().cuda(0), Module2().cuda(0)), chunks=2, checkpoint=checkpoint)
+ model = Pipe(
+ nn.Sequential(Module1().cuda(0), Module2().cuda(0)),
+ chunks=2,
+ checkpoint=checkpoint,
+ )
t = torch.rand(10)
res = model(t, t, t).local_value()
assert torch.equal(res, (t + t + t) + (t * t * t))
@@ -805,7 +817,10 @@ def test_inputs_wrong_device(setup_rpc):
a = torch.rand(10).cuda(1)
b = torch.rand(10).cuda(1)
model = Pipe(nn.Sequential(Module1().cuda(0), Module1().cuda(1)), chunks=2)
- with pytest.raises(ValueError, match='All inputs should be on the same device as the first partition'):
+ with pytest.raises(
+ ValueError,
+ match="All inputs should be on the same device as the first partition",
+ ):
model(a, b)
@@ -815,21 +830,27 @@ def test_with_device_wrapper(setup_rpc):
fc2 = nn.Linear(8, 4).cuda(1)
dropout = nn.Dropout()
- model = nn.Sequential(fc1, fc2, WithDevice(dropout, 'cuda:1'))
+ model = nn.Sequential(fc1, fc2, WithDevice(dropout, "cuda:1"))
model = Pipe(model, chunks=8)
- assert torch.device('cuda:1') == model(torch.rand(16, 16).cuda(0)).local_value().device
- assert [torch.device('cuda:0'), torch.device('cuda:1')] == model.devices
+ assert (
+ torch.device("cuda:1") == model(torch.rand(16, 16).cuda(0)).local_value().device
+ )
+ assert [torch.device("cuda:0"), torch.device("cuda:1")] == model.devices
- model = nn.Sequential(fc1, WithDevice(dropout, 'cuda:1'))
+ model = nn.Sequential(fc1, WithDevice(dropout, "cuda:1"))
model = Pipe(model, chunks=8)
- assert torch.device('cuda:1') == model(torch.rand(16, 16).cuda(0)).local_value().device
- assert [torch.device('cuda:0'), torch.device('cuda:1')] == model.devices
+ assert (
+ torch.device("cuda:1") == model(torch.rand(16, 16).cuda(0)).local_value().device
+ )
+ assert [torch.device("cuda:0"), torch.device("cuda:1")] == model.devices
- model = nn.Sequential(fc1, WithDevice(fc2, 'cuda:0'))
+ model = nn.Sequential(fc1, WithDevice(fc2, "cuda:0"))
model = Pipe(model, chunks=8)
- assert torch.device('cuda:0') == model(torch.rand(16, 16).cuda(0)).local_value().device
- assert [torch.device('cuda:0')] == model.devices
- assert torch.device('cuda:0') == fc2.weight.device
+ assert (
+ torch.device("cuda:0") == model(torch.rand(16, 16).cuda(0)).local_value().device
+ )
+ assert [torch.device("cuda:0")] == model.devices
+ assert torch.device("cuda:0") == fc2.weight.device
if __name__ == "__main__":
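Editor's note: the hunks above only re-quote and re-wrap the pytest.raises(..., match=...) calls; behavior is unchanged. As a reminder of why a pattern such as `.to\(\)` stays escaped after the re-quoting: `match` is a regular expression applied with re.search, not a literal substring. A small standalone sketch (the might_fail helper is illustrative, not part of this patch):

import re

import pytest


def might_fail():
    # Hypothetical stand-in for the Pipe() constructor exercised above.
    raise ValueError("please use .to() to place the module on a single device")


# Parentheses are regex metacharacters, so they must stay escaped in the pattern.
with pytest.raises(ValueError, match=r"please use \.to\(\)"):
    might_fail()

# re.escape is an equivalent way to match the literal text.
with pytest.raises(ValueError, match=re.escape("use .to()")):
    might_fail()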
diff --git a/test/distributed/pipeline/sync/test_stream.py b/test/distributed/pipeline/sync/test_stream.py
index 6fa8e99b13..29281ca606 100644
--- a/test/distributed/pipeline/sync/test_stream.py
+++ b/test/distributed/pipeline/sync/test_stream.py
@@ -23,7 +23,9 @@ from torch.distributed.pipeline.sync.stream import (
)
from torch.testing._internal.common_utils import run_tests
-skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
+skip_if_no_cuda = pytest.mark.skipif(
+ not torch.cuda.is_available(), reason="cuda required"
+)
class TestNewStream:
diff --git a/test/distributed/pipeline/sync/test_transparency.py b/test/distributed/pipeline/sync/test_transparency.py
index 88456b407d..a87a04150f 100644
--- a/test/distributed/pipeline/sync/test_transparency.py
+++ b/test/distributed/pipeline/sync/test_transparency.py
@@ -22,7 +22,12 @@ def test_simple_linears(setup_rpc):
p.grad = None
inputs = torch.rand(8, 1)
- model = nn.Sequential(nn.Linear(1, 2), nn.Linear(2, 4), nn.Linear(4, 2), nn.Linear(2, 1),)
+ model = nn.Sequential(
+ nn.Linear(1, 2),
+ nn.Linear(2, 4),
+ nn.Linear(4, 2),
+ nn.Linear(2, 1),
+ )
# Without Pipe
outputs = model(inputs)
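Editor's note: much of the churn in this patch comes from Black's "magic trailing comma": the nn.Sequential(...) call above already ended with a comma, so the formatter expands it to one element per line rather than keeping the one-liner. A quick way to observe the rule directly (assumes the black package is installed; format_str and Mode are part of its Python API):

import black

# A call that already ends with a trailing comma is always exploded,
# one element per line, regardless of line length.
print(black.format_str("f(1, 2, 3,)\n", mode=black.Mode()))

# Without the trailing comma, a call that fits on one line is left alone.
print(black.format_str("f(1, 2, 3)\n", mode=black.Mode()))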
diff --git a/test/distributed/pipeline/sync/test_worker.py b/test/distributed/pipeline/sync/test_worker.py
index 7d347d48a2..ca5d99c576 100644
--- a/test/distributed/pipeline/sync/test_worker.py
+++ b/test/distributed/pipeline/sync/test_worker.py
@@ -13,7 +13,7 @@ import torch
from torch.distributed.pipeline.sync.microbatch import Batch
from torch.distributed.pipeline.sync.stream import CPUStream
-from torch.distributed.pipeline.sync.worker import Task, spawn_workers
+from torch.distributed.pipeline.sync.worker import spawn_workers, Task
from torch.testing._internal.common_utils import run_tests
@@ -25,6 +25,7 @@ class fake_device:
type = "fake"
index = None
+
def test_compute_multithreading():
"""Task.compute should be executed on multiple threads."""
thread_ids = set()
diff --git a/test/distributed/rpc/cuda/test_tensorpipe_agent.py b/test/distributed/rpc/cuda/test_tensorpipe_agent.py
index cef2e9d36a..12af5036b1 100644
--- a/test/distributed/rpc/cuda/test_tensorpipe_agent.py
+++ b/test/distributed/rpc/cuda/test_tensorpipe_agent.py
@@ -9,19 +9,19 @@ if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
+import torch
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed.rpc.tensorpipe_rpc_agent_test_fixture import (
TensorPipeRpcAgentTestFixture,
)
from torch.testing._internal.distributed.rpc_utils import (
+ generate_tests,
GENERIC_CUDA_TESTS,
TENSORPIPE_CUDA_TESTS,
- generate_tests,
)
-import torch
if torch.cuda.is_available():
- torch.cuda.memory._set_allocator_settings('expandable_segments:False')
+ torch.cuda.memory._set_allocator_settings("expandable_segments:False")
globals().update(
generate_tests(
diff --git a/test/distributed/rpc/test_share_memory.py b/test/distributed/rpc/test_share_memory.py
index 8b538c44b6..c587023722 100644
--- a/test/distributed/rpc/test_share_memory.py
+++ b/test/distributed/rpc/test_share_memory.py
@@ -1,34 +1,36 @@
#!/usr/bin/env python3
# Owner(s): ["oncall: distributed"]
-import torch
-import torch.distributed as dist
-
import contextlib
import copyreg
import os
import sys
+import torch
+import torch.distributed as dist
+
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
-from torch import multiprocessing
-import torch.multiprocessing.reductions as TorchMpReductions
import torch.distributed.rpc as rpc
-from torch.distributed.rpc.internal import _InternalRPCPickler
+import torch.multiprocessing.reductions as TorchMpReductions
+from torch import multiprocessing
from torch.distributed.rpc.api import _use_rpc_pickler
-from torch.testing._internal.common_utils import TestCase, run_tests
+from torch.distributed.rpc.internal import _InternalRPCPickler
+from torch.testing._internal.common_utils import run_tests, TestCase
+
@contextlib.contextmanager
def fs_sharing():
prev_strategy = multiprocessing.get_sharing_strategy()
- multiprocessing.set_sharing_strategy('file_system')
+ multiprocessing.set_sharing_strategy("file_system")
try:
yield
finally:
multiprocessing.set_sharing_strategy(prev_strategy)
+
class ShareMemoryRPCPickler(_InternalRPCPickler):
def __init__(self) -> None:
super().__init__()
@@ -46,38 +48,36 @@ class ShareMemoryRPCPickler(_InternalRPCPickler):
torch.nn.parameter.Parameter
] = TorchMpReductions.reduce_tensor
+
def worker_loop(a):
- rpc.init_rpc('worker1', rank=1, world_size=2)
+ rpc.init_rpc("worker1", rank=1, world_size=2)
rpc.shutdown()
+
def worker_fn(m):
pass
+
class TestRPCPickler(TestCase):
def test_case(self):
- os.environ['MASTER_ADDR'] = 'localhost'
- os.environ['MASTER_PORT'] = '29500'
+ os.environ["MASTER_ADDR"] = "localhost"
+ os.environ["MASTER_PORT"] = "29500"
with fs_sharing():
r = multiprocessing.spawn(worker_loop, join=False)
try:
with _use_rpc_pickler(ShareMemoryRPCPickler()):
- rpc.init_rpc(
- 'worker0',
- rank=0,
- world_size=2)
+ rpc.init_rpc("worker0", rank=0, world_size=2)
m = torch.nn.Linear(1, 2)
m.share_memory()
- rref = rpc.remote(
- 'worker1',
- worker_fn,
- args=(m,))
+ rref = rpc.remote("worker1", worker_fn, args=(m,))
rref.to_here()
finally:
rpc.shutdown()
r.join()
-if __name__ == '__main__':
+
+if __name__ == "__main__":
run_tests()
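Editor's note: the fs_sharing() helper above follows a general pattern worth keeping in mind: temporarily swap a process-wide setting and restore it in a finally block so later tests are unaffected. A minimal standalone version using the same torch.multiprocessing calls (the context-manager name here is illustrative):

import contextlib

from torch import multiprocessing


@contextlib.contextmanager
def sharing_strategy(strategy: str):
    # Switch how CPU tensors are shared across processes (e.g. "file_system"),
    # then always restore the previous strategy, even if the body raises.
    previous = multiprocessing.get_sharing_strategy()
    multiprocessing.set_sharing_strategy(strategy)
    try:
        yield
    finally:
        multiprocessing.set_sharing_strategy(previous)


if __name__ == "__main__":
    with sharing_strategy("file_system"):
        pass  # spawn workers / build shared tensors here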
diff --git a/test/distributed/rpc/test_tensorpipe_agent.py b/test/distributed/rpc/test_tensorpipe_agent.py
index bb0870dd50..56f176a17c 100644
--- a/test/distributed/rpc/test_tensorpipe_agent.py
+++ b/test/distributed/rpc/test_tensorpipe_agent.py
@@ -15,9 +15,9 @@ from torch.testing._internal.distributed.rpc.tensorpipe_rpc_agent_test_fixture i
TensorPipeRpcAgentTestFixture,
)
from torch.testing._internal.distributed.rpc_utils import (
+ generate_tests,
GENERIC_TESTS,
TENSORPIPE_TESTS,
- generate_tests,
)
diff --git a/test/distributed/tensor/parallel/test_ddp_2d_parallel.py b/test/distributed/tensor/parallel/test_ddp_2d_parallel.py
index ef059d9933..8c69bf25a8 100644
--- a/test/distributed/tensor/parallel/test_ddp_2d_parallel.py
+++ b/test/distributed/tensor/parallel/test_ddp_2d_parallel.py
@@ -2,7 +2,7 @@
import torch
import torch.distributed as dist
-from torch.distributed._tensor import DeviceMesh, DTensor, Replicate, init_device_mesh
+from torch.distributed._tensor import DeviceMesh, DTensor, init_device_mesh, Replicate
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
@@ -39,7 +39,11 @@ def init_model(device_type, model_parallel_size=TP_DEGREE):
device_type=device_type,
mesh=torch.arange(0, world_size).view(-1, model_parallel_size),
)
- mesh_2d = init_device_mesh(device_type, (world_size // model_parallel_size, model_parallel_size), mesh_dim_names=("dp", "tp"))
+ mesh_2d = init_device_mesh(
+ device_type,
+ (world_size // model_parallel_size, model_parallel_size),
+ mesh_dim_names=("dp", "tp"),
+ )
dp_pg = mesh_2d.get_group(mesh_dim=0)
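Editor's note: the reformatted init_device_mesh call above is the usual recipe for a 2-D data-parallel x tensor-parallel mesh. A condensed sketch, assuming the default process group has already been initialized (e.g. under torchrun) with a world size divisible by the TP degree:

import torch.distributed as dist
from torch.distributed._tensor import init_device_mesh

# Assumes dist.init_process_group(...) has already run, e.g. via torchrun.
tp_degree = 2
world_size = dist.get_world_size()

mesh_2d = init_device_mesh(
    "cuda",
    (world_size // tp_degree, tp_degree),
    mesh_dim_names=("dp", "tp"),
)
dp_group = mesh_2d.get_group(mesh_dim=0)  # hand this to DDP/FSDP
tp_mesh = mesh_2d["tp"]                   # hand this to parallelize_module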
diff --git a/test/distributed/tensor/parallel/test_fsdp_2d_parallel.py b/test/distributed/tensor/parallel/test_fsdp_2d_parallel.py
index 64bc628d1b..329131290c 100644
--- a/test/distributed/tensor/parallel/test_fsdp_2d_parallel.py
+++ b/test/distributed/tensor/parallel/test_fsdp_2d_parallel.py
@@ -381,7 +381,9 @@ class TestNew2dParallelStateDict(DTensorTestBase):
"net1": ColwiseParallel(),
"net2": RowwiseParallel(),
}
- model_2d = parallelize_module(simple_model().cuda(), mesh_2d["tp"], parallelize_plan)
+ model_2d = parallelize_module(
+ simple_model().cuda(), mesh_2d["tp"], parallelize_plan
+ )
model_2d = FSDP(model_2d, device_mesh=mesh_2d["dp"], use_orig_params=True)
FSDP.set_state_dict_type(
model_2d,
diff --git a/test/distributed/tensor/parallel/test_parallelize_api.py b/test/distributed/tensor/parallel/test_parallelize_api.py
index ed5a7361d0..53f92ecd0d 100644
--- a/test/distributed/tensor/parallel/test_parallelize_api.py
+++ b/test/distributed/tensor/parallel/test_parallelize_api.py
@@ -4,9 +4,7 @@ from copy import deepcopy
import torch
from torch.distributed._tensor import DeviceMesh, DTensor, Replicate, Shard
-from torch.distributed.tensor.parallel.api import (
- parallelize_module,
-)
+from torch.distributed.tensor.parallel.api import parallelize_module
from torch.distributed.tensor.parallel.style import (
ColwiseParallel,
PrepareModuleInput,
@@ -177,9 +175,8 @@ class TensorParallelAPITests(DTensorTestBase):
module,
device_mesh,
PrepareModuleInput(
- input_layouts=Shard(0),
- desired_input_layouts=Replicate()
- )
+ input_layouts=Shard(0), desired_input_layouts=Replicate()
+ ),
)
inp = torch.rand(5, 7, device=self.device_type)
output = module(inp).redistribute(device_mesh, [Shard(0)]).to_local()
@@ -193,9 +190,8 @@ class TensorParallelAPITests(DTensorTestBase):
module,
device_mesh,
PrepareModuleOutput(
- output_layouts=Replicate(),
- desired_output_layouts=Shard(0)
- )
+ output_layouts=Replicate(), desired_output_layouts=Shard(0)
+ ),
)
torch.manual_seed(15)
inp = torch.rand(16, 7, device=self.device_type)
diff --git a/test/distributed/tensor/parallel/test_tp_examples.py b/test/distributed/tensor/parallel/test_tp_examples.py
index 1733c3065a..c85032fe2f 100644
--- a/test/distributed/tensor/parallel/test_tp_examples.py
+++ b/test/distributed/tensor/parallel/test_tp_examples.py
@@ -1,12 +1,19 @@
# Copyright (c) Meta Platforms, Inc. and affiliates
# Owner(s): ["oncall: distributed"]
-from copy import deepcopy
import itertools
+from copy import deepcopy
+
import torch
import torch.distributed as dist
import torch.nn.functional as F
-from torch.distributed._tensor import DeviceMesh, DTensor, Replicate, Shard, distribute_tensor
+from torch.distributed._tensor import (
+ DeviceMesh,
+ distribute_tensor,
+ DTensor,
+ Replicate,
+ Shard,
+)
from torch.distributed._tensor.debug import CommDebugMode
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
checkpoint_wrapper,
@@ -37,6 +44,7 @@ from torch.testing._internal.distributed._tensor.common_dtensor import (
c10d_functional = torch.ops.c10d_functional
+
class DistTensorParallelExampleTest(DTensorTestBase):
def _check_module(self, m1, m2, check_grad=False):
named_parameters = dict(m1.named_parameters())
@@ -95,6 +103,7 @@ class DistTensorParallelExampleTest(DTensorTestBase):
output.sum().backward()
from torch.distributed._tensor.debug import CommDebugMode
+
comm_mode = CommDebugMode()
with comm_mode:
output_tp = model_tp(inp)
@@ -102,8 +111,12 @@ class DistTensorParallelExampleTest(DTensorTestBase):
self.assertEqual(output, output_tp)
if is_seq_parallel:
- self.assertEqual(comm_mode.get_comm_counts()[c10d_functional.all_gather_into_tensor], 2)
- self.assertEqual(comm_mode.get_comm_counts()[c10d_functional.reduce_scatter_tensor], 1)
+ self.assertEqual(
+ comm_mode.get_comm_counts()[c10d_functional.all_gather_into_tensor], 2
+ )
+ self.assertEqual(
+ comm_mode.get_comm_counts()[c10d_functional.reduce_scatter_tensor], 1
+ )
else:
self.assertEqual(comm_mode.get_comm_counts()[c10d_functional.all_reduce], 1)
@@ -214,16 +227,22 @@ class DistTensorParallelExampleTest(DTensorTestBase):
output_tp = model_tp(inp)
self.assertEqual(output, output_tp)
if is_seq_parallel:
- self.assertDictEqual(comm_mode.get_comm_counts(), {
- c10d_functional.all_reduce: 1,
- c10d_functional.reduce_scatter_tensor: 4,
- c10d_functional.all_gather_into_tensor: 7,
- })
+ self.assertDictEqual(
+ comm_mode.get_comm_counts(),
+ {
+ c10d_functional.all_reduce: 1,
+ c10d_functional.reduce_scatter_tensor: 4,
+ c10d_functional.all_gather_into_tensor: 7,
+ },
+ )
else:
- self.assertDictEqual(comm_mode.get_comm_counts(), {
- c10d_functional.all_reduce: 5,
- c10d_functional.all_gather_into_tensor: 2,
- })
+ self.assertDictEqual(
+ comm_mode.get_comm_counts(),
+ {
+ c10d_functional.all_reduce: 5,
+ c10d_functional.all_gather_into_tensor: 2,
+ },
+ )
# Ensure gradients are equal.
output.sum().backward()
@@ -231,15 +250,21 @@ class DistTensorParallelExampleTest(DTensorTestBase):
output_tp.sum().backward()
self._check_module(model, model_tp, check_grad=True)
if is_seq_parallel:
- self.assertDictEqual(comm_mode.get_comm_counts(), {
- c10d_functional.reduce_scatter_tensor: 4,
- c10d_functional.all_gather_into_tensor: 7,
- })
+ self.assertDictEqual(
+ comm_mode.get_comm_counts(),
+ {
+ c10d_functional.reduce_scatter_tensor: 4,
+ c10d_functional.all_gather_into_tensor: 7,
+ },
+ )
else:
- self.assertDictEqual(comm_mode.get_comm_counts(), {
- c10d_functional.all_reduce: 8,
- c10d_functional.all_gather_into_tensor: 1,
- })
+ self.assertDictEqual(
+ comm_mode.get_comm_counts(),
+ {
+ c10d_functional.all_reduce: 8,
+ c10d_functional.all_gather_into_tensor: 1,
+ },
+ )
# Ensure model weights are still the same after update.
optim.step()
@@ -247,9 +272,12 @@ class DistTensorParallelExampleTest(DTensorTestBase):
optim_tp.step()
self._check_module(model, model_tp)
if is_seq_parallel:
- self.assertDictEqual(comm_mode.get_comm_counts(), {
- c10d_functional.all_reduce: 30,
- })
+ self.assertDictEqual(
+ comm_mode.get_comm_counts(),
+ {
+ c10d_functional.all_reduce: 30,
+ },
+ )
else:
self.assertDictEqual(comm_mode.get_comm_counts(), {})
@@ -330,7 +358,9 @@ class DistTensorParallelExampleTest(DTensorTestBase):
with loss_parallel():
if shard_dim == channel_dim:
with comm_mode:
- dist_y = F.cross_entropy(dist_x, target, weight, reduction=reduction)
+ dist_y = F.cross_entropy(
+ dist_x, target, weight, reduction=reduction
+ )
self.assertEqual(comm_mode.get_total_counts(), 3)
self.assertEqual(
comm_mode.get_comm_counts()[c10d_functional.all_reduce],
@@ -347,7 +377,9 @@ class DistTensorParallelExampleTest(DTensorTestBase):
y.backward()
dist_y.backward()
self.assertEqual(comm_mode.get_total_counts(), 0)
- self.assertTrue(dist_x.grad.placements[0].is_shard(shard_dim))
+ self.assertTrue(
+ dist_x.grad.placements[0].is_shard(shard_dim)
+ )
self.assertEqual(dist_x.grad.full_tensor(), x.grad)
x.grad.zero_()
else:
@@ -355,8 +387,9 @@ class DistTensorParallelExampleTest(DTensorTestBase):
ValueError,
"loss_parallel",
):
- dist_y = F.cross_entropy(dist_x, target, reduction=reduction)
-
+ dist_y = F.cross_entropy(
+ dist_x, target, reduction=reduction
+ )
instantiate_parametrized_tests(DistTensorParallelExampleTest)
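Editor's note: the count assertions above all follow one pattern: run the forward/backward under CommDebugMode and inspect the recorded collective counts. A stripped-down, local-only sketch of that pattern (the expected count is zero because no collectives are dispatched; assumes CommDebugMode can be entered without an initialized process group):

import torch
from torch.distributed._tensor.debug import CommDebugMode

comm_mode = CommDebugMode()
with comm_mode:
    # Purely local computation: no functional collectives run here.
    y = torch.ones(4) * 2
    y.sum()

# get_total_counts()/get_comm_counts() are the accessors the tests use.
assert comm_mode.get_total_counts() == 0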
diff --git a/test/distributed/tensor/parallel/test_tp_random_state.py b/test/distributed/tensor/parallel/test_tp_random_state.py
index 366006ac73..5c83802076 100644
--- a/test/distributed/tensor/parallel/test_tp_random_state.py
+++ b/test/distributed/tensor/parallel/test_tp_random_state.py
@@ -102,7 +102,9 @@ class TensorParallelRandomStateTests(DTensorTestBase):
# each rank within a TP group has the same initial weights
self.assertEqual(tensor1, tensor2)
- self.check_gathered_tensors(tp_rank, tp_size, tensor_gather, tp_weights_assert)
+ self.check_gathered_tensors(
+ tp_rank, tp_size, tensor_gather, tp_weights_assert
+ )
# check across TP groups
# all-gather local shards
@@ -123,7 +125,9 @@ class TensorParallelRandomStateTests(DTensorTestBase):
# random seeds set in data loading.
self.assertNotEqual(tensor1, tensor2)
- self.check_gathered_tensors(dp_rank, dp_size, tensor_gather, dp_weights_assert)
+ self.check_gathered_tensors(
+ dp_rank, dp_size, tensor_gather, dp_weights_assert
+ )
if __name__ == "__main__":
diff --git a/test/distributed/tensor/parallel/test_tp_style.py b/test/distributed/tensor/parallel/test_tp_style.py
index 47bd32a0c7..ab4f1ab8a7 100644
--- a/test/distributed/tensor/parallel/test_tp_style.py
+++ b/test/distributed/tensor/parallel/test_tp_style.py
@@ -6,9 +6,15 @@ from copy import deepcopy
import torch
import torch.nn as nn
-from torch.distributed._tensor import Replicate, Shard, init_device_mesh, distribute_tensor, DTensor
-from torch.distributed._tensor.placement_types import _Partial
+from torch.distributed._tensor import (
+ distribute_tensor,
+ DTensor,
+ init_device_mesh,
+ Replicate,
+ Shard,
+)
from torch.distributed._tensor.debug import CommDebugMode
+from torch.distributed._tensor.placement_types import _Partial
from torch.distributed.tensor.parallel import parallelize_module
from torch.distributed.tensor.parallel.style import (
ColwiseParallel,
@@ -20,14 +26,15 @@ from torch.distributed.tensor.parallel.style import (
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
- with_comms,
NUM_DEVICES,
RMSNormPython,
+ with_comms,
)
c10d_functional = torch.ops.c10d_functional
+
class TensorParallelStyleTest(DTensorTestBase):
@property
def world_size(self):
@@ -43,7 +50,9 @@ class TensorParallelStyleTest(DTensorTestBase):
default_col_parallel = ColwiseParallel()
with comm_mode:
- colwise_mod = parallelize_module(deepcopy(model), mesh, default_col_parallel)
+ colwise_mod = parallelize_module(
+ deepcopy(model), mesh, default_col_parallel
+ )
out = colwise_mod(tensor)
# ensure output shard on the last dim
self.assertEqual(out.shape, (8, 16 // self.world_size))
@@ -57,17 +66,23 @@ class TensorParallelStyleTest(DTensorTestBase):
sharded_col_parallel = ColwiseParallel(input_layouts=Shard(0))
with comm_mode:
- colwise_mod = parallelize_module(deepcopy(model), mesh, sharded_col_parallel)
+ colwise_mod = parallelize_module(
+ deepcopy(model), mesh, sharded_col_parallel
+ )
out = colwise_mod(tensor)
# ensure output shard on the last dim
self.assertEqual(out.shape, (8 * self.world_size, 16 // self.world_size))
# allgather in fwd
- self.assertEqual(comm_mode.get_comm_counts()[c10d_functional.all_gather_into_tensor], 1)
+ self.assertEqual(
+ comm_mode.get_comm_counts()[c10d_functional.all_gather_into_tensor], 1
+ )
self.assertEqual(comm_mode.get_total_counts(), 1)
out.sum().backward()
# reduce_scatter in bwd
- self.assertEqual(comm_mode.get_comm_counts()[c10d_functional.reduce_scatter_tensor], 1)
+ self.assertEqual(
+ comm_mode.get_comm_counts()[c10d_functional.reduce_scatter_tensor], 1
+ )
self.assertEqual(comm_mode.get_total_counts(), 2)
@with_comms
@@ -80,7 +95,9 @@ class TensorParallelStyleTest(DTensorTestBase):
default_col_parallel = ColwiseParallel()
with comm_mode:
- colwise_mod = parallelize_module(deepcopy(model), mesh, default_col_parallel)
+ colwise_mod = parallelize_module(
+ deepcopy(model), mesh, default_col_parallel
+ )
out = colwise_mod(tensor)
# ensure output shard on the last dim
self.assertEqual(out.shape, (4, 2, 16 // self.world_size))
@@ -96,12 +113,16 @@ class TensorParallelStyleTest(DTensorTestBase):
mesh = init_device_mesh(self.device_type, (self.world_size,))
comm_mode = CommDebugMode()
- tensor = torch.rand(8, 16 // self.world_size, device=self.device_type, requires_grad=True)
+ tensor = torch.rand(
+ 8, 16 // self.world_size, device=self.device_type, requires_grad=True
+ )
model = nn.Linear(16, 16, device=self.device_type)
default_row_parallel = RowwiseParallel()
with comm_mode:
- rowwise_mod = parallelize_module(deepcopy(model), mesh, default_row_parallel)
+ rowwise_mod = parallelize_module(
+ deepcopy(model), mesh, default_row_parallel
+ )
out = rowwise_mod(tensor)
# ensure output replicated
self.assertEqual(out.shape, (8, 16))
@@ -115,17 +136,23 @@ class TensorParallelStyleTest(DTensorTestBase):
sharded_row_parallel = RowwiseParallel(output_layouts=Shard(0))
with comm_mode:
- rowwise_mod = parallelize_module(deepcopy(model), mesh, sharded_row_parallel)
+ rowwise_mod = parallelize_module(
+ deepcopy(model), mesh, sharded_row_parallel
+ )
out = rowwise_mod(tensor)
# ensure output replicated
self.assertEqual(out.shape, (8 // self.world_size, 16))
# reduce_scatter in fwd
- self.assertEqual(comm_mode.get_comm_counts()[c10d_functional.reduce_scatter_tensor], 1)
+ self.assertEqual(
+ comm_mode.get_comm_counts()[c10d_functional.reduce_scatter_tensor], 1
+ )
self.assertEqual(comm_mode.get_total_counts(), 1)
out.sum().backward()
# allgather in bwd
- self.assertEqual(comm_mode.get_comm_counts()[c10d_functional.all_gather_into_tensor], 1)
+ self.assertEqual(
+ comm_mode.get_comm_counts()[c10d_functional.all_gather_into_tensor], 1
+ )
self.assertEqual(comm_mode.get_total_counts(), 2)
@with_comms
@@ -137,7 +164,9 @@ class TensorParallelStyleTest(DTensorTestBase):
model = nn.Embedding(16, 16, device=self.device_type)
with comm_mode:
- rowwise_mod = parallelize_module(deepcopy(model), mesh, RowwiseParallel(input_layouts=Replicate()))
+ rowwise_mod = parallelize_module(
+ deepcopy(model), mesh, RowwiseParallel(input_layouts=Replicate())
+ )
out = rowwise_mod(tensor)
# ensure output shard on the last dim
self.assertEqual(out.shape, (4, 2, 16))
@@ -149,21 +178,21 @@ class TensorParallelStyleTest(DTensorTestBase):
# no comm in bwd
self.assertEqual(comm_mode.get_total_counts(), 1)
-
@with_comms
def test_prepare_module_input(self):
mesh = init_device_mesh(self.device_type, (self.world_size,))
tensor = torch.ones(2, 16, device=self.device_type)
expected_tensor = torch.ones(2 * self.world_size, 16, device=self.device_type)
- prepare_inp_style = PrepareModuleInput(input_layouts=Shard(0), desired_input_layouts=Replicate())
+ prepare_inp_style = PrepareModuleInput(
+ input_layouts=Shard(0), desired_input_layouts=Replicate()
+ )
model = nn.Identity()
allgather_mod = parallelize_module(model, mesh, prepare_inp_style)
output = allgather_mod(tensor).full_tensor()
self.assertEqual(output, expected_tensor)
-
@with_comms
def test_prepare_module_input_multiple_inputs(self):
mesh = init_device_mesh(self.device_type, (self.world_size,))
@@ -178,26 +207,41 @@ class TensorParallelStyleTest(DTensorTestBase):
# Raise assertion error if input_layouts and desired_input_layouts do not have same length.
test_mod = TestModule().to(self.device_type)
- with self.assertRaisesRegex(AssertionError, "input_layouts and desired_input_layouts should have same length!"):
- prepare_inps_dimension_mismatch = PrepareModuleInput(input_layouts=Shard(0), desired_input_layouts=(Replicate(), None))
+ with self.assertRaisesRegex(
+ AssertionError,
+ "input_layouts and desired_input_layouts should have same length!",
+ ):
+ prepare_inps_dimension_mismatch = PrepareModuleInput(
+ input_layouts=Shard(0), desired_input_layouts=(Replicate(), None)
+ )
# Raise assertion error if module inputs and input_layouts do not have same length.
- prepare_inps_short_dimension = PrepareModuleInput(input_layouts=Shard(0), desired_input_layouts=Replicate())
+ prepare_inps_short_dimension = PrepareModuleInput(
+ input_layouts=Shard(0), desired_input_layouts=Replicate()
+ )
parallelize_module(test_mod.linear, mesh, ColwiseParallel())
parallelize_module(test_mod, mesh, prepare_inps_short_dimension)
- with self.assertRaisesRegex(ValueError, "module inputs and input_layouts should have same length!"):
+ with self.assertRaisesRegex(
+ ValueError, "module inputs and input_layouts should have same length!"
+ ):
output = test_mod(
torch.randn(2, 8, device=self.device_type),
- torch.ones(self.world_size * 2, 8 // self.world_size, device=self.device_type)
+ torch.ones(
+ self.world_size * 2, 8 // self.world_size, device=self.device_type
+ ),
)
test_mod = TestModule().to(self.device_type)
- prepare_inps = PrepareModuleInput(input_layouts=(Shard(0), None), desired_input_layouts=(Replicate(), None))
+ prepare_inps = PrepareModuleInput(
+ input_layouts=(Shard(0), None), desired_input_layouts=(Replicate(), None)
+ )
parallelize_module(test_mod.linear, mesh, ColwiseParallel())
parallelize_module(test_mod, mesh, prepare_inps)
output = test_mod(
torch.randn(2, 8, device=self.device_type),
- torch.ones(self.world_size * 2, 8 // self.world_size, device=self.device_type)
+ torch.ones(
+ self.world_size * 2, 8 // self.world_size, device=self.device_type
+ ),
)
self.assertEqual(output.shape, (self.world_size * 2, 8 // self.world_size))
@@ -207,7 +251,9 @@ class TensorParallelStyleTest(DTensorTestBase):
tensor = torch.ones(8, 16, device=self.device_type)
expected_tensor = torch.ones(8 // self.world_size, 16, device=self.device_type)
- prepare_out_style = PrepareModuleOutput(output_layouts=Replicate(), desired_output_layouts=Shard(0))
+ prepare_out_style = PrepareModuleOutput(
+ output_layouts=Replicate(), desired_output_layouts=Shard(0)
+ )
model = nn.Identity()
chunk_mod = parallelize_module(model, mesh, prepare_out_style)
@@ -221,12 +267,22 @@ class TensorParallelStyleTest(DTensorTestBase):
comm_mode = CommDebugMode()
batch, N, embedding_dim = 20, 8, 12
- global_input = torch.rand(batch, N * self.world_size, embedding_dim, device=self.device_type, requires_grad=True)
+ global_input = torch.rand(
+ batch,
+ N * self.world_size,
+ embedding_dim,
+ device=self.device_type,
+ requires_grad=True,
+ )
sharded_input = distribute_tensor(global_input, mesh, [Shard(1)])
# test LayerNorm
for elementwise_affine in [True, False]:
- norm = nn.LayerNorm(embedding_dim, elementwise_affine=elementwise_affine, device=self.device_type)
+ norm = nn.LayerNorm(
+ embedding_dim,
+ elementwise_affine=elementwise_affine,
+ device=self.device_type,
+ )
sp_norm = parallelize_module(deepcopy(norm), mesh, SequenceParallel())
output = norm(global_input)
@@ -239,7 +295,9 @@ class TensorParallelStyleTest(DTensorTestBase):
self.assertIsInstance(sharded_out, DTensor)
self.assertEqual(sharded_out.placements, (Shard(1),))
self.assertEqual(comm_mode.get_total_counts(), 0)
- self.assertEqual(comm_mode.get_comm_counts()[c10d_functional.all_reduce], 0)
+ self.assertEqual(
+ comm_mode.get_comm_counts()[c10d_functional.all_reduce], 0
+ )
if elementwise_affine:
self.assertEqual(sp_norm.weight.grad.placements, (_Partial(),))
self.assertEqual(sp_norm.bias.grad.placements, (_Partial(),))
diff --git a/test/distributed/test_c10d_common.py b/test/distributed/test_c10d_common.py
index dd17e7dc03..97660ad0d8 100644
--- a/test/distributed/test_c10d_common.py
+++ b/test/distributed/test_c10d_common.py
@@ -21,8 +21,8 @@ if not dist.is_available():
print("distributed package not available, skipping tests", file=sys.stderr)
sys.exit(0)
-import torch.distributed.distributed_c10d as c10d
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
+import torch.distributed.distributed_c10d as c10d
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
@@ -34,13 +34,13 @@ from torch.testing._internal.common_distributed import (
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
- retry_on_connect_failures,
- TestCase,
+ instantiate_parametrized_tests,
load_tests,
+ parametrize,
+ retry_on_connect_failures,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
- instantiate_parametrized_tests,
- parametrize
+ TestCase,
)
from torch.utils.checkpoint import checkpoint
@@ -141,7 +141,13 @@ class TimeoutTest(TestCase):
if init_type == "file":
barrier_store = dist.FileStore(f.name)
elif init_type == "tcp":
- barrier_store = dist.TCPStore("localhost", port, world_size, is_master=rank == 0, wait_for_workers=False)
+ barrier_store = dist.TCPStore(
+ "localhost",
+ port,
+ world_size,
+ is_master=rank == 0,
+ wait_for_workers=False,
+ )
elif init_type == "hash":
barrier_store = dist.HashStore()
try:
@@ -153,7 +159,7 @@ class TimeoutTest(TestCase):
group_name="_",
rendezvous_count=world_size,
timeout=timeout,
- logging_interval=timeout / 2
+ logging_interval=timeout / 2,
)
except torch.distributed.DistStoreError as e:
self.assertTrue(isinstance(e, torch.distributed.DistError))
@@ -165,7 +171,14 @@ class TimeoutTest(TestCase):
for init_type in ["file", "tcp", "hash"]:
for rank in range(world_size):
t = threading.Thread(
- target=thread_work, args=(timedelta(seconds=3), init_type, world_size, rank, error_list,)
+ target=thread_work,
+ args=(
+ timedelta(seconds=3),
+ init_type,
+ world_size,
+ rank,
+ error_list,
+ ),
)
threads.append(t)
t.start()
@@ -176,10 +189,14 @@ class TimeoutTest(TestCase):
# we expect the world_size-1 threads to have failed
self.assertEqual(len(error_list), world_size - 1)
for error in error_list:
- self.assertTrue("Timed out initializing process group in store based barrier" in error.args[0])
+ self.assertTrue(
+ "Timed out initializing process group in store based barrier"
+ in error.args[0]
+ )
error_list = []
threads = []
+
class Net(nn.Module):
def __init__(self):
super().__init__()
@@ -375,7 +392,9 @@ class CommonDistributedDataParallelTest:
def _get_process_group(self):
raise NotImplementedError("To be implemented by child class")
- def _train_model(self, model, input_var, target, loss, run_checkpoint=False, use_reentrant=True):
+ def _train_model(
+ self, model, input_var, target, loss, run_checkpoint=False, use_reentrant=True
+ ):
model.train()
if run_checkpoint:
output = checkpoint(model, input_var, use_reentrant=use_reentrant)
@@ -418,9 +437,21 @@ class CommonDistributedDataParallelTest:
for i in range(n_iters):
model.zero_grad(set_to_none=False)
ddp_model.zero_grad(set_to_none=False)
- self._train_model(model, input, target, loss, run_checkpoint=run_checkpoint, use_reentrant=use_reentrant)
self._train_model(
- ddp_model, ddp_input, ddp_target, loss, run_checkpoint=run_checkpoint, use_reentrant=use_reentrant
+ model,
+ input,
+ target,
+ loss,
+ run_checkpoint=run_checkpoint,
+ use_reentrant=use_reentrant,
+ )
+ self._train_model(
+ ddp_model,
+ ddp_input,
+ ddp_target,
+ loss,
+ run_checkpoint=run_checkpoint,
+ use_reentrant=use_reentrant,
)
for i, j in zip(model.parameters(), ddp_model.parameters()):
if not allow_none_grads:
@@ -436,6 +467,7 @@ class CommonDistributedDataParallelTest:
"""
Runs checkpoint for a single layer in the model.
"""
+
def __init__(self, use_reentrant=True):
super().__init__()
self.l1 = nn.Linear(20, 20)
@@ -453,6 +485,7 @@ class CommonDistributedDataParallelTest:
cases such as pipeline parallel where the same layer can be checkpointed
more than one time.
"""
+
def __init__(self, use_reentrant=True):
super().__init__(use_reentrant=use_reentrant)
@@ -466,6 +499,7 @@ class CommonDistributedDataParallelTest:
"""
Similar to CheckpointTwiceModule but the weights are shared.
"""
+
def __init__(self, use_reentrant=True):
super().__init__(use_reentrant=use_reentrant)
# Share weights
@@ -477,7 +511,6 @@ class CommonDistributedDataParallelTest:
x = checkpoint(self.l2, x, use_reentrant=self.use_reentrant)
return x
-
class DynamicCheckpointTwiceModule(CheckpointTwiceModule):
def __init__(self, use_reentrant=True):
super().__init__(use_reentrant=use_reentrant)
@@ -498,7 +531,6 @@ class CommonDistributedDataParallelTest:
# Share weights
self.l1.weight = self.l2.weight
-
def _prepare_dummy_data(self):
ddp_bs = 16
bs = ddp_bs * self.world_size
@@ -509,7 +541,6 @@ class CommonDistributedDataParallelTest:
ddp_target = target[offset : offset + ddp_bs]
return input, ddp_input, target, ddp_target
-
@skip_if_lt_x_gpu(2)
@parametrize("use_reentrant", [True, False])
def test_ddp_checkpointing_once(self, use_reentrant):
@@ -546,10 +577,10 @@ class CommonDistributedDataParallelTest:
process_group = self._get_process_group()
for use_bucket_view in (True, False):
err_ctx = (
- nullcontext() if not use_reentrant else
- self.assertRaisesRegex(
- RuntimeError,
- "Expected to mark a variable ready only once."
+ nullcontext()
+ if not use_reentrant
+ else self.assertRaisesRegex(
+ RuntimeError, "Expected to mark a variable ready only once."
)
)
with err_ctx:
@@ -578,10 +609,10 @@ class CommonDistributedDataParallelTest:
process_group = self._get_process_group()
for use_bucket_view in (True, False):
err_ctx = (
- nullcontext() if not use_reentrant else
- self.assertRaisesRegex(
- RuntimeError,
- "Expected to mark a variable ready only once."
+ nullcontext()
+ if not use_reentrant
+ else self.assertRaisesRegex(
+ RuntimeError, "Expected to mark a variable ready only once."
)
)
with err_ctx:
@@ -634,7 +665,7 @@ class CommonDistributedDataParallelTest:
find_unused_parameters=True,
# Grads can be none sometimes due to dynamic module not using
# all params.
- allow_none_grads=True
+ allow_none_grads=True,
)
@skip_if_lt_x_gpu(2)
@@ -653,7 +684,7 @@ class CommonDistributedDataParallelTest:
find_unused_parameters=True,
# Grads can be none sometimes due to dynamic module not using
# all params.
- allow_none_grads=True
+ allow_none_grads=True,
)
# DDP works as expected if there is weight sharing among layers
@@ -881,7 +912,7 @@ class CommonDistributedDataParallelTest:
x = torch.zeros(
(1 if self.rank != 0 else 0, 2, 11, 13),
dtype=torch.float32,
- device=self.rank
+ device=self.rank,
)
# input requires grad, this will trigger the collective communication
@@ -894,11 +925,7 @@ class CommonDistributedDataParallelTest:
self._test_not_nan(model, x)
# all ranks receive empty inputs
- x = torch.zeros(
- (0, 2, 11, 13),
- dtype=torch.float32,
- device=self.rank
- )
+ x = torch.zeros((0, 2, 11, 13), dtype=torch.float32, device=self.rank)
# input requires grad, this will trigger the collective communication
# in the backward pass
@@ -933,17 +960,13 @@ class CommonDistributedDataParallelTest:
x = torch.zeros(
(3 if self.rank != 0 else 0, 2, 30, 30),
dtype=torch.float32,
- device=self.rank
+ device=self.rank,
)
self._test_not_nan(model, x)
# all ranks receive empty inputs
- x = torch.zeros(
- (0, 2, 30, 30),
- dtype=torch.float32,
- device=self.rank
- )
+ x = torch.zeros((0, 2, 30, 30), dtype=torch.float32, device=self.rank)
self._test_not_nan(model, x)
@@ -962,16 +985,13 @@ class CommonDistributedDataParallelTest:
def forward(self, x):
o1 = None if self.skip_o1 else self.relu(self.seq1(x))
- o2 = {
- "a": self.seq2(x),
- "b": self.relu(self.seq2(x))
- }
+ o2 = {"a": self.seq2(x), "b": self.relu(self.seq2(x))}
return CommonDistributedDataParallelTest.CustomOutput(o1=o1, o2=o2)
def _test_dataclass_output(self, skip_o1):
- net_x = torch.cat(
- [torch.ones(4, 10) * i for i in range(self.world_size)]
- ).to(self.rank)
+ net_x = torch.cat([torch.ones(4, 10) * i for i in range(self.world_size)]).to(
+ self.rank
+ )
ddp_x = torch.ones(4, 10, device=self.rank) * self.rank
# use manual_seed to make sure local models start with the same values
@@ -1091,7 +1111,6 @@ class AbstractCommTest:
self.fail("test subclass didn't override device")
def _verify_sequence_number_across_pg(self, pg, verify_pg):
-
seq_num = pg._get_sequence_number_for_group()
obj_list = [None for _ in range(dist.get_world_size(verify_pg))]
# We use a separate pg to verify the sequence numbers, otherwise these
@@ -1273,11 +1292,12 @@ class AbstractCommTest:
tensor = torch.ones(2, 2, device=self.device) * 7
tensor_h = tensor.half()
- tensor_list = [torch.zeros(2, 2, device=self.device) for _ in range(self.world_size)]
+ tensor_list = [
+ torch.zeros(2, 2, device=self.device) for _ in range(self.world_size)
+ ]
tensor_list_h = list(tensor_list)
tensor_list_h[1] = tensor_list_h[1].half()
-
with self.assertRaisesRegex(ValueError, "tensors with different dtypes"):
dist.all_gather(tensor_list_h, tensor)
@@ -1329,7 +1349,9 @@ class AbstractCommTest:
tensor = torch.rand(2, device=self.device)
tensor_c = torch.view_as_complex(tensor)
- tensor_list = [torch.rand(2, device=self.device) for _ in range(self.world_size)]
+ tensor_list = [
+ torch.rand(2, device=self.device) for _ in range(self.world_size)
+ ]
tensor_list_c = list(tensor_list)
tensor_list_c[1] = torch.view_as_complex(tensor_list_c[1])
@@ -1354,6 +1376,7 @@ class AbstractCommTest:
dist.broadcast(outensor, src=0)
self.assertEqual(outensor, tensor)
+
# Variant of AbstractCommTest that expects world size of 4
class AbstractLargeCommTest:
@property
@@ -1382,7 +1405,9 @@ class AbstractLargeCommTest:
self.assertIn(rank, ranks_in)
self.assertNotIn(rank, ranks_out)
- self.assertIsNone(dist.new_group(ranks=ranks_out, use_local_synchronization=True))
+ self.assertIsNone(
+ dist.new_group(ranks=ranks_out, use_local_synchronization=True)
+ )
new_pg = dist.new_group(ranks=ranks_in, use_local_synchronization=True)
self.assertIsInstance(new_pg, dist.ProcessGroup)
@@ -1393,7 +1418,7 @@ class AbstractLargeCommTest:
self.assertEqual(
ranks_in,
dist.get_process_group_ranks(new_pg),
- f"expecting {ranks_in} but got {dist.get_process_group_ranks(new_pg)}"
+ f"expecting {ranks_in} but got {dist.get_process_group_ranks(new_pg)}",
)
def _test_new_group_local_sync_sanity_check(self, backend):
@@ -1413,12 +1438,18 @@ class AbstractLargeCommTest:
new_pg = dist.new_group(ranks=ranks_in, use_local_synchronization=True)
input_tensor = torch.tensor([pg_idx, rank], device=self.device)
- output_tensor_list = [torch.tensor([-1, -1], device=self.device,) for _ in range(new_pg.size())]
+ output_tensor_list = [
+ torch.tensor(
+ [-1, -1],
+ device=self.device,
+ )
+ for _ in range(new_pg.size())
+ ]
dist.all_gather(output_tensor_list, input_tensor, group=new_pg)
expected = [
torch.tensor([pg_idx, ranks_in[0]], device=self.device),
- torch.tensor([pg_idx, ranks_in[1]], device=self.device)
+ torch.tensor([pg_idx, ranks_in[1]], device=self.device),
]
self.assertEqual(output_tensor_list, expected)
@@ -1449,13 +1480,17 @@ class AbstractLargeCommTest:
input_tensor = torch.tensor([pg_idx, rank], device=self.device)
for new_pg in new_pgs:
output_tensor_list = [
- torch.tensor([-1, -1], device=self.device,) for _ in range(new_pg.size())
+ torch.tensor(
+ [-1, -1],
+ device=self.device,
+ )
+ for _ in range(new_pg.size())
]
dist.all_gather(output_tensor_list, input_tensor, group=new_pg)
expected = [
torch.tensor([pg_idx, ranks_in[0]], device=self.device),
- torch.tensor([pg_idx, ranks_in[1]], device=self.device)
+ torch.tensor([pg_idx, ranks_in[1]], device=self.device),
]
self.assertEqual(output_tensor_list, expected)
@@ -1507,7 +1542,9 @@ class CommTest(AbstractCommTest, MultiProcessTestCase):
for mode in invalid_debug_modes:
os.environ["TORCH_DISTRIBUTED_DEBUG"] = str(mode)
- with self.assertRaisesRegex(ValueError, "The value of TORCH_DISTRIBUTED_DEBUG must"):
+ with self.assertRaisesRegex(
+ ValueError, "The value of TORCH_DISTRIBUTED_DEBUG must"
+ ):
dist.set_debug_level_from_env()
@@ -1523,7 +1560,9 @@ class DummyProcessGroup(dist.ProcessGroup):
return "Dummy"
def allgather(self, output_tensor_lists, input_tensor_list, opts=None):
- for output_tensor_list, input_tensor in zip(output_tensor_lists, input_tensor_list):
+ for output_tensor_list, input_tensor in zip(
+ output_tensor_lists, input_tensor_list
+ ):
for output_tensor in output_tensor_list:
output_tensor.copy_(input_tensor)
@@ -1561,7 +1600,9 @@ class DummyProcessGroup(dist.ProcessGroup):
return DummyWork()
def reduce_scatter(self, output_tensor_list, input_tensor_lists, opts=None):
- for output_tensor, input_tensor_list in zip(output_tensor_list, input_tensor_lists):
+ for output_tensor, input_tensor_list in zip(
+ output_tensor_list, input_tensor_lists
+ ):
output_tensor.copy_(input_tensor_list[self.rank()])
return DummyWork()
@@ -1597,28 +1638,25 @@ class PythonProcessGroupExtensionTest(MultiProcessTestCase):
def test_backend_class_attr(self):
dist.Backend.register_backend(
- "dummy",
- PythonProcessGroupExtensionTest.create_dummy
+ "dummy", PythonProcessGroupExtensionTest.create_dummy
)
self.assertEqual(dist.Backend.DUMMY, "dummy")
self.assertEqual(
dist.Backend._plugins["DUMMY"].creator_fn,
- PythonProcessGroupExtensionTest.create_dummy
+ PythonProcessGroupExtensionTest.create_dummy,
)
def test_is_backend_available(self):
self.assertEqual(dist.is_ucc_available(), dist.is_backend_available("ucc"))
self.assertFalse(dist.is_backend_available("dummy"))
dist.Backend.register_backend(
- "dummy",
- PythonProcessGroupExtensionTest.create_dummy
+ "dummy", PythonProcessGroupExtensionTest.create_dummy
)
self.assertTrue(dist.is_backend_available("dummy"))
def test_backend_config(self):
dist.Backend.register_backend(
- "dummy",
- PythonProcessGroupExtensionTest.create_dummy
+ "dummy", PythonProcessGroupExtensionTest.create_dummy
)
# Ensure backend config can be created with the following arguments
@@ -1653,11 +1691,15 @@ class PythonProcessGroupExtensionTest(MultiProcessTestCase):
dist.BackendConfig(config_str)
def test_init_process_group_with_multiple_backends(self):
- dist.Backend.register_backend("dummy", PythonProcessGroupExtensionTest.create_dummy)
+ dist.Backend.register_backend(
+ "dummy", PythonProcessGroupExtensionTest.create_dummy
+ )
- os.environ['MASTER_ADDR'] = 'localhost'
- os.environ['MASTER_PORT'] = '6789'
- dist.init_process_group("cpu:dummy,cuda:dummy", rank=self.rank, world_size=self.world_size)
+ os.environ["MASTER_ADDR"] = "localhost"
+ os.environ["MASTER_PORT"] = "6789"
+ dist.init_process_group(
+ "cpu:dummy,cuda:dummy", rank=self.rank, world_size=self.world_size
+ )
# test all_gather
input_tensor = torch.ones(2, 2) * 7
@@ -1679,10 +1721,12 @@ class PythonProcessGroupExtensionTest(MultiProcessTestCase):
return DummyProcessGroup(group_rank, group_size)
def test_collectives(self):
- dist.Backend.register_backend("dummy", PythonProcessGroupExtensionTest.create_dummy)
+ dist.Backend.register_backend(
+ "dummy", PythonProcessGroupExtensionTest.create_dummy
+ )
- os.environ['MASTER_ADDR'] = 'localhost'
- os.environ['MASTER_PORT'] = '6789'
+ os.environ["MASTER_ADDR"] = "localhost"
+ os.environ["MASTER_PORT"] = "6789"
dist.init_process_group("dummy", rank=self.rank, world_size=self.world_size)
# test all_gather
@@ -1713,10 +1757,12 @@ class PythonProcessGroupExtensionTest(MultiProcessTestCase):
dist.destroy_process_group()
def test_send_recv(self):
- dist.Backend.register_backend("dummy", PythonProcessGroupExtensionTest.create_dummy)
+ dist.Backend.register_backend(
+ "dummy", PythonProcessGroupExtensionTest.create_dummy
+ )
- os.environ['MASTER_ADDR'] = 'localhost'
- os.environ['MASTER_PORT'] = '6789'
+ os.environ["MASTER_ADDR"] = "localhost"
+ os.environ["MASTER_PORT"] = "6789"
dist.init_process_group("dummy", rank=self.rank, world_size=self.world_size)
# test send
@@ -1739,6 +1785,7 @@ class PythonProcessGroupExtensionTest(MultiProcessTestCase):
instantiate_parametrized_tests(CommonDistributedDataParallelTest)
+
class ProcessGroupWithDispatchedCollectivesTests(MultiProcessTestCase):
@property
def world_size(self):
@@ -1791,7 +1838,7 @@ class ProcessGroupWithDispatchedCollectivesTests(MultiProcessTestCase):
backend=backend,
rank=self.rank,
world_size=self.world_size,
- store=store
+ store=store,
)
pg = c10d._get_default_group()
self.assertEqual(pg.rank(), self.rank)
@@ -1877,6 +1924,7 @@ class ProcessGroupWithDispatchedCollectivesTests(MultiProcessTestCase):
output_tensor = torch.zeros(2, 2, device=torch.device(device))
dist.all_to_all_single(output_tensor, input_tensor)
+
class CompilerTest(MultiProcessTestCase):
def setUp(self):
super().setUp()
@@ -1928,14 +1976,18 @@ class CompilerTest(MultiProcessTestCase):
commed = False
while prev is not None and not commed:
curr = prev
- waited |= all([
- curr.op == "call_function",
- curr.target == _wait_comm,
- ])
- commed |= all([
- curr.op == "call_function",
- CommTensor._is_supported(curr.target.__name__),
- ])
+ waited |= all(
+ [
+ curr.op == "call_function",
+ curr.target == _wait_comm,
+ ]
+ )
+ commed |= all(
+ [
+ curr.op == "call_function",
+ CommTensor._is_supported(curr.target.__name__),
+ ]
+ )
prev = curr.args[0]
@@ -1978,7 +2030,9 @@ class CompilerTest(MultiProcessTestCase):
def comm_fn(tensor, group=None):
out_tensors = [torch.zeros_like(tensor) for _ in range(group.size())]
output_tensor = torch.cat(out_tensors, dim=0)
- work = dist.all_gather_into_tensor(output_tensor, tensor, group=group, async_op=True)
+ work = dist.all_gather_into_tensor(
+ output_tensor, tensor, group=group, async_op=True
+ )
work.wait()
return work, output_tensor
@@ -1989,7 +2043,9 @@ class CompilerTest(MultiProcessTestCase):
def comm_fn(tensor, group=None):
in_tensors = [tensor.clone() + i for i in range(group.size())]
out_tensor = torch.zeros_like(tensor)
- work = dist.reduce_scatter(out_tensor, in_tensors, group=group, async_op=True)
+ work = dist.reduce_scatter(
+ out_tensor, in_tensors, group=group, async_op=True
+ )
return work, out_tensor
self._test_work_wait(tensor, comm_fn=comm_fn)
@@ -1997,7 +2053,9 @@ class CompilerTest(MultiProcessTestCase):
def _test_reduce_scatter_tensor_work_wait(self, tensor):
def comm_fn(tensor, group=None):
out_tensor = torch.zeros_like(tensor).chunk(group.size(), dim=0)[self.rank]
- work = dist.reduce_scatter_tensor(out_tensor, tensor, group=group, async_op=True)
+ work = dist.reduce_scatter_tensor(
+ out_tensor, tensor, group=group, async_op=True
+ )
return work, out_tensor
self._test_work_wait(tensor, comm_fn=comm_fn)
@@ -2011,9 +2069,13 @@ class CompilerTest(MultiProcessTestCase):
def _test_scatter_work_wait(self, tensor):
def comm_fn(tensor, group=None):
- in_tensors = [tensor + i for i in range(group.size())] if self.rank == 0 else None
+ in_tensors = (
+ [tensor + i for i in range(group.size())] if self.rank == 0 else None
+ )
out_tensor = torch.zeros_like(tensor)
- work = dist.scatter(out_tensor, in_tensors, src=0, group=group, async_op=True)
+ work = dist.scatter(
+ out_tensor, in_tensors, src=0, group=group, async_op=True
+ )
return work, out_tensor
self._test_work_wait(tensor, comm_fn=comm_fn)
@@ -2045,22 +2107,35 @@ class CompilerTest(MultiProcessTestCase):
class ReduceOpTest(TestCase):
-
# Ref: https://github.com/pytorch/pytorch/issues/87191
def test_op_isinstance_of_reduceop(self):
for reduce_op in (
- c10d.ReduceOp.SUM, c10d.ReduceOp.AVG, c10d.ReduceOp.PRODUCT, c10d.ReduceOp.MIN, c10d.ReduceOp.MAX,
- c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR,
+ c10d.ReduceOp.SUM,
+ c10d.ReduceOp.AVG,
+ c10d.ReduceOp.PRODUCT,
+ c10d.ReduceOp.MIN,
+ c10d.ReduceOp.MAX,
+ c10d.ReduceOp.BAND,
+ c10d.ReduceOp.BOR,
+ c10d.ReduceOp.BXOR,
):
self.assertTrue(isinstance(reduce_op, c10d.ReduceOp))
for scale in (torch.tensor(1.0), 2.0):
- self.assertTrue(isinstance(dist._make_nccl_premul_sum(scale), c10d.ReduceOp))
+ self.assertTrue(
+ isinstance(dist._make_nccl_premul_sum(scale), c10d.ReduceOp)
+ )
# Ref: https://github.com/pytorch/pytorch/pull/87303#discussion_r1002879700
def test_reduceop_copyable(self):
for reduce_op in (
- c10d.ReduceOp.SUM, c10d.ReduceOp.AVG, c10d.ReduceOp.PRODUCT, c10d.ReduceOp.MIN, c10d.ReduceOp.MAX,
- c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR,
+ c10d.ReduceOp.SUM,
+ c10d.ReduceOp.AVG,
+ c10d.ReduceOp.PRODUCT,
+ c10d.ReduceOp.MIN,
+ c10d.ReduceOp.MAX,
+ c10d.ReduceOp.BAND,
+ c10d.ReduceOp.BOR,
+ c10d.ReduceOp.BXOR,
):
self.assertEqual(copy.copy(reduce_op), reduce_op)
self.assertEqual(copy.deepcopy(reduce_op), reduce_op)
@@ -2074,8 +2149,14 @@ class ReduceOpTest(TestCase):
def test_reduceop_pickle(self):
for reduce_op in (
- c10d.ReduceOp.SUM, c10d.ReduceOp.AVG, c10d.ReduceOp.PRODUCT, c10d.ReduceOp.MIN, c10d.ReduceOp.MAX,
- c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR,
+ c10d.ReduceOp.SUM,
+ c10d.ReduceOp.AVG,
+ c10d.ReduceOp.PRODUCT,
+ c10d.ReduceOp.MIN,
+ c10d.ReduceOp.MAX,
+ c10d.ReduceOp.BAND,
+ c10d.ReduceOp.BOR,
+ c10d.ReduceOp.BXOR,
):
pickle.loads(pickle.dumps(reduce_op))
orig = c10d.ReduceOp(reduce_op)
@@ -2088,8 +2169,14 @@ class ReduceOpTest(TestCase):
def test_reduceop_equal(self):
not_reduceop = "abc"
for reduce_op in (
- c10d.ReduceOp.SUM, c10d.ReduceOp.AVG, c10d.ReduceOp.PRODUCT, c10d.ReduceOp.MIN, c10d.ReduceOp.MAX,
- c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR,
+ c10d.ReduceOp.SUM,
+ c10d.ReduceOp.AVG,
+ c10d.ReduceOp.PRODUCT,
+ c10d.ReduceOp.MIN,
+ c10d.ReduceOp.MAX,
+ c10d.ReduceOp.BAND,
+ c10d.ReduceOp.BOR,
+ c10d.ReduceOp.BXOR,
):
reduce_op_obj = c10d.ReduceOp(reduce_op)
# this calls `ReduceOp.__eq__(self, other)`
@@ -2106,6 +2193,7 @@ class ReduceOpTest(TestCase):
self.assertFalse(None in (reduce_op, reduce_op_obj))
self.assertFalse(not_reduceop in (reduce_op, reduce_op_obj))
+
class LocalRankTest(MultiProcessTestCase):
@property
def world_size(self):
@@ -2130,6 +2218,7 @@ class LocalRankTest(MultiProcessTestCase):
os.environ["LOCAL_RANK"] = str(self.rank)
self.assertEqual(dist.get_node_local_rank(), self.rank)
+
if __name__ == "__main__":
assert (
not torch.cuda._initialized
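Editor's note: the long ReduceOp tuples above are only reformatted enumerations; the properties they exercise are easy to check in isolation. A minimal sketch (assumes a torch build where torch.distributed is available):

import copy
import pickle

import torch.distributed as dist

op = dist.ReduceOp.SUM
wrapped = dist.ReduceOp(op)  # constructing from an existing op also works

# ReduceOp values survive copy/deepcopy and a pickle round-trip.
assert copy.copy(op) == op
assert copy.deepcopy(op) == op
assert pickle.loads(pickle.dumps(op)) == op
assert isinstance(wrapped, dist.ReduceOp)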
diff --git a/test/distributed/test_c10d_gloo.py b/test/distributed/test_c10d_gloo.py
index b82e7f641a..34f8849ddb 100644
--- a/test/distributed/test_c10d_gloo.py
+++ b/test/distributed/test_c10d_gloo.py
@@ -339,8 +339,12 @@ class ProcessGroupGlooTest(MultiProcessTestCase):
]
output = broadcast(xs, i, j)
- self.assertEqual(torch.tensor([i * num + j], dtype=torch.float32), output[0])
- self.assertEqual(torch.tensor([i * num + j], dtype=torch.float32), output[1])
+ self.assertEqual(
+ torch.tensor([i * num + j], dtype=torch.float32), output[0]
+ )
+ self.assertEqual(
+ torch.tensor([i * num + j], dtype=torch.float32), output[1]
+ )
# Test overloaded convenience function
x = torch.tensor([self.rank + 1.0])
@@ -419,7 +423,7 @@ class ProcessGroupGlooTest(MultiProcessTestCase):
# Single input tests
tests = simple_reduce_tests(self.rank, self.world_size)
- for (op, input, expected) in tests:
+ for op, input, expected in tests:
opts = c10d.AllreduceOptions()
opts.reduceOp = op
tensor = fn(input)
@@ -430,7 +434,7 @@ class ProcessGroupGlooTest(MultiProcessTestCase):
# Multi input tests
tests = simple_multi_input_reduce_tests(self.rank, self.world_size)
- for (op, inputs, output) in tests:
+ for op, inputs, output in tests:
opts = c10d.AllreduceOptions()
opts.reduceOp = op
tensors = [fn(input) for input in inputs]
@@ -506,7 +510,9 @@ class ProcessGroupGlooTest(MultiProcessTestCase):
opts = c10d.AllreduceCoalescedOptions()
pg.allreduce_coalesced([], opts)
- with self.assertRaisesRegex(RuntimeError, "tensors must all have the same type"):
+ with self.assertRaisesRegex(
+ RuntimeError, "tensors must all have the same type"
+ ):
opts = c10d.AllreduceCoalescedOptions()
pg.allreduce_coalesced([t1, t2], opts)
@@ -621,7 +627,9 @@ class ProcessGroupGlooTest(MultiProcessTestCase):
# Sparse allreduce only works with c10d.ReduceOp.SUM.
for op in [c10d.ReduceOp.PRODUCT, c10d.ReduceOp.MIN, c10d.ReduceOp.MAX]:
- with self.assertRaisesRegex(RuntimeError, "unsupported reduction operation"):
+ with self.assertRaisesRegex(
+ RuntimeError, "unsupported reduction operation"
+ ):
opts = c10d.AllreduceOptions()
opts.reduceOp = op
pg.allreduce([t3], opts)
@@ -636,7 +644,7 @@ class ProcessGroupGlooTest(MultiProcessTestCase):
tests = simple_sparse_reduce_tests(
self.rank, self.world_size, num_inputs=num_inputs_per_rank
)
- for (inputs, outputs) in tests:
+ for inputs, outputs in tests:
tensors = [fn(input) for input in inputs]
fut = pg.allreduce(tensors).get_future()
fut.wait()
@@ -657,11 +665,11 @@ class ProcessGroupGlooTest(MultiProcessTestCase):
@requires_gloo()
def test_sparse_allreduce_cuda_dispatched(self):
store = c10d.FileStore(self.file_name, self.world_size)
- dist.init_process_group(backend="gloo", store=store, rank=self.rank, world_size=self.world_size)
- tests = simple_sparse_reduce_tests(
- self.rank, self.world_size, num_inputs=1
+ dist.init_process_group(
+ backend="gloo", store=store, rank=self.rank, world_size=self.world_size
)
- for (inputs, outputs) in tests:
+ tests = simple_sparse_reduce_tests(self.rank, self.world_size, num_inputs=1)
+ for inputs, outputs in tests:
tensors = inputs[-1].clone().cuda()
work = dist.all_reduce(tensors, async_op=True)
work.wait()
@@ -707,8 +715,10 @@ class ProcessGroupGlooTest(MultiProcessTestCase):
work = dist.reduce_scatter_tensor(output, input, async_op=True)
work.wait()
- expect = input.view(self.world_size, *out_shape) \
- .chunk(self.world_size)[self.rank] * self.world_size
+ expect = (
+ input.view(self.world_size, *out_shape).chunk(self.world_size)[self.rank]
+ * self.world_size
+ )
self.assertTrue(torch.allclose(output, expect))
@requires_gloo()
@@ -730,8 +740,12 @@ class ProcessGroupGlooTest(MultiProcessTestCase):
work.wait()
for output, input in zip(outputs, inputs):
- expect = input.view(self.world_size, *output.shape) \
- .chunk(self.world_size)[self.rank] * self.world_size
+ expect = (
+ input.view(self.world_size, *output.shape).chunk(self.world_size)[
+ self.rank
+ ]
+ * self.world_size
+ )
self.assertTrue(torch.allclose(output, expect))
@requires_gloo()
@@ -769,12 +783,16 @@ class ProcessGroupGlooTest(MultiProcessTestCase):
opts.rootRank = 0
pg.scatter([t1, t1], [], opts)
- with self.assertRaisesRegex(RuntimeError, "requires a single-element input list"):
+ with self.assertRaisesRegex(
+ RuntimeError, "requires a single-element input list"
+ ):
opts = c10d.ScatterOptions()
opts.rootRank = self.rank
pg.scatter([t1], [], opts)
- with self.assertRaisesRegex(RuntimeError, "requires a single-element input list"):
+ with self.assertRaisesRegex(
+ RuntimeError, "requires a single-element input list"
+ ):
opts = c10d.ScatterOptions()
opts.rootRank = self.rank
pg.scatter([t1], [[t1] * self.world_size, [t1] * self.world_size], opts)
@@ -1094,7 +1112,9 @@ class ProcessGroupGlooTest(MultiProcessTestCase):
t2 = torch.zeros([1], dtype=torch.float64)
t3 = torch.zeros([2], dtype=torch.float32)
- with self.assertRaisesRegex(RuntimeError, "requires non-empty input tensor list"):
+ with self.assertRaisesRegex(
+ RuntimeError, "requires non-empty input tensor list"
+ ):
pg.allgather([], [])
with self.assertRaisesRegex(
@@ -1258,11 +1278,19 @@ class ProcessGroupGlooTest(MultiProcessTestCase):
)
xxs = [2 * [torch.tensor([i + self.rank])] for i in range(2)]
- yys = [[[torch.zeros_like(x) for x in xx] for _ in range(self.world_size)] for xx in xxs]
- futs = [c10d.all_gather_coalesced(yy, xx, async_op=True) for xx, yy in zip(xxs, yys)]
+ yys = [
+ [[torch.zeros_like(x) for x in xx] for _ in range(self.world_size)]
+ for xx in xxs
+ ]
+ futs = [
+ c10d.all_gather_coalesced(yy, xx, async_op=True) for xx, yy in zip(xxs, yys)
+ ]
# expected outputs
- zzs = [[2 * [torch.tensor([i + r])] for r in range(self.world_size)] for i in range(2)]
+ zzs = [
+ [2 * [torch.tensor([i + r])] for r in range(self.world_size)]
+ for i in range(2)
+ ]
torch.futures.wait_all(futs)
for yy, zz in zip(yys, zzs):
@@ -1324,7 +1352,7 @@ class ProcessGroupGlooTest(MultiProcessTestCase):
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
- for (op, input, output) in simple_reduce_tests(self.rank, self.world_size):
+ for op, input, output in simple_reduce_tests(self.rank, self.world_size):
for root in range(self.world_size):
opts = c10d.ReduceOptions()
opts.reduceOp = op
@@ -1456,12 +1484,11 @@ class ProcessGroupGlooTest(MultiProcessTestCase):
def test_round_robin(self):
num_process_groups = 2
store = c10d.FileStore(self.file_name, self.world_size)
- c10d.init_process_group(backend="gloo", store=store, rank=self.rank, world_size=self.world_size)
+ c10d.init_process_group(
+ backend="gloo", store=store, rank=self.rank, world_size=self.world_size
+ )
pg = c10d._round_robin_process_groups(
- [
- c10d.new_group(pg_options=self.opts())
- for i in range(num_process_groups)
- ]
+ [c10d.new_group(pg_options=self.opts()) for i in range(num_process_groups)]
)
# Run a few collectives so that we have called each process group
@@ -1474,14 +1501,13 @@ class ProcessGroupGlooTest(MultiProcessTestCase):
@requires_gloo()
def test_round_robin_create_destroy(self):
store = c10d.FileStore(self.file_name, self.world_size)
- c10d.init_process_group(backend="gloo", store=store, rank=self.rank, world_size=self.world_size)
+ c10d.init_process_group(
+ backend="gloo", store=store, rank=self.rank, world_size=self.world_size
+ )
def create(num, prefix):
return c10d._round_robin_process_groups(
- [
- c10d.new_group(pg_options=self.opts())
- for i in range(num)
- ]
+ [c10d.new_group(pg_options=self.opts()) for i in range(num)]
)
# Run create/use/destroy twice
@@ -1504,14 +1530,18 @@ class DistributedDataParallelTest(
def _get_process_group(self):
store = self._get_store()
- c10d.init_process_group(backend="gloo", store=store, rank=self.rank, world_size=self.world_size)
+ c10d.init_process_group(
+ backend="gloo", store=store, rank=self.rank, world_size=self.world_size
+ )
return c10d.distributed_c10d._get_default_group()
def _test_gloo_backend(
self, devices, device_ids, multi_device=False, gradient_as_bucket_view=False
):
store = c10d.FileStore(self.file_name, self.world_size)
- c10d.init_process_group(backend="gloo", store=store, rank=self.rank, world_size=self.world_size)
+ c10d.init_process_group(
+ backend="gloo", store=store, rank=self.rank, world_size=self.world_size
+ )
process_group = c10d.distributed_c10d._get_default_group()
device = devices[-1]
backend = process_group._get_backend(device)
@@ -1792,6 +1822,7 @@ class DistributedDataParallelTest(
def forward(self, x):
x = self.relu(self.fc1(x))
return F.softmax(x, dim=1)
+
pg = dist.init_process_group(
"gloo",
init_method=f"file://{self.file_name}",
@@ -1802,14 +1833,13 @@ class DistributedDataParallelTest(
local_shard_metadata = ShardMetadata(
shard_offsets=[(self.rank % 2) * 5, 0],
shard_sizes=[5, 10],
- placement=f"rank:{self.rank}/cuda:{self.rank}"
+ placement=f"rank:{self.rank}/cuda:{self.rank}",
)
local_shards = [Shard(torch.randn(5, 10, device=device), local_shard_metadata)]
st = init_from_local_shards(local_shards, [10, 10])
m = MyModule(st)
DistributedDataParallel._set_params_and_buffers_to_ignore_for_model(
- module=m,
- params_and_buffers_to_ignore={'st'}
+ module=m, params_and_buffers_to_ignore={"st"}
)
# test to make DDP constructor will not fail when module includes a ShardedTensor when ignored
DistributedDataParallel(
@@ -1839,7 +1869,9 @@ class DistributedDataParallelTest(
# Check that the gradients are sparse and identical
vanilla_parameter = next(vanilla_model.parameters())
ddp_parameter = next(ddp_model.parameters())
- self.assertEqual(vanilla_parameter.grad.coalesce(), ddp_parameter.grad.coalesce())
+ self.assertEqual(
+ vanilla_parameter.grad.coalesce(), ddp_parameter.grad.coalesce()
+ )
@requires_gloo()
@skip_if_lt_x_gpu(2)
@@ -2064,7 +2096,9 @@ class DistributedDataParallelTest(
ModuleForDdpCommHook(), process_group=process_group
)
- expected_err = "Communication hook: return annotation should be torch.futures.Future"
+ expected_err = (
+ "Communication hook: return annotation should be torch.futures.Future"
+ )
with self.assertRaisesRegex(
ValueError,
expected_err,
@@ -2172,7 +2206,9 @@ class ReducerTest(TestCase):
self.file = tempfile.NamedTemporaryFile(delete=False)
world_size = 1
self.store = c10d.FileStore(self.file.name, world_size)
- c10d.init_process_group(backend="gloo", store=self.store, rank=0, world_size=world_size)
+ c10d.init_process_group(
+ backend="gloo", store=self.store, rank=0, world_size=world_size
+ )
self.process_group = c10d.distributed_c10d._get_default_group()
def tearDown(self):
@@ -2188,7 +2224,9 @@ class ReducerTest(TestCase):
model = ReducerModule()
parameters = list(model.parameters())
buckets = [list(range(len(parameters)))]
- dist.Reducer(parameters, buckets, [dist._DEFAULT_FIRST_BUCKET_BYTES], self.process_group)
+ dist.Reducer(
+ parameters, buckets, [dist._DEFAULT_FIRST_BUCKET_BYTES], self.process_group
+ )
def _create_mixed_precision_model(self):
model = ReducerModule()
@@ -2209,7 +2247,7 @@ class ReducerTest(TestCase):
parameters,
buckets,
[dist._DEFAULT_FIRST_BUCKET_BYTES],
- self.process_group
+ self.process_group,
)
@requires_gloo()
@@ -2224,7 +2262,7 @@ class ReducerTest(TestCase):
parameters,
buckets,
[dist._DEFAULT_FIRST_BUCKET_BYTES for _ in buckets],
- self.process_group
+ self.process_group,
)
def _create_reducer_for_models(self, models, find_unused_parameters=False):
@@ -2307,7 +2345,6 @@ class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
def device(self):
return "cpu"
-
def setUp(self):
super().setUp()
self._spawn_processes()
@@ -2354,7 +2391,9 @@ class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
@skip_if_lt_x_gpu(2)
def test_broadcast_coalesced_gloo_cuda(self):
store = c10d.FileStore(self.file_name, self.world_size)
- c10d.init_process_group(backend="gloo", store=store, rank=self.rank, world_size=self.world_size)
+ c10d.init_process_group(
+ backend="gloo", store=store, rank=self.rank, world_size=self.world_size
+ )
process_group = c10d.distributed_c10d._get_default_group()
device = torch.device("cuda:%d" % self.rank)
backend = process_group._get_backend(device)
@@ -2366,7 +2405,9 @@ class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
@requires_gloo()
def test_broadcast_coalesced_gloo_cpu(self):
store = c10d.FileStore(self.file_name, self.world_size)
- c10d.init_process_group(backend="gloo", store=store, rank=self.rank, world_size=self.world_size)
+ c10d.init_process_group(
+ backend="gloo", store=store, rank=self.rank, world_size=self.world_size
+ )
process_group = c10d.distributed_c10d._get_default_group()
device = torch.device("cpu")
backend = process_group._get_backend(device)
@@ -2421,7 +2462,10 @@ class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
def test_bool_tensors(self):
self._test_bool_tensors(backend="gloo")
-class GlooProcessGroupWithDispatchedCollectivesTests(test_c10d_common.ProcessGroupWithDispatchedCollectivesTests):
+
+class GlooProcessGroupWithDispatchedCollectivesTests(
+ test_c10d_common.ProcessGroupWithDispatchedCollectivesTests
+):
@requires_gloo()
def test_collectives(self):
self._test_collectives(backend="gloo")
@@ -2459,8 +2503,8 @@ class GlooProcessGroupWithDispatchedCollectivesTests(test_c10d_common.ProcessGro
)
dist.monitored_barrier()
-class CompilerTest(test_c10d_common.CompilerTest):
+class CompilerTest(test_c10d_common.CompilerTest):
@property
def world_size(self):
return 2
@@ -2480,36 +2524,28 @@ class CompilerTest(test_c10d_common.CompilerTest):
@skip_if_lt_x_gpu(2)
def test_allreduce_work_wait_gpu(self):
- self._test_allreduce_work_wait(
- torch.ones(2, 2, device=self.rank) * self.rank
- )
+ self._test_allreduce_work_wait(torch.ones(2, 2, device=self.rank) * self.rank)
def test_allgather_work_wait_cpu(self):
self._test_allgather_work_wait(torch.ones(2, 2) * self.rank)
@skip_if_lt_x_gpu(2)
def test_allgather_work_wait_gpu(self):
- self._test_allgather_work_wait(
- torch.ones(2, 2, device=self.rank) * self.rank
- )
+ self._test_allgather_work_wait(torch.ones(2, 2, device=self.rank) * self.rank)
def test_broadcast_work_wait_cpu(self):
self._test_broadcast_work_wait(torch.ones(2, 2) * self.rank)
@skip_if_lt_x_gpu(2)
def test_broadcast_work_wait_gpu(self):
- self._test_broadcast_work_wait(
- torch.ones(2, 2, device=self.rank) * self.rank
- )
+ self._test_broadcast_work_wait(torch.ones(2, 2, device=self.rank) * self.rank)
def test_scatter_work_wait_cpu(self):
self._test_scatter_work_wait(torch.ones(2, 2) * self.rank)
@skip_if_lt_x_gpu(2)
def test_scatter_work_wait_gpu(self):
- self._test_scatter_work_wait(
- torch.ones(2, 2, device=self.rank) * self.rank
- )
+ self._test_scatter_work_wait(torch.ones(2, 2, device=self.rank) * self.rank)
def test_nested_comm_tensor_wrapping(self):
self._test_nested_comm_tensor_wrapping(torch.ones(2, 2) * self.rank)
@@ -2523,6 +2559,7 @@ class CompilerTest(test_c10d_common.CompilerTest):
torch.ones(2, 2, device=self.rank) * self.rank
)
+
class LargeCommTest(test_c10d_common.AbstractLargeCommTest, MultiProcessTestCase):
def setUp(self):
super().setUp()
@@ -2551,6 +2588,7 @@ class LargeCommTest(test_c10d_common.AbstractLargeCommTest, MultiProcessTestCase
def test_new_group_local_sync_duplicate_pg(self):
self._test_new_group_local_sync_duplicate_pg(backend="gloo")
+
if __name__ == "__main__":
assert (
not torch.cuda._initialized
diff --git a/test/distributed/test_c10d_nccl.py b/test/distributed/test_c10d_nccl.py
index bbaafd1cac..bbe4461e0c 100644
--- a/test/distributed/test_c10d_nccl.py
+++ b/test/distributed/test_c10d_nccl.py
@@ -1,23 +1,23 @@
# Owner(s): ["oncall: distributed"]
import copy
+import json
import math
import os
+import pickle
import random
import re
import signal
import sys
import tempfile
import threading
-import pickle
import time
-import json
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
-from itertools import chain, product
-from unittest import SkipTest, mock
from enum import auto, Enum
+from itertools import chain, product
+from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
@@ -26,42 +26,43 @@ if not c10d.is_available() or not c10d.is_nccl_available():
print("c10d NCCL not available, skipping tests", file=sys.stderr)
sys.exit(0)
+from typing import Dict, List
+
import test_c10d_common
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
-from typing import Dict, List
-from test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook
+from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
- MultiProcessTestCase,
+ get_timeout,
init_multigpu_helper,
- requires_nccl,
+ MultiProcessTestCase,
requires_gloo,
+ requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
- get_timeout,
skip_if_rocm,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
- TestCase,
- run_tests,
+ instantiate_parametrized_tests,
+ parametrize,
retry_on_connect_failures,
+ run_tests,
+ skip_but_pass_in_sandcastle,
+ skip_but_pass_in_sandcastle_if,
skipIfRocm,
+ TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
- parametrize,
- instantiate_parametrized_tests,
- skip_but_pass_in_sandcastle,
- skip_but_pass_in_sandcastle_if,
- TEST_CUDA
+ TestCase,
)
if TEST_WITH_DEV_DBG_ASAN:
@@ -71,15 +72,12 @@ if TEST_WITH_DEV_DBG_ASAN:
sys.exit(0)
# bfloat16 is only supported by CUDA 11+
-BFLOAT16_AVAILABLE = (
- torch.cuda.is_available()
- and
- (
- (torch.version.cuda is not None and int(torch.version.cuda.split('.')[0]) >= 11)
- or torch.version.hip is not None
- )
+BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
+ (torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
+ or torch.version.hip is not None
)
+
class RendezvousEnvTest(TestCase):
@retry_on_connect_failures
@requires_nccl()
@@ -218,7 +216,8 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
rank=self.rank,
store=store,
pg_options=opts,
- device_id=device_id)
+ device_id=device_id,
+ )
pg = c10d.distributed_c10d._get_default_group()
return pg
@@ -268,13 +267,23 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
pg.reduce(xs).wait()
self.assertEqual(0, xs[0].numel())
- ys = [[torch.FloatTensor([]).cuda(local_device_idx) for _ in range(self.world_size)]]
+ ys = [
+ [
+ torch.FloatTensor([]).cuda(local_device_idx)
+ for _ in range(self.world_size)
+ ]
+ ]
pg.allgather(ys, xs).wait()
for y in ys[0]:
self.assertEqual(0, y.numel())
ys = [torch.FloatTensor([]).cuda(local_device_idx)]
- xs = [[torch.FloatTensor([]).cuda(local_device_idx) for _ in range(self.world_size)]]
+ xs = [
+ [
+ torch.FloatTensor([]).cuda(local_device_idx)
+ for _ in range(self.world_size)
+ ]
+ ]
pg.reduce_scatter(ys, xs).wait()
self.assertEqual(0, ys[0].numel())
@@ -300,7 +309,10 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
self.assertEqual(torch.tensor([i]), output[0])
expected_tensor = torch.empty([i + 1, i + 1]).fill_(i + 1)
- xs = [torch.empty([i + 1, i + 1]).fill_(-1).cuda(device=device_idx) for device_idx in self.rank_to_GPU[self.rank]]
+ xs = [
+ torch.empty([i + 1, i + 1]).fill_(-1).cuda(device=device_idx)
+ for device_idx in self.rank_to_GPU[self.rank]
+ ]
# test with multiple input tensors (multiple gpu in one rank)
for j in range(len(xs)):
@@ -320,7 +332,9 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
indices = torch.tensor([[0, 1]])
values = torch.tensor([[1, 2, 0], [4, 0, 6]])
- sparse_tensor = torch.sparse_coo_tensor(indices, values, size=(2, 3)).to(self.rank)
+ sparse_tensor = torch.sparse_coo_tensor(indices, values, size=(2, 3)).to(
+ self.rank
+ )
# sparse allreduce call is wrapped in a try catch since the c10d API is only available in the nccl experimental branch
try:
@@ -365,26 +379,37 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
# Avg (only available for NCCL 2.10+)
if torch.cuda.nccl.version() >= (2, 10, 0):
- tensors = [torch.tensor([self.rank + 1.]).cuda(local_device_id)]
+ tensors = [torch.tensor([self.rank + 1.0]).cuda(local_device_id)]
allreduce(tensors, c10d.ReduceOp.AVG)
ndev = self.world_size
self.assertEqual(
- torch.tensor([ndev * (ndev + 1.) / (2. * ndev)]),
+ torch.tensor([ndev * (ndev + 1.0) / (2.0 * ndev)]),
tensors[0],
)
# Premul Sum
if torch.cuda.nccl.version() >= (2, 11, 1):
for dtype in torch.half, torch.float, torch.double:
- for factor in (3.0, torch.tensor([5.0], device=local_device_id, dtype=dtype)):
- tensors = [torch.tensor([self.rank + 1]).cuda(local_device_id).to(dtype=dtype)]
+ for factor in (
+ 3.0,
+ torch.tensor([5.0], device=local_device_id, dtype=dtype),
+ ):
+ tensors = [
+ torch.tensor([self.rank + 1])
+ .cuda(local_device_id)
+ .to(dtype=dtype)
+ ]
allreduce(tensors, c10d._make_nccl_premul_sum(factor))
self.assertEqual(
- factor * torch.tensor([self.world_size * (self.world_size + 1) / 2],
- dtype=dtype, device=local_device_id),
+ factor
+ * torch.tensor(
+ [self.world_size * (self.world_size + 1) / 2],
+ dtype=dtype,
+ device=local_device_id,
+ ),
tensors[0],
)
@@ -392,9 +417,7 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
tensors = [torch.tensor([self.rank + 1]).cuda(local_device_id)]
allreduce(tensors, c10d.ReduceOp.PRODUCT)
- self.assertEqual(
- torch.tensor([math.factorial(self.world_size)]), tensors[0]
- )
+ self.assertEqual(torch.tensor([math.factorial(self.world_size)]), tensors[0])
# Min
tensors = [torch.tensor([self.rank + 1]).cuda(local_device_id)]
@@ -408,14 +431,13 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
allreduce(tensors, c10d.ReduceOp.MAX)
self.assertEqual(torch.tensor([self.world_size]), tensors[0])
- for op, err in zip((c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR),
- ("ReduceOp.BAND", "ReduceOp.BOR", "ReduceOp.BXOR")):
- with self.assertRaisesRegex(
- ValueError, "Cannot use " + err + " with NCCL"
- ):
+ for op, err in zip(
+ (c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR),
+ ("ReduceOp.BAND", "ReduceOp.BOR", "ReduceOp.BXOR"),
+ ):
+ with self.assertRaisesRegex(ValueError, "Cannot use " + err + " with NCCL"):
allreduce(tensors, op)
-
@requires_nccl()
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
def test_alltoall_ops_with_cudafree_race(self):
@@ -528,7 +550,7 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
("ReduceOp.BAND", "ReduceOp.BOR", "ReduceOp.BXOR"),
):
with self.assertRaisesRegex(
- ValueError, "Cannot use " + err + " with NCCL"
+ ValueError, "Cannot use " + err + " with NCCL"
):
reduce(tensors, self.rank, rt, op)
@@ -541,11 +563,14 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
factor_ref = factor
float_tensors = [
torch.tensor(
- [self.rank + 1.0], device=f"cuda:{local_device_id}")
+ [self.rank + 1.0], device=f"cuda:{local_device_id}"
+ )
]
float_tensors_ref = [
torch.tensor(
- [(self.rank + 1.0) * factor_ref], device=f"cuda:{local_device_id}")
+ [(self.rank + 1.0) * factor_ref],
+ device=f"cuda:{local_device_id}",
+ )
]
reduce(float_tensors_ref, rt, 0)
@@ -568,8 +593,12 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
output_tensors = []
expected_output = []
- output_per_gpu = ([torch.empty(2, 2).fill_(-1)] * len(local_device_ids) * self.world_size)
- expected_per_gpu = ([torch.empty(2, 2).fill_(2)] * len(local_device_ids) * self.world_size)
+ output_per_gpu = (
+ [torch.empty(2, 2).fill_(-1)] * len(local_device_ids) * self.world_size
+ )
+ expected_per_gpu = (
+ [torch.empty(2, 2).fill_(2)] * len(local_device_ids) * self.world_size
+ )
for gpu in local_device_ids:
output_tensors.append([t.cuda(device=gpu) for t in output_per_gpu])
@@ -594,7 +623,9 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
# allgather_base is GPU number agnostic.
# Each rank contribute one tensor regardless of GPU counts
tensor = torch.tensor([self.rank]).cuda(local_device_id)
- output_t = torch.empty((self.world_size), dtype=tensor.dtype).cuda(local_device_id)
+ output_t = torch.empty((self.world_size), dtype=tensor.dtype).cuda(
+ local_device_id
+ )
allgather_base(output_t, tensor)
@@ -744,7 +775,8 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
with self.assertRaisesRegex(
# throws error message from dispatcher
- RuntimeError, "There were no tensor arguments to this function"
+ RuntimeError,
+ "There were no tensor arguments to this function",
):
opts = c10d.GatherOptions()
opts.rootRank = 0
@@ -821,7 +853,6 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
for rank in range(self.world_size):
ls.append(torch.tensor([rank]).cuda(gpu_idx))
-
# test each rank to scatter
expected = [torch.tensor([self.rank])]
for i in range(stress_length):
@@ -866,7 +897,8 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
with self.assertRaisesRegex(
# throws error message from dispatcher
- RuntimeError, "There were no tensor arguments to this function"
+ RuntimeError,
+ "There were no tensor arguments to this function",
):
opts = c10d.ScatterOptions()
opts.rootRank = 0
@@ -945,8 +977,8 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
[
(1 + self.world_size) * self.world_size // 2
+ self.world_size * self.rank
- ])
-
+ ]
+ )
self.assertEqual(expected, output[i])
@@ -961,9 +993,7 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
reduce_scatter(output, tensor_lists, c10d.ReduceOp.MAX)
for i in range(num_gpus):
- expected = torch.tensor(
- [self.rank + self.world_size + i]
- )
+ expected = torch.tensor([self.rank + self.world_size + i])
self.assertEqual(expected, output[i])
# Product
@@ -1021,7 +1051,9 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
output = [t.float() for t in output]
tensor_lists = [[t.float() for t in tl] for tl in tensor_lists]
output_ref = [t.float() for t in output]
- tensor_lists_ref = [[t.float() * factor_ref for t in tl] for tl in tensor_lists]
+ tensor_lists_ref = [
+ [t.float() * factor_ref for t in tl] for tl in tensor_lists
+ ]
reduce_scatter(output, tensor_lists, c10d._make_nccl_premul_sum(factor))
reduce_scatter(output_ref, tensor_lists_ref, c10d.ReduceOp.SUM)
self.assertEqual(output_ref, output)
@@ -1040,7 +1072,9 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
# reduce_scatter_base is GPU number agnostic.
# Each rank contribute one tensor regardless of GPU counts
output_t = torch.empty([1]).cuda(local_device_id)
- tensor = torch.arange(self.world_size, dtype=output_t.dtype).cuda(local_device_id)
+ tensor = torch.arange(self.world_size, dtype=output_t.dtype).cuda(
+ local_device_id
+ )
reduce_scatter_base(output_t, tensor)
@@ -1065,7 +1099,9 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
for i in range(1, len(local_device_ids) + 1):
for j in range(i):
- tensors_list[i - 1].append(torch.tensor([j + 1]).cuda(local_device_ids[j]))
+ tensors_list[i - 1].append(
+ torch.tensor([j + 1]).cuda(local_device_ids[j])
+ )
works = []
for tensors in tensors_list:
@@ -1145,7 +1181,9 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
dist.all_reduce(t)
def abortpg():
- c10d.distributed_c10d._get_default_group()._get_backend(torch.device(device))._shutdown()
+ c10d.distributed_c10d._get_default_group()._get_backend(
+ torch.device(device)
+ )._shutdown()
# Initialize DDP to ensure "destroy_process_group" will not call
# ProcessGroupNCCL destructor since DDP holds a reference to process group.
@@ -1218,7 +1256,6 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
pg._get_backend(torch.device(device))._shutdown()
del pg
-
@requires_nccl()
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
def test_destruct_before_terminate_pg(self):
@@ -1236,7 +1273,6 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
# force destruction before terminating comms, destructor would terminate comms
del pg
-
@requires_nccl()
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
def test_abort_in_destroy_pg(self):
@@ -1260,7 +1296,9 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
pg.allreduce([t])
@requires_nccl()
- @skip_but_pass_in_sandcastle_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
+ @skip_but_pass_in_sandcastle_if(
+ torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs"
+ )
def test_close_multi_pg_unordered(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_nccl(store, self.opts())
@@ -1290,7 +1328,9 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
dist.destroy_process_group()
@requires_nccl()
- @skip_but_pass_in_sandcastle_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
+ @skip_but_pass_in_sandcastle_if(
+ torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs"
+ )
def test_abort_in_destroy_multi_pgs(self):
os.environ["TORCH_NCCL_ABORT_IN_DESTROY_PG"] = "1"
store = c10d.FileStore(self.file_name, self.world_size)
@@ -1311,9 +1351,10 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
# shutdown all NCCL PGs in one shot
dist.destroy_process_group()
-
@requires_nccl()
- @skip_but_pass_in_sandcastle_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
+ @skip_but_pass_in_sandcastle_if(
+ torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs"
+ )
def test_abort_in_destroy_mixed_empty_pgs(self):
os.environ["TORCH_NCCL_ABORT_IN_DESTROY_PG"] = "1"
store = c10d.FileStore(self.file_name, self.world_size)
@@ -1335,7 +1376,9 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
dist.destroy_process_group()
@requires_nccl()
- @skip_but_pass_in_sandcastle_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
+ @skip_but_pass_in_sandcastle_if(
+ torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs"
+ )
def test_file_store_check(self):
os.environ["TORCH_NCCL_ASYNC_ERROR_HANDLING"] = "0"
os.environ["TORCH_NCCL_ENABLE_MONITORING"] = "0"
@@ -1347,10 +1390,7 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
# e.g., self.file_name = tempfile.NamedTemporaryFile(delete=False).name
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
- backend="nccl",
- rank=self.rank,
- world_size=self.world_size,
- store=store
+ backend="nccl", rank=self.rank, world_size=self.world_size, store=store
)
pg = dist.distributed_c10d._get_default_group()
self.assertEqual(pg.rank(), self.rank)
@@ -1402,7 +1442,9 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
# 'timeout' kwarg taking precedence
opts = dist.ProcessGroupNCCL.Options()
opts._timeout = timedelta(seconds=123)
- dist.init_process_group(**base_opts, pg_options=opts, timeout=timedelta(seconds=1240))
+ dist.init_process_group(
+ **base_opts, pg_options=opts, timeout=timedelta(seconds=1240)
+ )
self._check_nccl_timeout(timedelta(seconds=1240))
dist.destroy_process_group()
@@ -1412,13 +1454,19 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
def test_set_nccl_pg_timeout(self, backend):
store = c10d.FileStore(self.file_name, self.world_size)
opts = dict(
- backend=backend, store=store, rank=self.rank, world_size=self.world_size, timeout=timedelta(seconds=123)
+ backend=backend,
+ store=store,
+ rank=self.rank,
+ world_size=self.world_size,
+ timeout=timedelta(seconds=123),
)
dist.init_process_group(**opts)
pg = dist.distributed_c10d._get_default_group()
pg.allreduce(torch.rand(10).cuda(self.rank))
self._check_nccl_timeout(timedelta(seconds=123))
- pg._get_backend(torch.device(f"cuda:{self.rank}"))._set_default_timeout(timedelta(seconds=23))
+ pg._get_backend(torch.device(f"cuda:{self.rank}"))._set_default_timeout(
+ timedelta(seconds=23)
+ )
self._check_nccl_timeout(timedelta(seconds=23))
pg.allreduce(torch.rand(10).cuda(self.rank))
c10d.distributed_c10d._set_pg_timeout(timedelta(seconds=252), pg)
@@ -1440,7 +1488,9 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
# allgather_base is GPU number agnostic.
# Each rank contribute one tensor regardless of GPU counts
tensor = torch.tensor([self.rank]).cuda(local_device_id)
- output_t = torch.empty((self.world_size), dtype=tensor.dtype).cuda(local_device_id)
+ output_t = torch.empty((self.world_size), dtype=tensor.dtype).cuda(
+ local_device_id
+ )
allgather_base(output_t, tensor)
@@ -1476,7 +1526,7 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
# Test `ncclCommSplit` for smaller subgroups of the world when
# we've passed a specific device_id to init_process_group.
store = c10d.FileStore(self.file_name, self.world_size)
- device = torch.device(f'cuda:{self.rank}')
+ device = torch.device(f"cuda:{self.rank}")
pg = self._create_process_group_nccl(store, self.opts(), device_id=device)
backend = pg._get_backend(torch.device(device))
@@ -1516,7 +1566,6 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
new_pg.broadcast(broadcast_tensor, 0).wait()
self.assertEqual(backend.comm_split_count(), 1)
-
@requires_nccl_version((2, 18), "Need NCCL 2.18+ for ncclCommSplit")
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
def test_non_blocking_with_eager_init(self):
@@ -1525,7 +1574,7 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
os.environ["TORCH_NCCL_USE_COMM_NONBLOCKING"] = "1"
os.environ["TORCH_NCCL_NONBLOCKING_TIMEOUT"] = "100"
store = c10d.FileStore(self.file_name, self.world_size)
- device = torch.device(f'cuda:{self.rank}')
+ device = torch.device(f"cuda:{self.rank}")
# bound device to triger eager init mode
pg = self._create_process_group_nccl(store, self.opts(), device_id=device)
backend = pg._get_backend(torch.device(device))
@@ -1541,25 +1590,26 @@ class ProcessGroupNCCLTest(MultiProcessTestCase):
new_pg.broadcast(broadcast_tensor, 0).wait()
self.assertEqual(backend.comm_split_count(), 1)
-
@requires_nccl()
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
def test_get_uid(self):
store = c10d.FileStore(self.file_name, self.world_size)
- device = torch.device(f'cuda:{self.rank}')
+ device = torch.device(f"cuda:{self.rank}")
pg = self._create_process_group_nccl(store, self.opts(), device_id=device)
from torch.distributed.distributed_c10d import _get_process_group_uid
+
self.assertEqual(_get_process_group_uid(pg), 0)
pg_2 = c10d.new_group([0, 1])
self.assertEqual(_get_process_group_uid(pg_2), 1)
-
@requires_nccl()
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
def test_set_process_group_desc(self):
store = c10d.FileStore(self.file_name, self.world_size)
- device = torch.device(f'cuda:{self.rank}')
- pg_default = self._create_process_group_nccl(store, self.opts(), device_id=device)
+ device = torch.device(f"cuda:{self.rank}")
+ pg_default = self._create_process_group_nccl(
+ store, self.opts(), device_id=device
+ )
self.assertEqual(pg_default.group_desc, "default_pg")
pg_1 = c10d.new_group([0, 1], group_desc="test_purpose")
self.assertEqual(pg_1.group_desc, "test_purpose")
@@ -1579,7 +1629,9 @@ class DistributedDataParallelTest(
def _get_process_group(self):
store = self._get_store()
- c10d.init_process_group("nccl", store=store, rank=self.rank, world_size=self.world_size)
+ c10d.init_process_group(
+ "nccl", store=store, rank=self.rank, world_size=self.world_size
+ )
return c10d.distributed_c10d._get_default_group()
def _test_nccl_backend(
@@ -1599,7 +1651,9 @@ class DistributedDataParallelTest(
os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1"
store = c10d.FileStore(self.file_name, self.world_size)
# provide sufficient timeout to initialize NCCL comm.
- pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size, timeout=timedelta(seconds=15))
+ pg = c10d.ProcessGroupNCCL(
+ store, self.rank, self.world_size, timeout=timedelta(seconds=15)
+ )
pg_gloo = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
pg.barrier().wait(timedelta(seconds=5))
# Simulate stuckness in rank 0.
@@ -1746,7 +1800,7 @@ class DistributedDataParallelTest(
# Input 2**15, so that the gradients will overflow with a
# world_size of 2, unless we normalize the gradient by the
# world_size before the reduction
- input = torch.tensor([[2 ** 15]]).cuda(gpus[0]).half()
+ input = torch.tensor([[2**15]]).cuda(gpus[0]).half()
# Step model
ddp_model.train()
@@ -2250,7 +2304,9 @@ class DistributedDataParallelTest(
c10d.destroy_process_group(process_group)
store = c10d.FileStore(recovery_filename, self.world_size)
- c10d.init_process_group("nccl", store=store, rank=self.rank, world_size=self.world_size)
+ c10d.init_process_group(
+ "nccl", store=store, rank=self.rank, world_size=self.world_size
+ )
process_group = c10d.distributed_c10d._get_default_group()
ddp = DistributedDataParallel(
model,
@@ -2329,9 +2385,7 @@ class DistributedDataParallelTest(
layer_formats, layer_dtypes, bucketsizes
):
with first_bucket_size(bucketsize):
- model_msg = (
- f"rank = {self.rank} formats = {formats} dtypes = {dtypes} bucketsize = {bucketsize} "
- )
+ model_msg = f"rank = {self.rank} formats = {formats} dtypes = {dtypes} bucketsize = {bucketsize} "
try:
m = ConvNet(layer_devs, formats, dtypes)
m_ddp = DistributedDataParallel(
@@ -2614,7 +2668,9 @@ class DistributedDataParallelTest(
# Get GPU model with the hook registered.
# Test the hook with different algorithmic configs.
for use_error_feedback, warm_start, batch_tensors_with_same_shape in product(
- [True, False], [True, False], [True, False],
+ [True, False],
+ [True, False],
+ [True, False],
):
state = powerSGD.PowerSGDState(
process_group=process_group,
@@ -2852,10 +2908,14 @@ class DistributedDataParallelTest(
seq_tensor = seq_tensor[permutation_idx]
embedded_seq_tensor = embed(seq_tensor)
packed_input = torch.nn.utils.rnn.pack_padded_sequence(
- embedded_seq_tensor, seq_lengths, batch_first=True,
+ embedded_seq_tensor,
+ seq_lengths,
+ batch_first=True,
)
packed_input_ddp = torch.nn.utils.rnn.pack_padded_sequence(
- embedded_seq_tensor.detach().clone(), seq_lengths, batch_first=True,
+ embedded_seq_tensor.detach().clone(),
+ seq_lengths,
+ batch_first=True,
)
# Move the input to GPU explicitly for the local model
packed_output, (ht, ct) = lstm(packed_input.to(self.rank))
@@ -2874,7 +2934,9 @@ class DistributedDataParallelTest(
def test_channels_last_contig(self):
process_group = self._get_process_group()
device = torch.device(f"cuda:{self.rank}")
- tensor = torch.ones((2, 16, 768, 1152), dtype=torch.float32, device=device).to(memory_format=torch.channels_last)
+ tensor = torch.ones((2, 16, 768, 1152), dtype=torch.float32, device=device).to(
+ memory_format=torch.channels_last
+ )
process_group.broadcast([tensor]).wait()
@requires_nccl()
@@ -2885,10 +2947,16 @@ class DistributedDataParallelTest(
super().__init__()
self.hin = hin
self.win = win
- self.weight = nn.Parameter(torch.ones((n_features, n_features, hin, win // 2 + 1), dtype=torch.cfloat))
+ self.weight = nn.Parameter(
+ torch.ones(
+ (n_features, n_features, hin, win // 2 + 1), dtype=torch.cfloat
+ )
+ )
def forward(self, x):
- xc = torch.fft.rfft2(x, s=(self.hin, self.win), dim=(-2, -1), norm="ortho")
+ xc = torch.fft.rfft2(
+ x, s=(self.hin, self.win), dim=(-2, -1), norm="ortho"
+ )
xcw = torch.einsum("nchw,cohw->nohw", xc, self.weight)
x = torch.fft.irfft2(xcw, dim=(-2, -1), norm="ortho")
return x
@@ -2915,7 +2983,6 @@ class DistributedDataParallelTest(
class WorkHookTest(MultiProcessTestCase):
-
@property
def world_size(self):
return 2
@@ -2940,7 +3007,9 @@ class WorkHookTest(MultiProcessTestCase):
def _get_process_group(self):
store = self._get_store()
- c10d.init_process_group("nccl", store=store, rank=self.rank, world_size=self.world_size)
+ c10d.init_process_group(
+ "nccl", store=store, rank=self.rank, world_size=self.world_size
+ )
return c10d.distributed_c10d._get_default_group()
@requires_nccl()
@@ -3007,7 +3076,10 @@ class WorkHookTest(MultiProcessTestCase):
self.assertEqual(
tensor_list,
- [torch.ones([2, 3]).cuda(self.rank) * self.world_size for _ in range(self.world_size)],
+ [
+ torch.ones([2, 3]).cuda(self.rank) * self.world_size
+ for _ in range(self.world_size)
+ ],
)
@requires_nccl()
@@ -3046,7 +3118,11 @@ class WorkHookTest(MultiProcessTestCase):
# from rank0 to other ranks. However, this is DDP's internal implementation,
# which is subject to change in future versions.
self.assertTrue(num_hook_fired[OpType.BROADCAST] > 0)
- ctor_allreduce = num_hook_fired[OpType.ALLREDUCE] if OpType.ALLREDUCE in num_hook_fired else 0
+ ctor_allreduce = (
+ num_hook_fired[OpType.ALLREDUCE]
+ if OpType.ALLREDUCE in num_hook_fired
+ else 0
+ )
x = torch.zeros(2, 1000).cuda(self.rank)
ddp(x).sum().backward()
@@ -3128,6 +3204,7 @@ class WorkHookTest(MultiProcessTestCase):
self.assertEqual(num_hook_fired, work_count)
self.assertEqual(work, seq)
+
class NcclErrorHandlingTest(MultiProcessTestCase):
def setUp(self):
super().setUp()
@@ -3198,7 +3275,9 @@ class NcclErrorHandlingTest(MultiProcessTestCase):
self.assertTrue(t.is_alive())
if prev_nccl_async_error_handling is not None:
- os.environ["TORCH_NCCL_ASYNC_ERROR_HANDLING"] = prev_nccl_async_error_handling
+ os.environ[
+ "TORCH_NCCL_ASYNC_ERROR_HANDLING"
+ ] = prev_nccl_async_error_handling
def _test_nccl_errors_blocking(self, func):
store = c10d.FileStore(self.file_name, self.world_size)
@@ -3286,7 +3365,9 @@ class NcclErrorHandlingTest(MultiProcessTestCase):
# It seems the error message would be different depending on
# whether the test is run on CI machine and devGPU. Skipping
# the error message check to make both sides happy.
- process_group.barrier().wait(timeout=timedelta(seconds=self.op_timeout_sec))
+ process_group.barrier().wait(
+ timeout=timedelta(seconds=self.op_timeout_sec)
+ )
def _run_invalid_nccl_blocking_wait_env(self, val):
os.environ["TORCH_NCCL_BLOCKING_WAIT"] = val
@@ -3317,13 +3398,19 @@ class NcclErrorHandlingTest(MultiProcessTestCase):
# to coordinate btwn ranks.
pg_gloo = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
failed_collective_timeout = timedelta(milliseconds=100)
- process_group.allreduce(torch.rand(10).cuda(self.rank)).wait(timeout=timedelta(seconds=5))
+ process_group.allreduce(torch.rand(10).cuda(self.rank)).wait(
+ timeout=timedelta(seconds=5)
+ )
if self.rank == 0:
# This should timeout in about 1 second.
# Watchdog may abort timed out work resulting in NCCL error instead of operation timed out.
- with self.assertRaisesRegex(dist.DistBackendError, self.blocking_wait_error_msg):
- process_group.allreduce(torch.rand(10).cuda(self.rank)).wait(timeout=failed_collective_timeout)
+ with self.assertRaisesRegex(
+ dist.DistBackendError, self.blocking_wait_error_msg
+ ):
+ process_group.allreduce(torch.rand(10).cuda(self.rank)).wait(
+ timeout=failed_collective_timeout
+ )
# Now do a barrier to tell other rank to go ahead.
pg_gloo.barrier().wait()
else:
@@ -3331,8 +3418,9 @@ class NcclErrorHandlingTest(MultiProcessTestCase):
try:
pg_gloo.barrier().wait()
except Exception as e:
- raise ValueError(f"Rank {self.rank} barrier timed out waiting for rank 0 with error: {str(e)}") from e
-
+ raise ValueError(
+ f"Rank {self.rank} barrier timed out waiting for rank 0 with error: {str(e)}"
+ ) from e
class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
@@ -3340,7 +3428,6 @@ class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
def device(self):
return f"cuda:{self.rank}"
-
def setUp(self):
super().setUp()
# TORCH_NCCL_BLOCKING_WAIT overrides TORCH_NCCL_ASYNC_ERROR_HANDLING hence tests
@@ -3390,7 +3477,9 @@ class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
@skip_if_lt_x_gpu(2)
def test_broadcast_coalesced_nccl(self):
store = c10d.FileStore(self.file_name, self.world_size)
- c10d.init_process_group(backend="nccl", store=store, rank=self.rank, world_size=self.world_size)
+ c10d.init_process_group(
+ backend="nccl", store=store, rank=self.rank, world_size=self.world_size
+ )
process_group = c10d.distributed_c10d._get_default_group()
device = torch.device("cuda:%d" % self.rank)
ranks = [0, 1]
@@ -3401,29 +3490,51 @@ class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
@skip_if_lt_x_gpu(2)
def test_all_reduce_coalesced_nccl(self):
store = c10d.FileStore(self.file_name, self.world_size)
- c10d.init_process_group(backend="nccl", store=store, rank=self.rank, world_size=self.world_size)
+ c10d.init_process_group(
+ backend="nccl", store=store, rank=self.rank, world_size=self.world_size
+ )
process_group = c10d.distributed_c10d._get_default_group()
device = torch.device("cuda:%d" % self.rank)
- tensors = [torch.full((60 + i,), self.rank + 1 + i, device=device, dtype=torch.float) for i in range(5)]
+ tensors = [
+ torch.full((60 + i,), self.rank + 1 + i, device=device, dtype=torch.float)
+ for i in range(5)
+ ]
torch.distributed.all_reduce_coalesced(tensors, group=process_group)
for i, t in enumerate(tensors):
- self.assertEqual(t, torch.full_like(t, self.world_size * (i + (self.world_size + 1.) / 2.)))
+ self.assertEqual(
+ t,
+ torch.full_like(
+ t, self.world_size * (i + (self.world_size + 1.0) / 2.0)
+ ),
+ )
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_all_reduce_coalesced_manager_nccl(self):
store = c10d.FileStore(self.file_name, self.world_size)
- c10d.init_process_group(backend="nccl", store=store, rank=self.rank, world_size=self.world_size)
+ c10d.init_process_group(
+ backend="nccl", store=store, rank=self.rank, world_size=self.world_size
+ )
process_group = c10d.distributed_c10d._get_default_group()
device = torch.device("cuda:%d" % self.rank)
- tensors = [torch.full((60 + i,), self.rank + 1 + i, device=device, dtype=torch.float) for i in range(5)]
- with torch.distributed._coalescing_manager(group=process_group, device=device, async_ops=True) as cm:
+ tensors = [
+ torch.full((60 + i,), self.rank + 1 + i, device=device, dtype=torch.float)
+ for i in range(5)
+ ]
+ with torch.distributed._coalescing_manager(
+ group=process_group, device=device, async_ops=True
+ ) as cm:
for tensor in tensors:
torch.distributed.all_reduce(tensor)
self.assertEqual(len(cm.works), 1)
cm.wait()
for i, t in enumerate(tensors):
- self.assertEqual(t, torch.full_like(t, self.world_size * (i + (self.world_size + 1.) / 2.)))
+ self.assertEqual(
+ t,
+ torch.full_like(
+ t, self.world_size * (i + (self.world_size + 1.0) / 2.0)
+ ),
+ )
@requires_nccl()
@skip_if_lt_x_gpu(2)
@@ -3431,6 +3542,7 @@ class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
def test_intra_node_comm_all_reduce(self):
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
+
for peer in range(self.world_size):
if peer == self.rank:
continue
@@ -3471,13 +3583,15 @@ class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
self.assertTrue(t.eq(expect).all())
self.assertEqual(_get_intra_node_comm_usage_counter(), 2)
- t = torch.full((10 * 1024 ** 2 // 2,), self.rank, dtype=torch.bfloat16).cuda()
+ t = torch.full((10 * 1024**2 // 2,), self.rank, dtype=torch.bfloat16).cuda()
c10d.all_reduce(t, c10d.ReduceOp.SUM)
self.assertTrue(t.eq(expect).all())
self.assertEqual(_get_intra_node_comm_usage_counter(), 3)
# Verify that IntraNodeComm is not used beyond 10MB
- t = torch.full((10 * 1024 ** 2 // 2 + 1,), self.rank, dtype=torch.bfloat16).cuda()
+ t = torch.full(
+ (10 * 1024**2 // 2 + 1,), self.rank, dtype=torch.bfloat16
+ ).cuda()
c10d.all_reduce(t, c10d.ReduceOp.SUM)
self.assertTrue(t.eq(expect).all())
self.assertEqual(_get_intra_node_comm_usage_counter(), 3)
@@ -3535,7 +3649,9 @@ class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
self._test_pass_nccl_options(pg_opts)
@requires_nccl()
- @requires_nccl_version((2, 17), "Need NCCL 2.17+ for configuring NCCL communicators")
+ @requires_nccl_version(
+ (2, 17), "Need NCCL 2.17+ for configuring NCCL communicators"
+ )
@skip_if_lt_x_gpu(2)
def test_pass_nccl_options_config(self):
pg_opts = c10d.ProcessGroupNCCL.Options()
@@ -3552,10 +3668,14 @@ class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
# Tests if comms were configured
nccl_debug_file_content = nccl_debug_file.read()
- max_ctas = re.search(rb'Max CTAs.*(\d+)|$', nccl_debug_file_content).group(1)
- min_ctas = re.search(rb'Min CTAs.*(\d+)|$', nccl_debug_file_content).group(1)
- cga_cluster_size = re.search(rb'CGA cluster.*(\d+)|$', nccl_debug_file_content).group(1)
- net_name = re.search(rb'Using network.([a-zA-z]+)|$', nccl_debug_file_content).group(1)
+ max_ctas = re.search(rb"Max CTAs.*(\d+)|$", nccl_debug_file_content).group(1)
+ min_ctas = re.search(rb"Min CTAs.*(\d+)|$", nccl_debug_file_content).group(1)
+ cga_cluster_size = re.search(
+ rb"CGA cluster.*(\d+)|$", nccl_debug_file_content
+ ).group(1)
+ net_name = re.search(
+ rb"Using network.([a-zA-z]+)|$", nccl_debug_file_content
+ ).group(1)
self.assertEqual(pg_opts.config.max_ctas, int(max_ctas))
self.assertEqual(pg_opts.config.min_ctas, int(min_ctas))
self.assertEqual(pg_opts.config.cga_cluster_size, int(cga_cluster_size))
@@ -3648,8 +3768,8 @@ class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
def test_tensor_dtype_complex(self):
self._test_tensor_dtype_complex(backend="nccl")
-class CompilerTest(test_c10d_common.CompilerTest):
+class CompilerTest(test_c10d_common.CompilerTest):
@property
def world_size(self):
return 2
@@ -3672,9 +3792,7 @@ class CompilerTest(test_c10d_common.CompilerTest):
@skip_if_lt_x_gpu(2)
def test_allgather_work_wait_gpu(self):
- self._test_allgather_work_wait(
- torch.ones(2, 2, device=self.rank) * self.rank
- )
+ self._test_allgather_work_wait(torch.ones(2, 2, device=self.rank) * self.rank)
@skip_if_lt_x_gpu(2)
def test_allgather_into_tensor_work_wait_gpu(self):
@@ -3696,21 +3814,15 @@ class CompilerTest(test_c10d_common.CompilerTest):
@skip_if_lt_x_gpu(2)
def test_broadcast_work_wait_gpu(self):
- self._test_broadcast_work_wait(
- torch.ones(2, 2, device=self.rank) * self.rank
- )
+ self._test_broadcast_work_wait(torch.ones(2, 2, device=self.rank) * self.rank)
@skip_if_lt_x_gpu(2)
def test_scatter_work_wait_gpu(self):
- self._test_scatter_work_wait(
- torch.ones(2, 2, device=self.rank) * self.rank
- )
+ self._test_scatter_work_wait(torch.ones(2, 2, device=self.rank) * self.rank)
@skip_if_lt_x_gpu(2)
def test_alltoall_work_wait_gpu(self):
- self._test_alltoall_work_wait(
- torch.ones(2, 2, device=self.rank) * self.rank
- )
+ self._test_alltoall_work_wait(torch.ones(2, 2, device=self.rank) * self.rank)
@skip_if_lt_x_gpu(2)
def test_nested_comm_tensor_wrapping(self):
@@ -3735,7 +3847,9 @@ class CompilerTest(test_c10d_common.CompilerTest):
store=store,
)
output_tensor = torch.zeros(2, dtype=torch.int64).to(self.rank)
- input_tensors = torch.arange(self.world_size * 2, dtype=torch.int64).to(self.rank)
+ input_tensors = torch.arange(self.world_size * 2, dtype=torch.int64).to(
+ self.rank
+ )
input_tensors = torch.reshape(input_tensors, (self.world_size, 2))
dist.reduce_scatter_tensor(output_tensor, input_tensors)
self.assertEqual(output_tensor, input_tensors[self.rank] * self.world_size)
@@ -3757,11 +3871,15 @@ class CompilerTest(test_c10d_common.CompilerTest):
dist.reduce_scatter_tensor(output_tensors[i], input_tensors[i])
self.assertEqual(output_tensors, input_tensors[self.rank] * self.world_size)
+
class SetDeviceMethod(Enum):
TORCH_CUDA_SET = auto() # torch.cuda.set_device
COLLECTIVE_ARGUMENT = auto() # broadcast_object_list(device=)
-class NcclProcessGroupWithDispatchedCollectivesTests(test_c10d_common.ProcessGroupWithDispatchedCollectivesTests):
+
+class NcclProcessGroupWithDispatchedCollectivesTests(
+ test_c10d_common.ProcessGroupWithDispatchedCollectivesTests
+):
@requires_nccl()
@skip_if_lt_x_gpu(1)
def test_collectives(self):
@@ -3793,6 +3911,7 @@ class NcclProcessGroupWithDispatchedCollectivesTests(test_c10d_common.ProcessGro
dist.all_gather_into_tensor(output_tensor, tensor)
self.assertEqual(output_tensor, tensor)
+
class LargeCommTest(test_c10d_common.AbstractLargeCommTest, MultiProcessTestCase):
def setUp(self):
super().setUp()
@@ -3833,7 +3952,9 @@ class LargeCommTest(test_c10d_common.AbstractLargeCommTest, MultiProcessTestCase
f"need world size of 4 to get 2 subgroup PGs, but got world size of {world_size}"
)
store = c10d.FileStore(self.file_name, world_size)
- c10d.init_process_group(backend="nccl", store=store, rank=self.rank, world_size=world_size)
+ c10d.init_process_group(
+ backend="nccl", store=store, rank=self.rank, world_size=world_size
+ )
# every rank creates the same sub groups
# including unused sub groups in the current rank
a_group = c10d.new_group([0, 1])
@@ -3853,12 +3974,24 @@ class LargeCommTest(test_c10d_common.AbstractLargeCommTest, MultiProcessTestCase
input = torch.ones((10,), device=device) * self.rank
if self.rank == 0 or self.rank == 2:
gather_list = [torch.empty_like(input) for _ in range(subgroup.size())]
- torch.distributed.gather(input, gather_list=gather_list, dst=self.rank, group=subgroup, async_op=False)
+ torch.distributed.gather(
+ input,
+ gather_list=gather_list,
+ dst=self.rank,
+ group=subgroup,
+ async_op=False,
+ )
for src in range(len(gather_list)):
expected = (torch.ones_like(input) * self.rank) + src
self.assertEqual(gather_list[src], expected)
else:
- torch.distributed.gather(input, gather_list=None, dst=self.rank - 1, group=subgroup, async_op=False)
+ torch.distributed.gather(
+ input,
+ gather_list=None,
+ dst=self.rank - 1,
+ group=subgroup,
+ async_op=False,
+ )
@requires_nccl()
@skip_if_lt_x_gpu(4)
@@ -3880,11 +4013,15 @@ class LargeCommTest(test_c10d_common.AbstractLargeCommTest, MultiProcessTestCase
# another weird thing- what's the point of making me specify some empty objects in my list?
# empty list should be valid imo. (but it throws an error)
gather_list = [{}, {}]
- torch.distributed.gather_object(input, object_gather_list=gather_list, dst=self.rank, group=subgroup)
+ torch.distributed.gather_object(
+ input, object_gather_list=gather_list, dst=self.rank, group=subgroup
+ )
for src in range(len(gather_list)):
self.assertEqual(gather_list[src]["rank"], self.rank + src)
else:
- torch.distributed.gather_object(input, object_gather_list=None, dst=self.rank - 1, group=subgroup)
+ torch.distributed.gather_object(
+ input, object_gather_list=None, dst=self.rank - 1, group=subgroup
+ )
@requires_nccl()
@skip_if_lt_x_gpu(4)
@@ -3945,7 +4082,10 @@ class LargeCommTest(test_c10d_common.AbstractLargeCommTest, MultiProcessTestCase
@requires_nccl()
@skip_if_lt_x_gpu(4)
- @parametrize("set_device", [SetDeviceMethod.TORCH_CUDA_SET, SetDeviceMethod.COLLECTIVE_ARGUMENT])
+ @parametrize(
+ "set_device",
+ [SetDeviceMethod.TORCH_CUDA_SET, SetDeviceMethod.COLLECTIVE_ARGUMENT],
+ )
def test_broadcast_object_list_subgroup(self, set_device: SetDeviceMethod):
world_size = 4
if self.rank >= world_size:
@@ -3958,7 +4098,9 @@ class LargeCommTest(test_c10d_common.AbstractLargeCommTest, MultiProcessTestCase
device = torch.device("cuda:%d" % self.rank)
if self.rank == 0 or self.rank == 2:
x = [{}]
- c10d.broadcast_object_list(x, src=self.rank + 1, group=subgroup, device=device)
+ c10d.broadcast_object_list(
+ x, src=self.rank + 1, group=subgroup, device=device
+ )
expected = [{"rank": self.rank + 1}]
self.assertEqual(x, expected)
else:
@@ -3985,7 +4127,6 @@ class LargeCommTest(test_c10d_common.AbstractLargeCommTest, MultiProcessTestCase
c10d.scatter(x, scatter_list=scatter_list, src=self.rank, group=subgroup)
self.assertEqual(x, expected)
-
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_scatter_object_list_subgroup(self):
@@ -4017,8 +4158,10 @@ class LargeCommTest(test_c10d_common.AbstractLargeCommTest, MultiProcessTestCase
)
self.assertEqual(scatter_object_output_list, expected)
+
instantiate_parametrized_tests(LargeCommTest)
+
class SparseCollective(MultiProcessTestCase):
@property
def world_size(self):
@@ -4042,7 +4185,9 @@ class SparseCollective(MultiProcessTestCase):
class ToyModel(nn.Module):
def __init__(self, rank, vocab_size, embedding_dim):
super().__init__()
- self.embedding = nn.Embedding(vocab_size, embedding_dim, sparse=True).to(rank)
+ self.embedding = nn.Embedding(vocab_size, embedding_dim, sparse=True).to(
+ rank
+ )
self.linear = nn.Linear(embedding_dim, 1).to(rank)
def forward(self, inputs):
@@ -4067,12 +4212,14 @@ class SparseCollective(MultiProcessTestCase):
vocab_size = 5
- model = SparseCollective.ToyModel(self.rank, vocab_size=vocab_size, embedding_dim=10)
+ model = SparseCollective.ToyModel(
+ self.rank, vocab_size=vocab_size, embedding_dim=10
+ )
ddp_model = DistributedDataParallel(model)
inputs = torch.tensor([[1, 0, 0], [0, 0, 0], [0, 0, 0]]).to(self.rank)
# set sparse metadata on the DDP model
indices = torch.Tensor(list(range(vocab_size)))
- ddp_model._set_sparse_metadata({"embedding.weight" : indices})
+ ddp_model._set_sparse_metadata({"embedding.weight": indices})
# forward pass
try:
output = ddp_model(inputs)
@@ -4092,22 +4239,26 @@ class SparseCollective(MultiProcessTestCase):
class NCCLTraceTestBase(MultiProcessTestCase):
def setUp(self):
super().setUp()
- os.environ["TORCH_NCCL_ENABLE_TIMING"] = '0' # see 'timing_enabled' parametrized tests
- os.environ["TORCH_NCCL_TRACE_BUFFER_SIZE"] = '1000'
- os.environ["TORCH_NCCL_DUMP_ON_TIMEOUT"] = '1'
+ os.environ[
+ "TORCH_NCCL_ENABLE_TIMING"
+ ] = "0" # see 'timing_enabled' parametrized tests
+ os.environ["TORCH_NCCL_TRACE_BUFFER_SIZE"] = "1000"
+ os.environ["TORCH_NCCL_DUMP_ON_TIMEOUT"] = "1"
self.tempdir = tempfile.TemporaryDirectory()
os.environ["TORCH_NCCL_DEBUG_INFO_TEMP_FILE"] = self._trace_basename()
os.environ["TORCH_NCCL_DEBUG_INFO_PIPE_FILE"] = self._trace_basename()
self._spawn_processes()
@classmethod
- def _run(cls, parent_conn, rank: int, test_name: str, file_name: str, parent_pipe) -> None:
+ def _run(
+ cls, parent_conn, rank: int, test_name: str, file_name: str, parent_pipe
+ ) -> None:
cls.parent = parent_conn
super()._run(rank, test_name, file_name, parent_pipe)
@property
def local_device(self):
- return torch.device('cuda', self.rank_to_GPU[self.rank][0])
+ return torch.device("cuda", self.rank_to_GPU[self.rank][0])
def _join_processes(self, fn):
fn()
@@ -4126,15 +4277,14 @@ class NCCLTraceTestBase(MultiProcessTestCase):
def wrap(*positional, args, **kwargs):
args = (next(piter), *args)
return proc(*positional, args=args, **kwargs)
+
self._start_processes(wrap)
def _create_process_group_nccl(self):
store = dist.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
- "nccl",
- world_size=self.world_size,
- rank=self.rank,
- store=store)
+ "nccl", world_size=self.world_size, rank=self.rank, store=store
+ )
pg = c10d.distributed_c10d._get_default_group()
return pg
@@ -4164,8 +4314,8 @@ class NCCLTraceTestBase(MultiProcessTestCase):
def started_or_scheduled(self, timing_enabled):
return "started" if timing_enabled else "scheduled"
-class NCCLTraceTest(NCCLTraceTestBase):
+class NCCLTraceTest(NCCLTraceTestBase):
@requires_nccl()
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
@parametrize("timing_enabled", [True, False])
@@ -4186,39 +4336,41 @@ class NCCLTraceTest(NCCLTraceTestBase):
time.sleep(1)
t = pickle.loads(torch._C._distributed_c10d._dump_nccl_trace())
- ver = t['version']
+ ver = t["version"]
self.assertEqual(ver, "1.5")
- pg_config = t['pg_config']
+ pg_config = t["pg_config"]
self.assertEqual(len(pg_config), 1)
- default_pg_info = pg_config['0']
- self.assertIn('name', default_pg_info)
- self.assertIn('desc', default_pg_info)
- self.assertIn('ranks', default_pg_info)
- global_ranks = pg_config['0']['ranks']
+ default_pg_info = pg_config["0"]
+ self.assertIn("name", default_pg_info)
+ self.assertIn("desc", default_pg_info)
+ self.assertIn("ranks", default_pg_info)
+ global_ranks = pg_config["0"]["ranks"]
self.assertEqual(len(json.loads(global_ranks)), self.world_size)
- t = t['entries']
+ t = t["entries"]
self.assertEqual(len(t), 2)
last = t[-1]
- self.assertEqual(last['process_group'], ('0', 'default_pg'))
- self.assertEqual(last['state'], 'completed')
- s = last['time_discovered_started_ns']
- f = last['time_discovered_completed_ns']
- self.assertEqual(last['record_id'], 1)
+ self.assertEqual(last["process_group"], ("0", "default_pg"))
+ self.assertEqual(last["state"], "completed")
+ s = last["time_discovered_started_ns"]
+ f = last["time_discovered_completed_ns"]
+ self.assertEqual(last["record_id"], 1)
self.assertIsNotNone(f)
if timing_enabled:
self.assertIsNotNone(s)
self.assertTrue(s <= f)
- self.assertIn('test_c10d_nccl.py', str(last['frames']))
- self.assertEqual(last['input_sizes'], ((3, 4),))
- self.assertEqual(last['output_sizes'], ((3, 4),))
- self.assertEqual(last['seq_id'], 2)
+ self.assertIn("test_c10d_nccl.py", str(last["frames"]))
+ self.assertEqual(last["input_sizes"], ((3, 4),))
+ self.assertEqual(last["output_sizes"], ((3, 4),))
+ self.assertEqual(last["seq_id"], 2)
now = datetime.now()
- event_created_time = datetime.fromtimestamp(last['time_created_ns'] / 1000000000)
+ event_created_time = datetime.fromtimestamp(
+ last["time_created_ns"] / 1000000000
+ )
before_test = now - timedelta(minutes=1)
self.assertTrue(before_test < event_created_time < now)
if timing_enabled:
# very loose bounds, measured 0.036 ms on devgpu
- self.assertTrue(0 < last['duration_ms'] < 100)
+ self.assertTrue(0 < last["duration_ms"] < 100)
else:
self.assertTrue("duration_ms" not in last)
@@ -4230,22 +4382,22 @@ class NCCLTraceTest(NCCLTraceTestBase):
while time.time() - start_time < timeout:
if os.path.exists(file_path):
return open(file_path, mode)
- time.sleep(.1)
+ time.sleep(0.1)
raise FileNotFoundError
if self.rank == self.MAIN_PROCESS_RANK:
for c in self.children_pipes:
- self.assertEqual(c.recv(), 'next')
+ self.assertEqual(c.recv(), "next")
dump_file = self._trace_name(rank=0)
pipe_file = dump_file + ".pipe"
- with open_file_with_timeout(pipe_file, 'w') as f:
- f.write('1\n')
- with open_file_with_timeout(dump_file, 'rb', timeout=10.0) as f:
- self.assertTrue('all_reduce' in str(pickle.load(f)))
+ with open_file_with_timeout(pipe_file, "w") as f:
+ f.write("1\n")
+ with open_file_with_timeout(dump_file, "rb", timeout=10.0) as f:
+ self.assertTrue("all_reduce" in str(pickle.load(f)))
for c in self.children_pipes:
- c.send('next')
+ c.send("next")
return
pg = self._create_process_group_nccl()
@@ -4255,13 +4407,13 @@ class NCCLTraceTest(NCCLTraceTestBase):
f = pg.allreduce(a)
f.wait()
torch.cuda.synchronize(device=device)
- self.parent.send('next')
+ self.parent.send("next")
self.parent.recv()
@requires_nccl()
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
def test_long(self):
- os.environ["TORCH_NCCL_TRACE_BUFFER_SIZE"] = '10'
+ os.environ["TORCH_NCCL_TRACE_BUFFER_SIZE"] = "10"
if self.rank == self.MAIN_PROCESS_RANK:
return
pg = self._create_process_group_nccl()
@@ -4281,16 +4433,16 @@ class NCCLTraceTest(NCCLTraceTestBase):
f.wait()
torch.cuda.synchronize(device=device)
t = pickle.loads(torch._C._distributed_c10d._dump_nccl_trace())
- t = t['entries']
+ t = t["entries"]
self.assertEqual(len(t), 10)
first = t[0]
last = t[-1]
- self.assertEqual(last['profiling_name'], 'nccl:all_reduce')
- self.assertEqual(last['state'], 'completed')
- self.assertIn('test_c10d_nccl.py', str(last['frames']))
- self.assertEqual(last['input_sizes'], ((3, 4),))
- self.assertEqual(last['output_sizes'], ((3, 4),))
- self.assertEqual(last['seq_id'] - first['seq_id'], 9)
+ self.assertEqual(last["profiling_name"], "nccl:all_reduce")
+ self.assertEqual(last["state"], "completed")
+ self.assertIn("test_c10d_nccl.py", str(last["frames"]))
+ self.assertEqual(last["input_sizes"], ((3, 4),))
+ self.assertEqual(last["output_sizes"], ((3, 4),))
+ self.assertEqual(last["seq_id"] - first["seq_id"], 9)
@requires_nccl()
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
@@ -4298,9 +4450,9 @@ class NCCLTraceTest(NCCLTraceTestBase):
def test_trace_while_active(self, timing_enabled):
if self.rank == self.MAIN_PROCESS_RANK:
for c in self.children_pipes:
- self.assertEqual(c.recv(), 'next')
+ self.assertEqual(c.recv(), "next")
for c in self.children_pipes:
- c.send('next')
+ c.send("next")
return
pg = self._create_process_group_nccl()
@@ -4317,17 +4469,19 @@ class NCCLTraceTest(NCCLTraceTestBase):
pg.allreduce(a).wait()
e.synchronize()
t = pickle.loads(torch._C._distributed_c10d._dump_nccl_trace())
- t = t['entries']
- self.assertEqual(t[-1]['profiling_name'], 'nccl:all_reduce')
+ t = t["entries"]
+ self.assertEqual(t[-1]["profiling_name"], "nccl:all_reduce")
if self.rank == 0:
- self.assertEqual(t[-1]['seq_id'], 1)
- self.assertEqual(t[-1]['state'], 'completed')
+ self.assertEqual(t[-1]["seq_id"], 1)
+ self.assertEqual(t[-1]["state"], "completed")
else:
- self.assertEqual(t[-1]['seq_id'], 2)
- self.assertEqual(t[-1]['state'], self.started_or_scheduled(timing_enabled))
+ self.assertEqual(t[-1]["seq_id"], 2)
+ self.assertEqual(
+ t[-1]["state"], self.started_or_scheduled(timing_enabled)
+ )
- self.parent.send('next')
- self.assertEqual('next', self.parent.recv())
+ self.parent.send("next")
+ self.assertEqual("next", self.parent.recv())
if self.rank == 0:
pg.allreduce(a).wait()
torch.cuda.synchronize(device=device)
@@ -4338,9 +4492,9 @@ class NCCLTraceTest(NCCLTraceTestBase):
def test_trace_while_stuck(self, timing_enabled):
if self.rank == self.MAIN_PROCESS_RANK:
for c in self.children_pipes:
- self.assertEqual(c.recv(), 'next')
+ self.assertEqual(c.recv(), "next")
for c in self.children_pipes:
- c.send('next')
+ c.send("next")
return
pg = self._create_process_group_nccl()
@@ -4360,18 +4514,20 @@ class NCCLTraceTest(NCCLTraceTestBase):
# give the other thread some time to fill the cuda buffer
time.sleep(5)
t = pickle.loads(torch._C._distributed_c10d._dump_nccl_trace())
- t = t['entries']
- self.assertEqual(t[-1]['profiling_name'], 'nccl:all_reduce')
+ t = t["entries"]
+ self.assertEqual(t[-1]["profiling_name"], "nccl:all_reduce")
if self.rank == 0:
- self.assertEqual(t[-1]['seq_id'], 1)
- self.assertEqual(t[-1]['state'], 'completed')
+ self.assertEqual(t[-1]["seq_id"], 1)
+ self.assertEqual(t[-1]["state"], "completed")
else:
- self.assertEqual(t[-1]['seq_id'], 2)
- self.assertEqual(t[-1]['state'], self.started_or_scheduled(timing_enabled))
- self.assertIsNone(t[-1]['time_discovered_completed_ns'])
+ self.assertEqual(t[-1]["seq_id"], 2)
+ self.assertEqual(
+ t[-1]["state"], self.started_or_scheduled(timing_enabled)
+ )
+ self.assertIsNone(t[-1]["time_discovered_completed_ns"])
# this will eventually cause the missing rank 0
# to continue which will unblock the non-zero ranks
- self.parent.send('next')
+ self.parent.send("next")
if self.rank != 0:
pg.allreduce(a).wait()
@@ -4385,17 +4541,20 @@ class NCCLTraceTest(NCCLTraceTestBase):
else:
gather_trace()
- self.assertEqual('next', self.parent.recv())
+ self.assertEqual("next", self.parent.recv())
if self.rank == 0:
pg.allreduce(a).wait()
torch.cuda.synchronize(device=device)
@requires_nccl()
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
- @parametrize("op_sizes_per_coalesce", [
- [(2, 3)],
- [(2, 3), (5, 5), (1,)],
- ])
+ @parametrize(
+ "op_sizes_per_coalesce",
+ [
+ [(2, 3)],
+ [(2, 3), (5, 5), (1,)],
+ ],
+ )
@parametrize("timing_enabled", [True, False])
def test_batched_send_recv(self, op_sizes_per_coalesce, timing_enabled):
"""
@@ -4430,7 +4589,7 @@ class NCCLTraceTest(NCCLTraceTestBase):
time.sleep(1)
t = pickle.loads(torch._C._distributed_c10d._dump_nccl_trace())
- self.assertEqual(len(t['entries']), num_coalesced_ops * (ops_per_coalesce + 1))
+ self.assertEqual(len(t["entries"]), num_coalesced_ops * (ops_per_coalesce + 1))
expected_record_id = 0
expected_seq = 1
@@ -4438,44 +4597,61 @@ class NCCLTraceTest(NCCLTraceTestBase):
for seq in range(num_coalesced_ops):
first_op = seq * (ops_per_coalesce + 1)
coalesced_op = first_op + ops_per_coalesce
- for p2p_op_idx, input_sizes in zip(range(first_op, coalesced_op, 1), op_sizes_per_coalesce):
+ for p2p_op_idx, input_sizes in zip(
+ range(first_op, coalesced_op, 1), op_sizes_per_coalesce
+ ):
                 # the individual ops inside the coalescing group record the individual op metadata,
# but not the timing info coming from the actual coalesced kernel
- profiling_name = 'nccl:recv 0<-1' if self.rank == 0 else 'nccl:send 1->0'
- self.assertEqual(t['entries'][p2p_op_idx]['record_id'], expected_record_id)
+ profiling_name = (
+ "nccl:recv 0<-1" if self.rank == 0 else "nccl:send 1->0"
+ )
+ self.assertEqual(
+ t["entries"][p2p_op_idx]["record_id"], expected_record_id
+ )
expected_record_id += 1
- self.assertEqual(t['entries'][p2p_op_idx]['profiling_name'], profiling_name)
- self.assertEqual(t['entries'][p2p_op_idx]['seq_id'], expected_seq)
- self.assertEqual(t['entries'][p2p_op_idx]['op_id'], expected_op_id)
+ self.assertEqual(
+ t["entries"][p2p_op_idx]["profiling_name"], profiling_name
+ )
+ self.assertEqual(t["entries"][p2p_op_idx]["seq_id"], expected_seq)
+ self.assertEqual(t["entries"][p2p_op_idx]["op_id"], expected_op_id)
expected_op_id += 1
- self.assertEqual(t['entries'][p2p_op_idx]['input_sizes'], [input_sizes])
- self.assertEqual(t['entries'][p2p_op_idx]['output_sizes'], [input_sizes])
+ self.assertEqual(t["entries"][p2p_op_idx]["input_sizes"], [input_sizes])
+ self.assertEqual(
+ t["entries"][p2p_op_idx]["output_sizes"], [input_sizes]
+ )
# duration doesn't get tagged onto individual ops yet, nor is their state updated
- self.assertEqual(t['entries'][p2p_op_idx]['state'], 'scheduled')
- self.assertTrue('duration_ms' not in t['entries'][p2p_op_idx])
+ self.assertEqual(t["entries"][p2p_op_idx]["state"], "scheduled")
+ self.assertTrue("duration_ms" not in t["entries"][p2p_op_idx])
# the coalesced op has no metadata but indicates that coalescing was used,
# and accurately reflects the timing and state info for the whole group
- self.assertEqual(t['entries'][coalesced_op]['record_id'], expected_record_id)
+ self.assertEqual(
+ t["entries"][coalesced_op]["record_id"], expected_record_id
+ )
expected_record_id += 1
- self.assertEqual(t['entries'][coalesced_op]['profiling_name'], 'nccl:coalesced')
- self.assertEqual(t['entries'][coalesced_op]['seq_id'], expected_seq)
+ self.assertEqual(
+ t["entries"][coalesced_op]["profiling_name"], "nccl:coalesced"
+ )
+ self.assertEqual(t["entries"][coalesced_op]["seq_id"], expected_seq)
expected_seq += 1
- self.assertEqual(t['entries'][coalesced_op]['state'], 'completed')
- self.assertEqual(t['entries'][coalesced_op]['input_sizes'], [])
- self.assertEqual(t['entries'][coalesced_op]['output_sizes'], [])
+ self.assertEqual(t["entries"][coalesced_op]["state"], "completed")
+ self.assertEqual(t["entries"][coalesced_op]["input_sizes"], [])
+ self.assertEqual(t["entries"][coalesced_op]["output_sizes"], [])
if timing_enabled:
- duration = t['entries'][coalesced_op]['duration_ms']
+ duration = t["entries"][coalesced_op]["duration_ms"]
self.assertTrue(0.001 < duration < 10000, duration)
else:
- self.assertTrue('duration_ms' not in t['entries'][coalesced_op])
+ self.assertTrue("duration_ms" not in t["entries"][coalesced_op])
@requires_nccl()
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
- @parametrize("op_sizes", [
- [(2, 3)],
- [(2, 3), (5, 5), (1,)],
- ])
+ @parametrize(
+ "op_sizes",
+ [
+ [(2, 3)],
+ [(2, 3), (5, 5), (1,)],
+ ],
+ )
@parametrize("timing_enabled", [True, False])
def test_individual_send_recv(self, op_sizes, timing_enabled):
"""
@@ -4505,26 +4681,26 @@ class NCCLTraceTest(NCCLTraceTestBase):
time.sleep(1)
t = pickle.loads(torch._C._distributed_c10d._dump_nccl_trace())
- self.assertEqual(len(t['entries']), num_repeats * (ops_per_repeat))
+ self.assertEqual(len(t["entries"]), num_repeats * (ops_per_repeat))
expected_seq = 1
expected_op_id = 1
for seq in range(num_repeats * ops_per_repeat):
input_sizes = op_sizes[seq % ops_per_repeat]
- profiling_name = 'nccl:recv 0<-1' if self.rank == 0 else 'nccl:send 1->0'
- self.assertEqual(t['entries'][seq]['profiling_name'], profiling_name)
- self.assertEqual(t['entries'][seq]['seq_id'], expected_seq)
+ profiling_name = "nccl:recv 0<-1" if self.rank == 0 else "nccl:send 1->0"
+ self.assertEqual(t["entries"][seq]["profiling_name"], profiling_name)
+ self.assertEqual(t["entries"][seq]["seq_id"], expected_seq)
expected_seq += 1
- self.assertEqual(t['entries'][seq]['op_id'], expected_op_id)
+ self.assertEqual(t["entries"][seq]["op_id"], expected_op_id)
expected_op_id += 1
- self.assertEqual(t['entries'][seq]['input_sizes'], [input_sizes])
- self.assertEqual(t['entries'][seq]['output_sizes'], [input_sizes])
- self.assertEqual(t['entries'][seq]['state'], 'completed')
+ self.assertEqual(t["entries"][seq]["input_sizes"], [input_sizes])
+ self.assertEqual(t["entries"][seq]["output_sizes"], [input_sizes])
+ self.assertEqual(t["entries"][seq]["state"], "completed")
if timing_enabled:
- duration = t['entries'][seq]['duration_ms']
+ duration = t["entries"][seq]["duration_ms"]
self.assertTrue(0.001 < duration < 10000, duration)
else:
- self.assertTrue('duration_ms' not in t['entries'][seq])
+ self.assertTrue("duration_ms" not in t["entries"][seq])
# TODO(whc) support and test coalesced collectives that use the c++ start/end group thingy instead of python
# coalescing manager
@@ -4566,17 +4742,32 @@ class NCCLTraceTest(NCCLTraceTestBase):
t = pickle.loads(torch._C._distributed_c10d._dump_nccl_trace())
- self.assertEqual(len(t['entries']), 1) # one for the reduce_scatter_tensor_coalesced, one for the endCoalescing
- self.assertEqual(t['entries'][0]['profiling_name'], "nccl:reduce_scatter_tensor_coalesced")
- self.assertEqual(t['entries'][0]['seq_id'], 1)
- self.assertEqual(t['entries'][0]['input_sizes'], [[2, 2], [2, 2]])
- self.assertEqual(t['entries'][0]['output_sizes'], [[2,], [2,]])
- self.assertEqual(t['entries'][0]['state'], 'completed')
+ self.assertEqual(
+ len(t["entries"]), 1
+ ) # one for the reduce_scatter_tensor_coalesced, one for the endCoalescing
+ self.assertEqual(
+ t["entries"][0]["profiling_name"], "nccl:reduce_scatter_tensor_coalesced"
+ )
+ self.assertEqual(t["entries"][0]["seq_id"], 1)
+ self.assertEqual(t["entries"][0]["input_sizes"], [[2, 2], [2, 2]])
+ self.assertEqual(
+ t["entries"][0]["output_sizes"],
+ [
+ [
+ 2,
+ ],
+ [
+ 2,
+ ],
+ ],
+ )
+ self.assertEqual(t["entries"][0]["state"], "completed")
if timing_enabled:
- duration = t['entries'][0]['duration_ms']
+ duration = t["entries"][0]["duration_ms"]
self.assertTrue(0.001 < duration < 10000, duration)
else:
- self.assertTrue('duration_ms' not in t['entries'][0])
+ self.assertTrue("duration_ms" not in t["entries"][0])
+
class NCCLTraceTestDumpOnTimeoutBase(NCCLTraceTestBase):
timeout_sec = 1
@@ -4588,7 +4779,8 @@ class NCCLTraceTestDumpOnTimeoutBase(NCCLTraceTestBase):
world_size=self.world_size,
rank=self.rank,
store=store,
- timeout=timedelta(seconds=NCCLTraceTestDumpOnTimeoutBase.timeout_sec))
+ timeout=timedelta(seconds=NCCLTraceTestDumpOnTimeoutBase.timeout_sec),
+ )
pg = c10d.distributed_c10d._get_default_group()
return pg
@@ -4605,28 +4797,31 @@ class NCCLTraceTestDumpOnTimeoutBase(NCCLTraceTestBase):
except TimeoutError:
return None
+
class NCCLTraceTestDumpOnTimeout(NCCLTraceTestDumpOnTimeoutBase):
@requires_nccl()
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
@parametrize("timing_enabled", [True, False])
def test_timeout_dumps(self, timing_enabled):
# dump on heartbeatmonitor thread
- os.environ['TORCH_NCCL_COORD_CHECK_MILSEC'] = '1000'
+ os.environ["TORCH_NCCL_COORD_CHECK_MILSEC"] = "1000"
# need rank0 to crash before looking for its output file
- os.environ['TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC'] = '1'
+ os.environ["TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC"] = "1"
if self.rank == self.MAIN_PROCESS_RANK:
# wait for rank0 to crash before looking for its output file
# we rely on rank0 holding off its abort long enough to dump the debug info
self.assertEqual(self._wait_process(0, timeout=90), -6)
- with open(self._trace_name(rank=0), 'rb') as f:
+ with open(self._trace_name(rank=0), "rb") as f:
t = pickle.load(f)
- t = t['entries']
+ t = t["entries"]
self.assertEqual(len(t), 2)
- self.assertEqual(t[0]['seq_id'], 1)
- self.assertEqual(t[0]['state'], 'completed')
- self.assertEqual(t[1]['seq_id'], 2)
- self.assertEqual(t[1]['state'], self.started_or_scheduled(timing_enabled))
+ self.assertEqual(t[0]["seq_id"], 1)
+ self.assertEqual(t[0]["state"], "completed")
+ self.assertEqual(t[1]["seq_id"], 2)
+ self.assertEqual(
+ t[1]["state"], self.started_or_scheduled(timing_enabled)
+ )
self.assertFalse(os.path.exists(self._trace_name(rank=1)))
@@ -4648,10 +4843,12 @@ class NCCLTraceTestDumpOnTimeout(NCCLTraceTestDumpOnTimeoutBase):
# rank 0 will crash before it passes the sync, but rank1 will exit quickly and cleanly
torch.cuda.synchronize()
+
instantiate_parametrized_tests(ProcessGroupNCCLTest)
instantiate_parametrized_tests(NCCLTraceTestDumpOnTimeout)
instantiate_parametrized_tests(NCCLTraceTest)
+
class NCCLTraceTestTimeoutDumpOnStuckRanks(NCCLTraceTestDumpOnTimeoutBase):
def _check_return_codes(self, elapsed_time):
# the base test infra assumes processes exit with matching return codes,
@@ -4663,9 +4860,9 @@ class NCCLTraceTestTimeoutDumpOnStuckRanks(NCCLTraceTestDumpOnTimeoutBase):
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
def test_timeout_dumps_on_stuck_ranks(self):
# need rank0 to crash quicker after detecting timeout
- os.environ['TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC'] = '1'
+ os.environ["TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC"] = "1"
# restore this env var to its prior default in case another test changed it
- os.environ['TORCH_NCCL_COORD_CHECK_MILSEC'] = '1000'
+ os.environ["TORCH_NCCL_COORD_CHECK_MILSEC"] = "1000"
if self.rank == self.MAIN_PROCESS_RANK:
# wait for both rank0 and 1 to crash before looking for both ranks' output
@@ -4674,16 +4871,16 @@ class NCCLTraceTestTimeoutDumpOnStuckRanks(NCCLTraceTestDumpOnTimeoutBase):
self.assertEqual(self._wait_process(1, timeout=90), -6)
self.assertTrue(os.path.exists(self._trace_name(rank=1)))
self.assertTrue(os.path.exists(self._trace_name(rank=0)))
- with open(self._trace_name(rank=0), 'rb') as f:
+ with open(self._trace_name(rank=0), "rb") as f:
t = pickle.load(f)
- t = t['entries']
+ t = t["entries"]
self.assertEqual(len(t), 2)
- with open(self._trace_name(rank=1), 'rb') as f:
+ with open(self._trace_name(rank=1), "rb") as f:
t = pickle.load(f)
- t = t['entries']
+ t = t["entries"]
self.assertEqual(len(t), 1)
- self.assertEqual(t[0]['seq_id'], 1)
- self.assertEqual(t[0]['state'], 'completed')
+ self.assertEqual(t[0]["seq_id"], 1)
+ self.assertEqual(t[0]["state"], "completed")
return
pg = self._create_process_group_nccl()
@@ -4703,6 +4900,7 @@ class NCCLTraceTestTimeoutDumpOnStuckRanks(NCCLTraceTestDumpOnTimeoutBase):
# getting the global signal to dump the debugging info.
time.sleep(600)
+
class NcclErrorDumpTest(NCCLTraceTestBase):
def _wait_process(self, rank, timeout):
try:
@@ -4723,10 +4921,10 @@ class NcclErrorDumpTest(NCCLTraceTestBase):
@skip_if_rocm
def test_nccl_errors_dump(self):
os.environ["TORCH_NCCL_ASYNC_ERROR_HANDLING"] = "1"
- os.environ["TORCH_NCCL_TRACE_BUFFER_SIZE"] = '1000'
- os.environ["TORCH_NCCL_DUMP_ON_TIMEOUT"] = '1'
+ os.environ["TORCH_NCCL_TRACE_BUFFER_SIZE"] = "1000"
+ os.environ["TORCH_NCCL_DUMP_ON_TIMEOUT"] = "1"
# need rank0 to dump before abort
- os.environ['TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC'] = '5'
+ os.environ["TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC"] = "5"
if self.rank == self.MAIN_PROCESS_RANK:
# wait for both rank0 and 1 to crash before looking for dump
diff --git a/test/distributed/test_c10d_object_collectives.py b/test/distributed/test_c10d_object_collectives.py
index aadd3b2f5f..f8a4731f47 100644
--- a/test/distributed/test_c10d_object_collectives.py
+++ b/test/distributed/test_c10d_object_collectives.py
@@ -2,7 +2,7 @@
import os
import sys
-from functools import wraps, partial
+from functools import partial, wraps
import torch
import torch.distributed as dist
@@ -11,23 +11,21 @@ if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
-from torch.testing._internal.common_distributed import (
- MultiProcessTestCase,
- TEST_SKIPS
-)
+from torch.testing._internal.common_distributed import MultiProcessTestCase, TEST_SKIPS
-from torch.testing._internal.common_utils import (
- run_tests,
- TEST_WITH_DEV_DBG_ASAN,
-)
+from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
if TEST_WITH_DEV_DBG_ASAN:
- print("Skip dev-asan as torch + multiprocessing spawn have known issues", file=sys.stderr)
+ print(
+ "Skip dev-asan as torch + multiprocessing spawn have known issues",
+ file=sys.stderr,
+ )
sys.exit(0)
BACKEND = dist.Backend.NCCL if torch.cuda.is_available() else dist.Backend.GLOO
WORLD_SIZE = min(4, max(2, torch.cuda.device_count()))
+
def with_comms(func=None):
if func is None:
return partial(
@@ -41,8 +39,10 @@ def with_comms(func=None):
self.dist_init()
func(self)
self.destroy_comms()
+
return wrapper
+
class TestObjectCollectives(MultiProcessTestCase):
def setUp(self):
super().setUp()
@@ -52,8 +52,11 @@ class TestObjectCollectives(MultiProcessTestCase):
@property
def device(self):
- return torch.device(self.rank) if BACKEND == dist.Backend.NCCL \
+ return (
+ torch.device(self.rank)
+ if BACKEND == dist.Backend.NCCL
else torch.device("cpu")
+ )
@property
def world_size(self):
@@ -83,9 +86,7 @@ class TestObjectCollectives(MultiProcessTestCase):
@with_comms()
def test_all_gather_object(self):
output = [None] * dist.get_world_size()
- dist.all_gather_object(
- object_list=output,
- obj=self.rank)
+ dist.all_gather_object(object_list=output, obj=self.rank)
for i, v in enumerate(output):
self.assertEqual(i, v, f"rank: {self.rank}")
@@ -93,15 +94,12 @@ class TestObjectCollectives(MultiProcessTestCase):
@with_comms()
def test_gather_object(self):
output = [None] * dist.get_world_size() if self.rank == 0 else None
- dist.gather_object(
- obj=self.rank,
- object_gather_list=output)
+ dist.gather_object(obj=self.rank, object_gather_list=output)
if self.rank == 0:
for i, v in enumerate(output):
self.assertEqual(i, v, f"rank: {self.rank}")
-
@with_comms()
def test_broadcast_object_list(self):
val = 99 if self.rank == 0 else None
@@ -116,8 +114,8 @@ class TestObjectCollectives(MultiProcessTestCase):
input_list = list(range(dist.get_world_size())) if self.rank == 0 else None
output_list = [None]
dist.scatter_object_list(
- scatter_object_output_list=output_list,
- scatter_object_input_list=input_list)
+ scatter_object_output_list=output_list, scatter_object_input_list=input_list
+ )
self.assertEqual(self.rank, output_list[0])
@@ -161,5 +159,6 @@ class TestObjectCollectives(MultiProcessTestCase):
dist.broadcast_object_list(out_list, src=ranks[0], group=my_pg)
self.assertEqual(ranks[0], out_list[0])
+
if __name__ == "__main__":
run_tests()
diff --git a/test/distributed/test_c10d_pypg.py b/test/distributed/test_c10d_pypg.py
index 32f3359185..1d6743f038 100644
--- a/test/distributed/test_c10d_pypg.py
+++ b/test/distributed/test_c10d_pypg.py
@@ -1,27 +1,26 @@
# Owner(s): ["oncall: distributed"]
import os
+import weakref
+
+import test_c10d_common
import torch
import torch.distributed as dist
-from torch.testing._internal.common_utils import (
- run_tests,
-)
-from torch.futures import Future
import torch.nn as nn
-from torch.nn.parallel import DistributedDataParallel as DDP
-import test_c10d_common
-import weakref
from torch._C._distributed_c10d import _create_work_from_future
-from torch.testing._internal.common_distributed import (
- MultiProcessTestCase,
-)
+from torch.futures import Future
+from torch.nn.parallel import DistributedDataParallel as DDP
+from torch.testing._internal.common_distributed import MultiProcessTestCase
+from torch.testing._internal.common_utils import run_tests
+
def create_work(result):
future = Future()
future.set_result(result)
return _create_work_from_future(future)
+
class MyWork(dist._Work):
def __init__(self, result, pg):
super().__init__()
@@ -38,10 +37,12 @@ class MyWork(dist._Work):
self.pg_().get_future_count += 1
return self.future_
+
class LonelyRankProcessGroup(dist.ProcessGroup):
"""
This PG only supports world_size of 1
"""
+
def __init__(self, rank, world, use_wrapper):
super().__init__(rank, world)
assert rank == 0
@@ -88,6 +89,7 @@ class LonelyRankProcessGroup(dist.ProcessGroup):
def __repr__(self):
return f"PLG w:{self._world} r:{self._rank}"
+
# We cannot use parametrize as some tests are defined on the base class and use _get_process_group
class AbstractDDPSingleRank(test_c10d_common.CommonDistributedDataParallelTest):
def setUp(self):
@@ -112,10 +114,7 @@ class AbstractDDPSingleRank(test_c10d_common.CommonDistributedDataParallelTest):
pg = self._get_process_group()
torch.manual_seed(123)
- model = nn.Sequential(
- nn.Linear(2, 2),
- nn.ReLU()
- )
+ model = nn.Sequential(nn.Linear(2, 2), nn.ReLU())
wrapped_model = model
input_tensor = torch.rand(2)
model = DDP(model, process_group=pg)
@@ -138,17 +137,22 @@ class AbstractDDPSingleRank(test_c10d_common.CommonDistributedDataParallelTest):
def test_ddp_with_pypg_with_grad_views(self):
pg = self._get_process_group()
- self._test_ddp_with_process_group(pg, [torch.device("cpu")], device_ids=None, gradient_as_bucket_view=True)
+ self._test_ddp_with_process_group(
+ pg, [torch.device("cpu")], device_ids=None, gradient_as_bucket_view=True
+ )
+
class TestDDPWithWorkSubclass(AbstractDDPSingleRank, MultiProcessTestCase):
@property
def use_wrapper(self):
return False
+
class TestDDPWithWorkWrapper(AbstractDDPSingleRank, MultiProcessTestCase):
@property
def use_wrapper(self):
return True
-if __name__ == '__main__':
+
+if __name__ == "__main__":
run_tests()
diff --git a/test/distributed/test_c10d_spawn.py b/test/distributed/test_c10d_spawn.py
index a6fb33d4ea..97dc628c8a 100644
--- a/test/distributed/test_c10d_spawn.py
+++ b/test/distributed/test_c10d_spawn.py
@@ -7,10 +7,8 @@ import tempfile
import torch
import torch.distributed as c10d
import torch.multiprocessing as mp
-from torch.testing._internal.common_distributed import \
- MultiProcessTestCase
-from torch.testing._internal.common_utils import load_tests, \
- NO_MULTIPROCESSING_SPAWN
+from torch.testing._internal.common_distributed import MultiProcessTestCase
+from torch.testing._internal.common_utils import load_tests, NO_MULTIPROCESSING_SPAWN
# Torch distributed.nn is not available in windows
# check #42095, it errors on import.
@@ -25,11 +23,11 @@ except ImportError:
load_tests = load_tests
if not c10d.is_available():
- print('c10d not available, skipping tests', file=sys.stderr)
+ print("c10d not available, skipping tests", file=sys.stderr)
sys.exit(0)
if NO_MULTIPROCESSING_SPAWN:
- print('spawn not available, skipping tests', file=sys.stderr)
+ print("spawn not available, skipping tests", file=sys.stderr)
sys.exit(0)
@@ -40,14 +38,14 @@ class AbstractProcessGroupShareTensorTest:
ws = self.world_size
# file store will delete the test file on destruction
file = tempfile.NamedTemporaryFile(delete=False)
- ctx = mp.get_context('spawn')
+ ctx = mp.get_context("spawn")
c2p = ctx.Queue(2)
p2c = ctx.Queue(2)
ps = []
for i in range(ws):
p = ctx.Process(
- target=f,
- args=(i, file.name, shared_tensors, ws, init_pg, c2p, p2c))
+ target=f, args=(i, file.name, shared_tensors, ws, init_pg, c2p, p2c)
+ )
p.start()
ps.append(p)
@@ -57,7 +55,7 @@ class AbstractProcessGroupShareTensorTest:
self.assertEqual(
expected,
result,
- msg=f"Expect rank {pid} to receive tensor {expected} but got {result}."
+ msg=f"Expect rank {pid} to receive tensor {expected} but got {result}.",
)
for _ in range(ws):
@@ -70,7 +68,8 @@ class AbstractProcessGroupShareTensorTest:
# spawn mode. See https://bugs.python.org/issue33884.
@classmethod
def _test_broadcast_process(
- cls, rank, filename, shared_tensors, world_size, init_pg, c2p, p2c):
+ cls, rank, filename, shared_tensors, world_size, init_pg, c2p, p2c
+ ):
pg = init_pg(rank, filename, world_size)
xs = [shared_tensors[rank]]
pg.broadcast(xs).wait()
@@ -79,7 +78,8 @@ class AbstractProcessGroupShareTensorTest:
@classmethod
def _test_allreduce_process(
- cls, rank, filename, shared_tensors, world_size, init_pg, c2p, p2c):
+ cls, rank, filename, shared_tensors, world_size, init_pg, c2p, p2c
+ ):
pg = init_pg(rank, filename, world_size)
xs = [shared_tensors[rank]]
pg.allreduce(xs, op=c10d.ReduceOp.SUM).wait()
@@ -88,7 +88,8 @@ class AbstractProcessGroupShareTensorTest:
@classmethod
def _test_allgather_process(
- cls, rank, filename, shared_tensors, world_size, init_pg, c2p, p2c):
+ cls, rank, filename, shared_tensors, world_size, init_pg, c2p, p2c
+ ):
pg = init_pg(rank, filename, world_size)
xs = [shared_tensors[rank]]
ys = [[torch.zeros_like(xs[0]) for i in range(world_size)]]
diff --git a/test/distributed/test_c10d_spawn_gloo.py b/test/distributed/test_c10d_spawn_gloo.py
index 70009e9eb3..95897a2938 100644
--- a/test/distributed/test_c10d_spawn_gloo.py
+++ b/test/distributed/test_c10d_spawn_gloo.py
@@ -11,19 +11,29 @@ import torch.distributed as c10d
import torch.nn as nn
from test_c10d_spawn import _torch_dist_nn_available, TestDistributedNNFunctions
from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU
-from torch.testing._internal.common_distributed import requires_gloo, \
- create_device, skip_if_lt_x_gpu
-from torch.testing._internal.common_utils import TestCase, run_tests, skip_but_pass_in_sandcastle_if, TEST_WITH_DEV_DBG_ASAN
+from torch.testing._internal.common_distributed import (
+ create_device,
+ requires_gloo,
+ skip_if_lt_x_gpu,
+)
+from torch.testing._internal.common_utils import (
+ run_tests,
+ skip_but_pass_in_sandcastle_if,
+ TEST_WITH_DEV_DBG_ASAN,
+ TestCase,
+)
# Fails on Python-3.9, see https://github.com/pytorch/pytorch/issues/51619
if sys.version_info < (3, 9):
- class ProcessGroupShareTensorTest(test_c10d_spawn.AbstractProcessGroupShareTensorTest, TestCase):
+ class ProcessGroupShareTensorTest(
+ test_c10d_spawn.AbstractProcessGroupShareTensorTest, TestCase
+ ):
@classmethod
def opts(cls, threads=2):
opts = c10d.ProcessGroupGloo._Options()
opts._timeout = 5.0
- opts._devices = [create_device(interface='lo')]
+ opts._devices = [create_device(interface="lo")]
opts._threads = threads
return opts
@@ -31,42 +41,59 @@ if sys.version_info < (3, 9):
def _init_pg_gloo(cls, rank, filename, world_size):
store = c10d.FileStore(filename, world_size)
backend = c10d.ProcessGroupGloo(
- store, rank, world_size, ProcessGroupShareTensorTest.opts())
+ store, rank, world_size, ProcessGroupShareTensorTest.opts()
+ )
# set process group backends manually
- c10d.init_process_group(backend="gloo", store=store, rank=rank, world_size=world_size)
+ c10d.init_process_group(
+ backend="gloo", store=store, rank=rank, world_size=world_size
+ )
pg = c10d.distributed_c10d._get_default_group()
- pg._register_backend(torch.device("cpu"), c10d.ProcessGroup.BackendType.GLOO, backend)
- pg._register_backend(torch.device("cuda"), c10d.ProcessGroup.BackendType.GLOO, backend)
+ pg._register_backend(
+ torch.device("cpu"), c10d.ProcessGroup.BackendType.GLOO, backend
+ )
+ pg._register_backend(
+ torch.device("cuda"), c10d.ProcessGroup.BackendType.GLOO, backend
+ )
return pg
- @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
+ @skip_but_pass_in_sandcastle_if(
+ not TEST_MULTIGPU, "At least 2 CUDA GPUS needed"
+ )
def test_shared_broadcast_gloo(self):
self._test_multiprocess(
ProcessGroupShareTensorTest._test_broadcast_process,
[torch.ones(2, 2).to(i) * i for i in range(self.world_size)],
ProcessGroupShareTensorTest._init_pg_gloo,
- 1)
+ 1,
+ )
- @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
+ @skip_but_pass_in_sandcastle_if(
+ not TEST_MULTIGPU, "At least 2 CUDA GPUS needed"
+ )
def test_shared_allreduce_gloo(self):
self._test_multiprocess(
ProcessGroupShareTensorTest._test_allreduce_process,
[torch.ones(2, 2).to(i) for i in range(self.world_size)],
ProcessGroupShareTensorTest._init_pg_gloo,
- 1)
+ 1,
+ )
- @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
+ @skip_but_pass_in_sandcastle_if(
+ not TEST_MULTIGPU, "At least 2 CUDA GPUS needed"
+ )
def test_shared_allgather_gloo(self):
self._test_multiprocess(
ProcessGroupShareTensorTest._test_allgather_process,
[torch.ones(2, 2).to(i) * i for i in range(self.world_size)],
ProcessGroupShareTensorTest._init_pg_gloo,
- self.world_size)
+ self.world_size,
+ )
@classmethod
def _test_allgather_chunk_process(
- cls, rank, filename, shared_tensor, world_size, init_pg, c2p, p2c):
+ cls, rank, filename, shared_tensor, world_size, init_pg, c2p, p2c
+ ):
pg = init_pg(rank, filename, world_size)
chunks = torch.chunk(shared_tensor, world_size, dim=0)
x = chunks[rank]
@@ -76,13 +103,16 @@ if sys.version_info < (3, 9):
c2p.put((rank, chunks[1].to("cpu"), ys[1].to("cpu")))
p2c.get()
- @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
+ @skip_but_pass_in_sandcastle_if(
+ not TEST_MULTIGPU, "At least 2 CUDA GPUS needed"
+ )
def test_shared_allgather_chunk_gloo(self):
self._test_multiprocess(
ProcessGroupShareTensorTest._test_allgather_chunk_process,
torch.tensor(range(4)).reshape(2, 2),
ProcessGroupShareTensorTest._init_pg_gloo,
- self.world_size)
+ self.world_size,
+ )
class DistributedDataParallelSingleProcessTest(TestCase):
@@ -99,7 +129,9 @@ class DistributedDataParallelSingleProcessTest(TestCase):
def _test_base(self, net, inp, check_allclose=True):
store = c10d.FileStore(self.file.name, self.world_size)
- c10d.init_process_group(backend="gloo", store=store, rank=self.rank, world_size=self.world_size)
+ c10d.init_process_group(
+ backend="gloo", store=store, rank=self.rank, world_size=self.world_size
+ )
process_group = c10d.distributed_c10d._get_default_group()
if inp[0].is_cuda:
device_ids = [torch.cuda.current_device()]
@@ -107,9 +139,7 @@ class DistributedDataParallelSingleProcessTest(TestCase):
device_ids = None
ddp = nn.parallel.DistributedDataParallel(
- copy.deepcopy(net),
- device_ids=device_ids,
- process_group=process_group
+ copy.deepcopy(net), device_ids=device_ids, process_group=process_group
)
net_opt = torch.optim.Adam(net.parameters(), lr=0.001)
@@ -161,7 +191,9 @@ class DistributedDataParallelSingleProcessTest(TestCase):
self.output_dim = output_dim
self.hidden_layers = hidden_layers
- self.lstm = nn.LSTM(input_dim, hidden_dim, hidden_layers, batch_first=True)
+ self.lstm = nn.LSTM(
+ input_dim, hidden_dim, hidden_layers, batch_first=True
+ )
self.h2o = nn.Linear(hidden_dim, output_dim)
def forward(self, x, y):
@@ -174,7 +206,7 @@ class DistributedDataParallelSingleProcessTest(TestCase):
net = Net(INPUT_DIM, HIDDEN_DIM, OUTPUT_DIM, N_LAYERS).to(0)
inp = [
torch.randn((BATCH_SIZE, SEQ_LEN, INPUT_DIM)).to(0),
- torch.rand((BATCH_SIZE, SEQ_LEN, OUTPUT_DIM)).to(0)
+ torch.rand((BATCH_SIZE, SEQ_LEN, OUTPUT_DIM)).to(0),
]
         # Not checking result allclose as the parameter inconsistency exists
@@ -184,53 +216,70 @@ class DistributedDataParallelSingleProcessTest(TestCase):
# Skip dev-asan as torch + multiprocessing spawn have known issues
if not TEST_WITH_DEV_DBG_ASAN:
+
class TestDistributedNNFunctionsGloo(TestDistributedNNFunctions):
# Test Common Ops First.
@requires_gloo()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
+ @skip_but_pass_in_sandcastle_if(
+ not _torch_dist_nn_available, "torch.distributed.nn is not available"
+ )
def test_broadcast(self):
self._test_broadcast("gloo")
@requires_gloo()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
+ @skip_but_pass_in_sandcastle_if(
+ not _torch_dist_nn_available, "torch.distributed.nn is not available"
+ )
def test_reduce(self):
self._test_reduce("gloo")
@requires_gloo()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
+ @skip_but_pass_in_sandcastle_if(
+ not _torch_dist_nn_available, "torch.distributed.nn is not available"
+ )
def test_allreduce(self):
self._test_allreduce("gloo")
@requires_gloo()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
+ @skip_but_pass_in_sandcastle_if(
+ not _torch_dist_nn_available, "torch.distributed.nn is not available"
+ )
def test_all_gather(self):
self._test_all_gather("gloo")
@requires_gloo()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
+ @skip_but_pass_in_sandcastle_if(
+ not _torch_dist_nn_available, "torch.distributed.nn is not available"
+ )
def test_all_to_all(self):
self._test_all_to_all("gloo")
@requires_gloo()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
+ @skip_but_pass_in_sandcastle_if(
+ not _torch_dist_nn_available, "torch.distributed.nn is not available"
+ )
def test_all_to_all_single(self):
self._test_all_to_all_single("gloo")
# Test Ops only supported in GLOO.
@requires_gloo()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
+ @skip_but_pass_in_sandcastle_if(
+ not _torch_dist_nn_available, "torch.distributed.nn is not available"
+ )
def test_gather(self):
store = c10d.FileStore(self.file_name, self.world_size)
             # This is required because these functions call directly into .dist and need
# the world to be initialized
- c10d.init_process_group(store=store, rank=self.rank, world_size=self.world_size, backend='gloo')
+ c10d.init_process_group(
+ store=store, rank=self.rank, world_size=self.world_size, backend="gloo"
+ )
device = torch.device(f"cuda:{self.rank}")
x = torch.ones(5, 5, device=device) + self.rank
x.requires_grad = True
@@ -252,12 +301,16 @@ if not TEST_WITH_DEV_DBG_ASAN:
@requires_gloo()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
+ @skip_but_pass_in_sandcastle_if(
+ not _torch_dist_nn_available, "torch.distributed.nn is not available"
+ )
def test_scatter(self):
store = c10d.FileStore(self.file_name, self.world_size)
             # This is required because these functions call directly into .dist and need
# the world to be initialized
- c10d.init_process_group(store=store, rank=self.rank, world_size=self.world_size, backend='gloo')
+ c10d.init_process_group(
+ store=store, rank=self.rank, world_size=self.world_size, backend="gloo"
+ )
device = torch.device(f"cuda:{self.rank}")
x0 = torch.ones(5, 5, device=device)
x1 = torch.ones(5, 5, device=device) + 1
@@ -282,5 +335,5 @@ if not TEST_WITH_DEV_DBG_ASAN:
self.assertEqual(x0.grad, torch.zeros(5, 5, device=device))
-if __name__ == '__main__':
+if __name__ == "__main__":
run_tests()
diff --git a/test/distributed/test_c10d_spawn_nccl.py b/test/distributed/test_c10d_spawn_nccl.py
index b543b80032..67d6fae242 100644
--- a/test/distributed/test_c10d_spawn_nccl.py
+++ b/test/distributed/test_c10d_spawn_nccl.py
@@ -1,20 +1,18 @@
# Owner(s): ["oncall: distributed"]
import sys
+
import test_c10d_spawn
import torch
import torch.distributed as c10d
from test_c10d_spawn import _torch_dist_nn_available, TestDistributedNNFunctions
from torch.testing._internal.common_cuda import TEST_MULTIGPU
-from torch.testing._internal.common_distributed import (
- requires_nccl,
- skip_if_lt_x_gpu,
-)
+from torch.testing._internal.common_distributed import requires_nccl, skip_if_lt_x_gpu
from torch.testing._internal.common_utils import (
- TestCase,
run_tests,
skip_but_pass_in_sandcastle_if,
TEST_WITH_DEV_DBG_ASAN,
+ TestCase,
)
NO_NCCL = not hasattr(c10d, "ProcessGroupNCCL")
@@ -30,7 +28,9 @@ if sys.version_info < (3, 9):
store = c10d.FileStore(filename, world_size)
return c10d.ProcessGroupNCCL(store, rank, world_size)
- @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
+ @skip_but_pass_in_sandcastle_if(
+ not TEST_MULTIGPU, "At least 2 CUDA GPUS needed"
+ )
@skip_but_pass_in_sandcastle_if(NO_NCCL, "NCCL needed")
def test_shared_broadcast_nccl(self):
self._test_multiprocess(
@@ -40,7 +40,9 @@ if sys.version_info < (3, 9):
1,
)
- @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
+ @skip_but_pass_in_sandcastle_if(
+ not TEST_MULTIGPU, "At least 2 CUDA GPUS needed"
+ )
@skip_but_pass_in_sandcastle_if(NO_NCCL, "NCCL needed")
def test_shared_allreduce_nccl(self):
self._test_multiprocess(
@@ -63,7 +65,9 @@ if sys.version_info < (3, 9):
c2p.put((rank, torch.ones(2, 2), x.to("cpu")))
p2c.get()
- @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
+ @skip_but_pass_in_sandcastle_if(
+ not TEST_MULTIGPU, "At least 2 CUDA GPUS needed"
+ )
@skip_but_pass_in_sandcastle_if(NO_NCCL, "NCCL needed")
def test_shared_reduce_nccl(self):
self._test_multiprocess(
@@ -73,7 +77,9 @@ if sys.version_info < (3, 9):
1,
)
- @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
+ @skip_but_pass_in_sandcastle_if(
+ not TEST_MULTIGPU, "At least 2 CUDA GPUS needed"
+ )
@skip_but_pass_in_sandcastle_if(NO_NCCL, "NCCL needed")
def test_shared_allgather_nccl(self):
self._test_multiprocess(
@@ -99,50 +105,66 @@ if not TEST_WITH_DEV_DBG_ASAN:
@requires_nccl()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
+ @skip_but_pass_in_sandcastle_if(
+ not _torch_dist_nn_available, "torch.distributed.nn is not available"
+ )
def test_reduce(self):
self._test_reduce("nccl")
@requires_nccl()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
+ @skip_but_pass_in_sandcastle_if(
+ not _torch_dist_nn_available, "torch.distributed.nn is not available"
+ )
def test_allreduce(self):
self._test_allreduce("nccl")
@requires_nccl()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
+ @skip_but_pass_in_sandcastle_if(
+ not _torch_dist_nn_available, "torch.distributed.nn is not available"
+ )
def test_all_gather(self):
self._test_all_gather("nccl")
@requires_nccl()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
+ @skip_but_pass_in_sandcastle_if(
+ not _torch_dist_nn_available, "torch.distributed.nn is not available"
+ )
def test_all_to_all(self):
self._test_all_to_all("nccl")
@requires_nccl()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
+ @skip_but_pass_in_sandcastle_if(
+ not _torch_dist_nn_available, "torch.distributed.nn is not available"
+ )
def test_all_to_all_single(self):
self._test_all_to_all_single("nccl")
# Test Ops only supported in NCCL.
@requires_nccl()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
+ @skip_but_pass_in_sandcastle_if(
+ not _torch_dist_nn_available, "torch.distributed.nn is not available"
+ )
def test_reduce_scatter(self):
store = c10d.FileStore(self.file_name, self.world_size)
             # This is required because these functions call directly into .dist and need
# the world to be initialized
- c10d.init_process_group(store=store, rank=self.rank, world_size=self.world_size, backend='nccl')
+ c10d.init_process_group(
+ store=store, rank=self.rank, world_size=self.world_size, backend="nccl"
+ )
device = torch.device(f"cuda:{self.rank}")
x0 = torch.ones(5, 5, device=device) + self.rank
x1 = torch.ones(5, 5, device=device) + self.rank + 1
x0.requires_grad = True
x1.requires_grad = True
y = torch.empty_like(x0)
- expected = (1 + self.world_size) * self.world_size / 2 + self.world_size * self.rank
+ expected = (
+ 1 + self.world_size
+ ) * self.world_size / 2 + self.world_size * self.rank
y = torch.distributed.nn.reduce_scatter(y, [x0, x1])
self.assertEqual(y, torch.ones(5, 5, device=device) * expected)
z = y.sin().sum()
@@ -156,16 +178,19 @@ if not TEST_WITH_DEV_DBG_ASAN:
@requires_nccl()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
+ @skip_but_pass_in_sandcastle_if(
+ not _torch_dist_nn_available, "torch.distributed.nn is not available"
+ )
def test_reduce_scatter_non_contiguous(self):
store = c10d.FileStore(self.file_name, self.world_size)
             # This is required because these functions call directly into .dist and need
# the world to be initialized
- c10d.init_process_group(store=store, rank=self.rank, world_size=self.world_size, backend='nccl')
+ c10d.init_process_group(
+ store=store, rank=self.rank, world_size=self.world_size, backend="nccl"
+ )
device = torch.device(f"cuda:{self.rank}")
class NonContiguousGrad(torch.autograd.Function):
-
@staticmethod
def forward(ctx, input):
return input
@@ -184,10 +209,14 @@ if not TEST_WITH_DEV_DBG_ASAN:
@requires_nccl()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
+ @skip_but_pass_in_sandcastle_if(
+ not _torch_dist_nn_available, "torch.distributed.nn is not available"
+ )
def test_all_gather_base(self):
store = c10d.FileStore(self.file_name, self.world_size)
- c10d.init_process_group(store=store, rank=self.rank, world_size=self.world_size, backend='nccl')
+ c10d.init_process_group(
+ store=store, rank=self.rank, world_size=self.world_size, backend="nccl"
+ )
device = torch.device(f"cuda:{self.rank}")
x = torch.ones(5, 5, device=device) + self.rank
@@ -198,7 +227,10 @@ if not TEST_WITH_DEV_DBG_ASAN:
self.assertEqual(output.size(), torch.Size((5 * self.world_size, 5)))
for idx in range(self.world_size):
- self.assertEqual(output[5 * idx : 5 * (idx + 1)], torch.ones(5, 5, device=device) + idx)
+ self.assertEqual(
+ output[5 * idx : 5 * (idx + 1)],
+ torch.ones(5, 5, device=device) + idx,
+ )
y = torch.sum(output.view(self.world_size, 5, 5), axis=0)
z = y.sin().sum()
diff --git a/test/distributed/test_c10d_spawn_ucc.py b/test/distributed/test_c10d_spawn_ucc.py
index ecd4bc2230..81f7ec6210 100644
--- a/test/distributed/test_c10d_spawn_ucc.py
+++ b/test/distributed/test_c10d_spawn_ucc.py
@@ -1,21 +1,19 @@
# Owner(s): ["oncall: distributed"]
import sys
+
import test_c10d_spawn
import torch
import torch.distributed as c10d
from test_c10d_spawn import _torch_dist_nn_available, TestDistributedNNFunctions
from torch.testing._internal.common_cuda import TEST_MULTIGPU
-from torch.testing._internal.common_distributed import (
- requires_ucc,
- skip_if_lt_x_gpu,
-)
+from torch.testing._internal.common_distributed import requires_ucc, skip_if_lt_x_gpu
from torch.testing._internal.common_utils import (
- TestCase,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_WITH_DEV_DBG_ASAN,
+ TestCase,
)
NO_UCC = not hasattr(c10d, "ProcessGroupUCC")
@@ -29,10 +27,14 @@ if sys.version_info < (3, 9):
@classmethod
def _init_pg_ucc(cls, rank, filename, world_size):
store = c10d.FileStore(filename, world_size)
- c10d.init_process_group(backend="ucc", store=store, rank=rank, world_size=world_size)
+ c10d.init_process_group(
+ backend="ucc", store=store, rank=rank, world_size=world_size
+ )
return c10d.distributed_c10d._get_default_group()
- @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
+ @skip_but_pass_in_sandcastle_if(
+ not TEST_MULTIGPU, "At least 2 CUDA GPUS needed"
+ )
@skip_but_pass_in_sandcastle_if(NO_UCC, "UCC needed")
def test_shared_broadcast_ucc(self):
self._test_multiprocess(
@@ -42,7 +44,9 @@ if sys.version_info < (3, 9):
1,
)
- @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
+ @skip_but_pass_in_sandcastle_if(
+ not TEST_MULTIGPU, "At least 2 CUDA GPUS needed"
+ )
@skip_but_pass_in_sandcastle_if(NO_UCC, "UCC needed")
def test_shared_allreduce_ucc(self):
self._test_multiprocess(
@@ -52,7 +56,9 @@ if sys.version_info < (3, 9):
1,
)
- @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
+ @skip_but_pass_in_sandcastle_if(
+ not TEST_MULTIGPU, "At least 2 CUDA GPUS needed"
+ )
@skip_but_pass_in_sandcastle_if(NO_UCC, "UCC needed")
def test_shared_allgather_ucc(self):
self._test_multiprocess(
@@ -78,34 +84,47 @@ if not TEST_WITH_DEV_DBG_ASAN:
@requires_ucc()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
+ @skip_but_pass_in_sandcastle_if(
+ not _torch_dist_nn_available, "torch.distributed.nn is not available"
+ )
def test_reduce(self):
self._test_reduce("ucc")
@requires_ucc()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
+ @skip_but_pass_in_sandcastle_if(
+ not _torch_dist_nn_available, "torch.distributed.nn is not available"
+ )
def test_allreduce(self):
self._test_allreduce("ucc")
@requires_ucc()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
- @skip_but_pass_in_sandcastle("runs into illegal memory access on first assertEqual check when run locally")
+ @skip_but_pass_in_sandcastle_if(
+ not _torch_dist_nn_available, "torch.distributed.nn is not available"
+ )
+ @skip_but_pass_in_sandcastle(
+ "runs into illegal memory access on first assertEqual check when run locally"
+ )
def test_all_gather(self):
self._test_all_gather("ucc")
@requires_ucc()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
+ @skip_but_pass_in_sandcastle_if(
+ not _torch_dist_nn_available, "torch.distributed.nn is not available"
+ )
def test_all_to_all(self):
self._test_all_to_all("ucc")
@requires_ucc()
@skip_if_lt_x_gpu(2)
- @skip_but_pass_in_sandcastle_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
+ @skip_but_pass_in_sandcastle_if(
+ not _torch_dist_nn_available, "torch.distributed.nn is not available"
+ )
def test_all_to_all_single(self):
self._test_all_to_all_single("ucc")
+
if __name__ == "__main__":
run_tests()
diff --git a/test/distributed/test_c10d_ucc.py b/test/distributed/test_c10d_ucc.py
index 32557f8851..75294c90b5 100644
--- a/test/distributed/test_c10d_ucc.py
+++ b/test/distributed/test_c10d_ucc.py
@@ -23,9 +23,9 @@ import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import (
gpus_for_rank,
- Task,
ModuleForDdpCommHook,
SparseGradientModule,
+ Task,
)
from torch import nn
from torch.nn.parallel import DistributedDataParallel
@@ -36,10 +36,10 @@ from torch.testing._internal.common_distributed import (
verify_ddp_error_logged,
)
from torch.testing._internal.common_utils import (
- TestCase,
- run_tests,
retry_on_connect_failures,
+ run_tests,
skip_but_pass_in_sandcastle,
+ TestCase,
)
@@ -207,7 +207,7 @@ class ProcessGroupUCCTest(MultiProcessTestCase):
# Single input tests
tests = simple_reduce_tests(self.rank, self.world_size)
- for (op, input, expected) in tests:
+ for op, input, expected in tests:
opts = c10d.AllreduceOptions()
opts.reduceOp = op
tensor = fn(input)
@@ -260,7 +260,7 @@ class ProcessGroupUCCTest(MultiProcessTestCase):
def _test_reduce_basics(self, fn):
pg = self._create_process_group_ucc()
- for (op, input, output) in simple_reduce_tests(self.rank, self.world_size):
+ for op, input, output in simple_reduce_tests(self.rank, self.world_size):
for root in range(self.world_size):
opts = c10d.ReduceOptions()
opts.reduceOp = op
@@ -346,7 +346,9 @@ class DistributedDataParallelTest(
def _get_process_group(self):
store = self._get_store()
- c10d.init_process_group("ucc", store=store, rank=self.rank, world_size=self.world_size)
+ c10d.init_process_group(
+ "ucc", store=store, rank=self.rank, world_size=self.world_size
+ )
return c10d.distributed_c10d._get_default_group()
def _test_ucc_backend(
@@ -383,7 +385,9 @@ class DistributedDataParallelTest(
# TODO: test_ucc_backend_2gpu_module and test_ucc_backend_4gpu_module
# require broadcast_coalesced which is not supported by ucc currently
- @skip_but_pass_in_sandcastle("requires broadcast coalesced, which is not supported by ucc currently")
+ @skip_but_pass_in_sandcastle(
+ "requires broadcast coalesced, which is not supported by ucc currently"
+ )
@requires_ucc()
@skip_if_lt_x_gpu(4)
def test_ucc_backend_2gpu_module(self):
@@ -391,7 +395,9 @@ class DistributedDataParallelTest(
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_ucc_backend(devices, None, multi_device=True)
- @skip_but_pass_in_sandcastle("requires broadcast coalesced, which is not supported by ucc currently")
+ @skip_but_pass_in_sandcastle(
+ "requires broadcast coalesced, which is not supported by ucc currently"
+ )
@requires_ucc()
@skip_if_lt_x_gpu(8)
def test_ucc_backend_4gpu_module(self):
@@ -646,7 +652,9 @@ class DistributedDataParallelTest(
# Check that the gradients are sparse and identical
vanilla_parameter = next(vanilla_model.parameters())
ddp_parameter = next(ddp_model.parameters())
- self.assertEqual(vanilla_parameter.grad.coalesce(), ddp_parameter.grad.coalesce())
+ self.assertEqual(
+ vanilla_parameter.grad.coalesce(), ddp_parameter.grad.coalesce()
+ )
@requires_ucc()
@skip_if_lt_x_gpu(2)
@@ -874,7 +882,9 @@ class DistributedDataParallelTest(
ModuleForDdpCommHook(), process_group=process_group
)
- expected_err = "Communication hook: return annotation should be torch.futures.Future"
+ expected_err = (
+ "Communication hook: return annotation should be torch.futures.Future"
+ )
with self.assertRaisesRegex(
ValueError,
expected_err,
@@ -1035,7 +1045,6 @@ class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
class CompilerTest(test_c10d_common.CompilerTest):
-
@property
def world_size(self):
return 2
@@ -1058,15 +1067,11 @@ class CompilerTest(test_c10d_common.CompilerTest):
@skip_if_lt_x_gpu(2)
def test_allgather_work_wait_gpu(self):
- self._test_allgather_work_wait(
- torch.ones(2, 2, device=self.rank) * self.rank
- )
+ self._test_allgather_work_wait(torch.ones(2, 2, device=self.rank) * self.rank)
@skip_if_lt_x_gpu(2)
def test_broadcast_work_wait_gpu(self):
- self._test_broadcast_work_wait(
- torch.ones(2, 2, device=self.rank) * self.rank
- )
+ self._test_broadcast_work_wait(torch.ones(2, 2, device=self.rank) * self.rank)
@skip_if_lt_x_gpu(2)
def test_nested_comm_tensor_wrapping_gpu(self):
@@ -1086,28 +1091,21 @@ class CompilerTest(test_c10d_common.CompilerTest):
)
def test_allgather_work_wait_cpu(self):
- self._test_allgather_work_wait(
- torch.ones(2, 2) * self.rank
- )
+ self._test_allgather_work_wait(torch.ones(2, 2) * self.rank)
def test_broadcast_work_wait_cpu(self):
- self._test_broadcast_work_wait(
- torch.ones(2, 2) * self.rank
- )
+ self._test_broadcast_work_wait(torch.ones(2, 2) * self.rank)
def test_nested_comm_tensor_wrapping_cpu(self):
- self._test_nested_comm_tensor_wrapping(
- torch.ones(2, 2) * self.rank
- )
+ self._test_nested_comm_tensor_wrapping(torch.ones(2, 2) * self.rank)
def test_consecutive_comm_work_wait_cpu(self):
- self._test_consecutive_comm_work_wait(
- torch.ones(2, 2) * self.rank
- )
-
+ self._test_consecutive_comm_work_wait(torch.ones(2, 2) * self.rank)
-class UccProcessGroupWithDispatchedCollectivesTests(test_c10d_common.ProcessGroupWithDispatchedCollectivesTests):
+class UccProcessGroupWithDispatchedCollectivesTests(
+ test_c10d_common.ProcessGroupWithDispatchedCollectivesTests
+):
@skip_but_pass_in_sandcastle("Fails on M60")
@requires_ucc()
@skip_if_lt_x_gpu(1)
diff --git a/test/distributed/test_collective_utils.py b/test/distributed/test_collective_utils.py
index 3b0c2e0199..727850680a 100644
--- a/test/distributed/test_collective_utils.py
+++ b/test/distributed/test_collective_utils.py
@@ -6,8 +6,8 @@ import torch.distributed as c10d
from torch.distributed.collective_utils import all_gather, broadcast
from torch.testing._internal.common_distributed import MultiProcessTestCase
-class TestCollectiveUtils(MultiProcessTestCase):
+class TestCollectiveUtils(MultiProcessTestCase):
def setUp(self):
super().setUp()
self._spawn_processes()
@@ -26,7 +26,9 @@ class TestCollectiveUtils(MultiProcessTestCase):
Basic unit test for broadcast using a process group of default world size.
"""
store = c10d.FileStore(self.file_name, self.world_size)
- c10d.init_process_group(backend="gloo", store=store, rank=self.rank, world_size=self.world_size)
+ c10d.init_process_group(
+ backend="gloo", store=store, rank=self.rank, world_size=self.world_size
+ )
pg = c10d.new_group(pg_options=self.opts())
func = mock.MagicMock()
@@ -77,7 +79,9 @@ class TestCollectiveUtils(MultiProcessTestCase):
Basic unit test for all_gather using a process group of default world size.
"""
store = c10d.FileStore(self.file_name, self.world_size)
- c10d.init_process_group(backend="gloo", store=store, rank=self.rank, world_size=self.world_size)
+ c10d.init_process_group(
+ backend="gloo", store=store, rank=self.rank, world_size=self.world_size
+ )
pg = c10d.new_group(pg_options=self.opts())
func = mock.MagicMock()
@@ -85,7 +89,9 @@ class TestCollectiveUtils(MultiProcessTestCase):
res = all_gather(data_or_fn=func, pg=pg)
func.assert_called_once()
- assert res == list(range(self.world_size)), f"Expect res to be list of 0 through {self.world_size} (got {res})"
+ assert res == list(
+ range(self.world_size)
+ ), f"Expect res to be list of 0 through {self.world_size} (got {res})"
def test_all_gather_result_no_pg(self) -> None:
"""
diff --git a/test/distributed/test_data_parallel.py b/test/distributed/test_data_parallel.py
index 3d88fc3851..8e380ca9df 100644
--- a/test/distributed/test_data_parallel.py
+++ b/test/distributed/test_data_parallel.py
@@ -1,48 +1,61 @@
# Owner(s): ["oncall: distributed"]
import contextlib
+import functools
import io
-from copy import deepcopy
from collections import OrderedDict
+from copy import deepcopy
from itertools import product
-import functools
import torch
+import torch.nn.functional as F
+import torch.nn.parallel as dp
from torch import nn
from torch.cuda.amp import autocast
-import torch.nn.parallel as dp
-from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
-from torch.testing._internal.common_device_type import instantiate_device_type_tests, dtypes, onlyCUDA, skipMeta
-from torch.testing._internal.common_utils import run_tests, TestCase
-from torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck
-from torch.testing._internal.common_utils import dtype2prec_DONTUSE
-from torch.testing._internal.common_utils import skip_but_pass_in_sandcastle_if
-import torch.nn.functional as F
+from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU
+from torch.testing._internal.common_device_type import (
+ dtypes,
+ instantiate_device_type_tests,
+ onlyCUDA,
+ skipMeta,
+)
+from torch.testing._internal.common_utils import (
+ _assertGradAndGradgradChecks,
+ dtype2prec_DONTUSE,
+ gradcheck,
+ run_tests,
+ skip_but_pass_in_sandcastle_if,
+ TestCase,
+)
NO_NCCL = not hasattr(torch.distributed, "ProcessGroupNCCL")
# batched grad doesn't support data parallel
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
-_assertGradAndGradgradChecks = functools.partial(_assertGradAndGradgradChecks, check_batched_grad=False)
+_assertGradAndGradgradChecks = functools.partial(
+ _assertGradAndGradgradChecks, check_batched_grad=False
+)
-class TestDataParallel(TestCase):
+class TestDataParallel(TestCase):
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_data_parallel_buffers_requiring_grad(self):
class TestModule(nn.Module):
def __init__(self, t):
super().__init__()
- self.register_buffer('t_rg', t)
- self.register_buffer('t_not_rg', t.clone().detach())
+ self.register_buffer("t_rg", t)
+ self.register_buffer("t_not_rg", t.clone().detach())
def forward(self, x):
return x * self.t_rg + self.t_not_rg
- m = TestModule(torch.randn(100, device='cuda', requires_grad=True, dtype=torch.double))
+ m = TestModule(
+ torch.randn(100, device="cuda", requires_grad=True, dtype=torch.double)
+ )
self.assertTrue(m.t_rg.requires_grad)
dpm = nn.DataParallel(m, [0, 1])
- inp = torch.randn(2, 100, device='cuda', dtype=torch.double)
+ inp = torch.randn(2, 100, device="cuda", dtype=torch.double)
def fn(t):
return dpm(inp)
@@ -51,12 +64,12 @@ class TestDataParallel(TestCase):
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_data_parallel_rnn(self):
-
class TestModule(torch.nn.Module):
-
def __init__(self):
super().__init__()
- self.rnn = torch.nn.LSTM(300, 1024, 1, batch_first=True, bidirectional=True)
+ self.rnn = torch.nn.LSTM(
+ 300, 1024, 1, batch_first=True, bidirectional=True
+ )
def forward(self, x):
self.rnn.flatten_parameters()
@@ -86,8 +99,9 @@ class TestDataParallel(TestCase):
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_data_parallel_lazy_linear(self):
-
- with self.assertRaisesRegex(ValueError, 'Attempted to use an uninitialized parameter'):
+ with self.assertRaisesRegex(
+ ValueError, "Attempted to use an uninitialized parameter"
+ ):
model_dp = torch.nn.DataParallel(torch.nn.LazyLinear(10).to(0))
model_dp(torch.rand(10, 10).to(0))
@@ -133,23 +147,23 @@ class TestDataParallel(TestCase):
def test_parallel_apply_passes_exception(self):
# we define and instantiate a module that will throw a KeyError
class TestModule(nn.Module):
-
def forward(self, *args):
- return {}['wonderful']
+ return {}["wonderful"]
l1 = TestModule().to("cuda", torch.float)
# and check that parallel_apply passes on the exception
# (we can use a single device twice for this test)
- with self.assertRaisesRegex(KeyError,
- 'Caught KeyError in replica \\d '
- 'on device 0.\nOriginal Traceback'
- '[\\s\\S]+wonderful'):
+ with self.assertRaisesRegex(
+ KeyError,
+ "Caught KeyError in replica \\d "
+ "on device 0.\nOriginal Traceback"
+ "[\\s\\S]+wonderful",
+ ):
dp.parallel_apply(modules=(l1, l1), inputs=(None, None))
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_data_parallel_multiple_input(self):
class TestModule(nn.Module):
-
def forward(self, var1, var2, float1, var3=None):
if var3 is None:
return float1 * (var1 * var2)
@@ -205,13 +219,13 @@ class TestDataParallel(TestCase):
out = dpm(var1, var2, float1, var3=var3)
local_test(out)
- kwarg_wrap = {'var3': var3}
+ kwarg_wrap = {"var3": var3}
out = dp.data_parallel(
- m, (var1, var2, float1), (0, 1), module_kwargs=kwarg_wrap)
+ m, (var1, var2, float1), (0, 1), module_kwargs=kwarg_wrap
+ )
local_test(out)
- out = dp.data_parallel(
- m, (var1, var2, float1), (0,), module_kwargs=kwarg_wrap)
+ out = dp.data_parallel(m, (var1, var2, float1), (0,), module_kwargs=kwarg_wrap)
local_test(out)
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "multi-GPU not supported")
@@ -223,8 +237,7 @@ class TestDataParallel(TestCase):
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_data_parallel_model_device(self):
- r"""Test device[0] check at forward time.
- """
+ r"""Test device[0] check at forward time."""
l = nn.Linear(2, 2)
inp = torch.randn(2, 2)
inp_cuda0 = inp.cuda(0)
@@ -246,8 +259,12 @@ class TestDataParallel(TestCase):
expect_device = torch.device(f"cuda:{device_ids[0]}")
if should_fail:
+
def assert_correct():
- return self.assertRaisesRegex(RuntimeError, error_msg.format(expect_device))
+ return self.assertRaisesRegex(
+ RuntimeError, error_msg.format(expect_device)
+ )
+
else:
assert_correct = dummy_ctx_manager
@@ -263,14 +280,14 @@ class TestDataParallel(TestCase):
with assert_correct():
nn.parallel.data_parallel(inner_m.to(dp_device), inp, device_ids)
- test(l.to('cpu'), None, inp, None, should_fail=True)
+ test(l.to("cpu"), None, inp, None, should_fail=True)
test(l.cuda(1), None, inp_cuda0, None, should_fail=True)
test(l.cuda(), None, inp_cuda0, [1, 0], should_fail=True)
test(l.cuda(), None, inp_cuda0, None, should_fail=False)
- test(l.cpu(), 'cuda', inp_cuda0, None, should_fail=False)
+ test(l.cpu(), "cuda", inp_cuda0, None, should_fail=False)
test(l.cuda(1), None, inp_cuda1, [1, 0], should_fail=False)
- test(l.cpu(), 'cuda:1', inp_cuda1, [1, 0], should_fail=False)
+ test(l.cpu(), "cuda:1", inp_cuda1, [1, 0], should_fail=False)
s = nn.Sequential(l.cpu())
test(s, None, inp, None, should_fail=True)
@@ -391,8 +408,10 @@ class TestDataParallel(TestCase):
def test_data_parallel_nested_output(self):
def fn(input):
return [
- input, (input.sin(), input.cos(), [input.add(1)]), input,
- OrderedDict(a=input, b=[input.sin()])
+ input,
+ (input.sin(), input.cos(), [input.add(1)]),
+ input,
+ OrderedDict(a=input, b=[input.sin()]),
]
class Net(nn.Module):
@@ -412,11 +431,11 @@ class TestDataParallel(TestCase):
self.assertIsInstance(output[2], torch.Tensor)
self.assertIsInstance(output[3], dict)
self.assertEqual(len(output[3]), 2)
- self.assertIn('a', output[3])
- self.assertIn('b', output[3])
- self.assertIsInstance(output[3]['a'], torch.Tensor)
- self.assertIsInstance(output[3]['b'], list)
- self.assertIsInstance(output[3]['b'][0], torch.Tensor)
+ self.assertIn("a", output[3])
+ self.assertIn("b", output[3])
+ self.assertIsInstance(output[3]["a"], torch.Tensor)
+ self.assertIsInstance(output[3]["b"], list)
+ self.assertIsInstance(output[3]["b"][0], torch.Tensor)
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_data_parallel_nested_input(self):
@@ -437,14 +456,14 @@ class TestDataParallel(TestCase):
def test_data_parallel_module_zero_inputs(self):
class TestModule(nn.Module):
def forward(self):
- t = torch.eye(2, 3, device='cuda:0')
+ t = torch.eye(2, 3, device="cuda:0")
return t + (1 - t)
def test_helper(output, expected):
self.assertEqual(output.get_device(), 0)
self.assertEqual(output, expected)
- expected = torch.ones(2, 3, device='cuda:0')
+ expected = torch.ones(2, 3, device="cuda:0")
model = TestModule()
test_helper(nn.DataParallel(model, [0])(), expected)
@@ -454,8 +473,8 @@ class TestDataParallel(TestCase):
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_data_parallel_device_args(self):
- cuda0 = torch.device('cuda:0')
- cuda1 = torch.device('cuda:1')
+ cuda0 = torch.device("cuda:0")
+ cuda1 = torch.device("cuda:1")
# test output_device
l = nn.Linear(10, 5).to(cuda0, torch.float)
@@ -475,9 +494,12 @@ class TestDataParallel(TestCase):
def gradient_penalty(net, x):
output = net(x)
loss = torch.autograd.grad(
- outputs=output, inputs=x,
+ outputs=output,
+ inputs=x,
grad_outputs=x.new_ones(output.size()),
- create_graph=True, retain_graph=True)[0].mean()
+ create_graph=True,
+ retain_graph=True,
+ )[0].mean()
return loss
net = nn.Linear(4, 1).cuda()
@@ -490,9 +512,9 @@ class TestDataParallel(TestCase):
grads = [p.grad for p in net.parameters()]
self.assertEqual(2, len(grads))
self.assertEqual(
- torch.tensor([[0.25, 0.25, 0.25, 0.25]], device='cuda:0'),
- grads[0])
- self.assertEqual(torch.tensor([0.0], device='cuda:0'), grads[1])
+ torch.tensor([[0.25, 0.25, 0.25, 0.25]], device="cuda:0"), grads[0]
+ )
+ self.assertEqual(torch.tensor([0.0], device="cuda:0"), grads[1])
def _test_scatter(self, tensor):
x = tensor.detach().requires_grad_()
@@ -523,7 +545,9 @@ class TestDataParallel(TestCase):
class Cplx(torch.nn.Module):
def __init__(self):
super().__init__()
- self.cplx = torch.nn.Parameter(torch.zeros(1, 10, dtype=torch.cfloat).cuda())
+ self.cplx = torch.nn.Parameter(
+ torch.zeros(1, 10, dtype=torch.cfloat).cuda()
+ )
def forward(self, x):
return x + self.cplx
@@ -537,8 +561,8 @@ class TestDataParallel(TestCase):
def _test_gather(self, output_device):
inputs = (
- torch.randn(2, 4, device='cuda:0', requires_grad=True, dtype=torch.double),
- torch.randn(2, 4, device='cuda:1', requires_grad=True, dtype=torch.double),
+ torch.randn(2, 4, device="cuda:0", requires_grad=True, dtype=torch.double),
+ torch.randn(2, 4, device="cuda:1", requires_grad=True, dtype=torch.double),
)
result = dp.gather(inputs, output_device)
self.assertEqual(result.size(), torch.Size([4, 4]))
@@ -554,12 +578,14 @@ class TestDataParallel(TestCase):
result.backward(grad)
self.assertEqual(inputs[0].grad, grad[:2])
self.assertEqual(inputs[1].grad, grad[2:])
- _assertGradAndGradgradChecks(self, lambda x, y: dp.gather((x, y), output_device), inputs)
+ _assertGradAndGradgradChecks(
+ self, lambda x, y: dp.gather((x, y), output_device), inputs
+ )
# test scalar inputs, should stack into a vector in this case
inputs = (
- torch.randn((), device='cuda:0', requires_grad=True, dtype=torch.double),
- torch.randn((), device='cuda:1', requires_grad=True, dtype=torch.double),
+ torch.randn((), device="cuda:0", requires_grad=True, dtype=torch.double),
+ torch.randn((), device="cuda:1", requires_grad=True, dtype=torch.double),
)
result = dp.gather(inputs, output_device)
self.assertEqual(result.size(), torch.Size([2]))
@@ -575,7 +601,9 @@ class TestDataParallel(TestCase):
result.backward(grad)
self.assertEqual(inputs[0].grad, grad[0])
self.assertEqual(inputs[1].grad, grad[1])
- _assertGradAndGradgradChecks(self, lambda x, y: dp.gather((x, y), output_device), inputs)
+ _assertGradAndGradgradChecks(
+ self, lambda x, y: dp.gather((x, y), output_device), inputs
+ )
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_gather_cpu(self):
@@ -588,11 +616,11 @@ class TestDataParallel(TestCase):
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_gather_different_len_dicts(self):
inputs = (
- {'a': torch.randn(1, 2, requires_grad=True, device="cuda:0")},
+ {"a": torch.randn(1, 2, requires_grad=True, device="cuda:0")},
{
- 'b': torch.randn(1, 2, requires_grad=True, device="cuda:1"),
- 'a': torch.randn(1, 2, requires_grad=True, device="cuda:1"),
- }
+ "b": torch.randn(1, 2, requires_grad=True, device="cuda:1"),
+ "a": torch.randn(1, 2, requires_grad=True, device="cuda:1"),
+ },
)
with self.assertRaises(ValueError):
_ = dp.gather(inputs, target_device=0)
@@ -618,9 +646,19 @@ class TestDataParallel(TestCase):
for devices in [(0, 1), [0, 1]]:
replicas = dp.replicate(net, devices)
for i, replica in enumerate(replicas):
- self.assertEqual(replica.bn.running_mean.get_device(), i, msg='buffer on wrong device')
- self.assertEqual(replica.bn.running_var.get_device(), i, msg='buffer on wrong device')
- self.assertEqual(replica.bn.num_batches_tracked.get_device(), i, msg='buffer on wrong device')
+ self.assertEqual(
+ replica.bn.running_mean.get_device(),
+ i,
+ msg="buffer on wrong device",
+ )
+ self.assertEqual(
+ replica.bn.running_var.get_device(), i, msg="buffer on wrong device"
+ )
+ self.assertEqual(
+ replica.bn.num_batches_tracked.get_device(),
+ i,
+ msg="buffer on wrong device",
+ )
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_zero_grad(self):
@@ -633,8 +671,9 @@ class TestDataParallel(TestCase):
def forward(self, x):
with self._testcase.assertWarnsRegex(
- UserWarning,
- r"Calling \.zero_grad\(\) from a module created with nn\.DataParallel\(\) has no effect."):
+ UserWarning,
+ r"Calling \.zero_grad\(\) from a module created with nn\.DataParallel\(\) has no effect.",
+ ):
self.zero_grad()
return x
@@ -672,10 +711,18 @@ class TestDataParallel(TestCase):
def __init__(self, layouts, dtype_list):
super().__init__()
self.dtypes = dtype_list
- self.conv0 = torch.nn.Conv2d(8, 16, (2, 2)).to(memory_format=layouts[0], dtype=dtype_list[0])
- self.conv1 = torch.nn.Conv2d(16, 32, (2, 2)).to(memory_format=layouts[1], dtype=dtype_list[1])
- self.conv2 = torch.nn.Conv2d(32, 16, (2, 2)).to(memory_format=layouts[2], dtype=dtype_list[2])
- self.conv3 = torch.nn.Conv2d(16, 8, (2, 2)).to(memory_format=layouts[3], dtype=dtype_list[3])
+ self.conv0 = torch.nn.Conv2d(8, 16, (2, 2)).to(
+ memory_format=layouts[0], dtype=dtype_list[0]
+ )
+ self.conv1 = torch.nn.Conv2d(16, 32, (2, 2)).to(
+ memory_format=layouts[1], dtype=dtype_list[1]
+ )
+ self.conv2 = torch.nn.Conv2d(32, 16, (2, 2)).to(
+ memory_format=layouts[2], dtype=dtype_list[2]
+ )
+ self.conv3 = torch.nn.Conv2d(16, 8, (2, 2)).to(
+ memory_format=layouts[3], dtype=dtype_list[3]
+ )
def forward(self, x):
x = x.to(self.dtypes[0])
@@ -685,19 +732,25 @@ class TestDataParallel(TestCase):
x = self.conv3(x)
return x
- layer_formats = ([torch.contiguous_format] * 4,
- [torch.channels_last] * 2 + [torch.contiguous_format] * 2,
- [torch.channels_last] * 4,)
- layer_dtypes = ([torch.float] * 4,
- [torch.float] * 2 + [torch.half] * 2,
- [torch.half] * 4,)
+ layer_formats = (
+ [torch.contiguous_format] * 4,
+ [torch.channels_last] * 2 + [torch.contiguous_format] * 2,
+ [torch.channels_last] * 4,
+ )
+ layer_dtypes = (
+ [torch.float] * 4,
+ [torch.float] * 2 + [torch.half] * 2,
+ [torch.half] * 4,
+ )
ndevs = torch.cuda.device_count()
input = torch.randn(ndevs * 8, 8, 8, 8, device="cuda:0", dtype=torch.float)
target = torch.randn(ndevs * 8, 8, 4, 4, device="cuda:0", dtype=torch.float)
device_ids = list(range(ndevs))
- with torch.backends.cudnn.flags(enabled=True, deterministic=True, benchmark=False):
+ with torch.backends.cudnn.flags(
+ enabled=True, deterministic=True, benchmark=False
+ ):
for formats, dtype_list in product(layer_formats, layer_dtypes):
model_msg = f"formats = {formats} dtypes = {dtypes}"
try:
@@ -706,10 +759,13 @@ class TestDataParallel(TestCase):
opt = torch.optim.SGD(m.parameters(), lr=0.1)
opt_dp = torch.optim.SGD(m_dp.parameters(), lr=0.1)
has_half = any(p.dtype is torch.half for p in m.parameters())
- tol = 1.e-3 if has_half else 1.e-5
+ tol = 1.0e-3 if has_half else 1.0e-5
except BaseException:
# Prints case-specific debugging info to narrow down failing case.
- print("Caught exception during model creation for " + model_msg, flush=True)
+ print(
+ "Caught exception during model creation for " + model_msg,
+ flush=True,
+ )
raise
# 2 iters: First iter creates grads, second iter tries zeroed grads.
for it in range(2):
@@ -718,14 +774,28 @@ class TestDataParallel(TestCase):
try:
F.mse_loss(m(input).float(), target).backward()
F.mse_loss(m_dp(input).float(), target).backward()
- for i, ((layer_name, m_child), m_dp_child) in enumerate(zip(m.named_children(),
- m_dp.module.children())):
+ for i, ((layer_name, m_child), m_dp_child) in enumerate(
+ zip(m.named_children(), m_dp.module.children())
+ ):
named_msg = layer_name + ".weight " + iter_msg
- self.assertTrue(m_child.weight.grad.is_contiguous(memory_format=formats[i]), named_msg)
- self.assertTrue(m_dp_child.weight.grad.is_contiguous(memory_format=formats[i]), named_msg)
- for j, ((param_name, p), p_dp) in enumerate(zip(m_child.named_parameters(),
- m_dp_child.parameters())):
- named_msg = layer_name + "." + param_name + " " + iter_msg
+ self.assertTrue(
+ m_child.weight.grad.is_contiguous(
+ memory_format=formats[i]
+ ),
+ named_msg,
+ )
+ self.assertTrue(
+ m_dp_child.weight.grad.is_contiguous(
+ memory_format=formats[i]
+ ),
+ named_msg,
+ )
+ for j, ((param_name, p), p_dp) in enumerate(
+ zip(m_child.named_parameters(), m_dp_child.parameters())
+ ):
+ named_msg = (
+ layer_name + "." + param_name + " " + iter_msg
+ )
self.assertEqual(p.grad, p_dp.grad, rtol=tol, atol=tol)
opt.step()
opt_dp.step()
@@ -733,7 +803,10 @@ class TestDataParallel(TestCase):
opt_dp.zero_grad()
except BaseException:
# Makes sure we still get info if an error occurred somewhere other than the asserts.
- print("Caught exception during iterations at " + named_msg, flush=True)
+ print(
+ "Caught exception during iterations at " + named_msg,
+ flush=True,
+ )
raise
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "multi-GPU not supported")
@@ -779,7 +852,6 @@ class TestDataParallel(TestCase):
class TestDataParallelDeviceType(TestCase):
-
@onlyCUDA
@skipMeta
@dtypes(torch.float, torch.double, torch.half)
@@ -822,13 +894,13 @@ class TestDataParallelDeviceType(TestCase):
self.l = l
def forward(self, input):
- return self.l(input['data'])
+ return self.l(input["data"])
l = nn.Linear(10, 5).to(device, dtype)
i = torch.randn(20, 10, device=device, dtype=dtype)
expected_out = l(i)
n = nn.DataParallel(Net())
- out = n(input={'data': i, 'unused': []})
+ out = n(input={"data": i, "unused": []})
self.assertEqual(out.get_device(), 0)
self.assertEqual(out, expected_out, atol=dtype2prec_DONTUSE[dtype], rtol=0)
@@ -842,13 +914,13 @@ class TestDataParallelDeviceType(TestCase):
self.l = l
def forward(self, input):
- return self.l(input['data'])
+ return self.l(input["data"])
l = nn.Linear(10, 5).to(device, dtype)
i = torch.randn(20, 10, device=device, dtype=dtype)
expected_out = l(i)
n = nn.DataParallel(Net())
- out = n(input={'data': i, 'unused': {}})
+ out = n(input={"data": i, "unused": {}})
self.assertEqual(out.get_device(), 0)
self.assertEqual(out, expected_out, atol=dtype2prec_DONTUSE[dtype], rtol=0)
@@ -862,19 +934,19 @@ class TestDataParallelDeviceType(TestCase):
self.l = l
def forward(self, input):
- return self.l(input['data'])
+ return self.l(input["data"])
l = nn.Linear(10, 5).to(device, dtype)
i = torch.randn(20, 10, device=device, dtype=dtype)
expected_out = l(i)
n = nn.DataParallel(Net())
- out = n(input={'data': i, 'unused': ()})
+ out = n(input={"data": i, "unused": ()})
self.assertEqual(out.get_device(), 0)
self.assertEqual(out, expected_out, atol=dtype2prec_DONTUSE[dtype], rtol=0)
instantiate_device_type_tests(TestDataParallelDeviceType, globals())
-if __name__ == '__main__':
+if __name__ == "__main__":
TestCase._default_dtype_check_enabled = True
run_tests()
diff --git a/test/distributed/test_distributed_spawn.py b/test/distributed/test_distributed_spawn.py
index 9867dcc373..22e3fb3b06 100644
--- a/test/distributed/test_distributed_spawn.py
+++ b/test/distributed/test_distributed_spawn.py
@@ -2,10 +2,10 @@
import os
import sys
+from os import path
import torch
import torch.distributed as dist
-from os import path
torch.backends.cuda.matmul.allow_tf32 = False
@@ -13,13 +13,21 @@ if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
-from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN, NO_MULTIPROCESSING_SPAWN
+from torch.testing._internal.common_utils import (
+ NO_MULTIPROCESSING_SPAWN,
+ run_tests,
+ TEST_WITH_DEV_DBG_ASAN,
+)
from torch.testing._internal.distributed.distributed_test import (
- DistributedTest, TestDistBackend
+ DistributedTest,
+ TestDistBackend,
)
if TEST_WITH_DEV_DBG_ASAN:
- print("Skip dev-asan as torch + multiprocessing spawn have known issues", file=sys.stderr)
+ print(
+ "Skip dev-asan as torch + multiprocessing spawn have known issues",
+ file=sys.stderr,
+ )
sys.exit(0)
if NO_MULTIPROCESSING_SPAWN:
@@ -45,12 +53,13 @@ if (
BACKEND = os.environ["BACKEND"]
if BACKEND in _allowed_backends:
- class TestDistBackendWithSpawn(TestDistBackend, DistributedTest._DistTestBase):
+ class TestDistBackendWithSpawn(TestDistBackend, DistributedTest._DistTestBase):
def setUp(self):
super().setUp()
self._spawn_processes()
torch.backends.cudnn.flags(enabled=True, allow_tf32=False).__enter__()
+
else:
print(f"Invalid backend {BACKEND}. Tests will not be run!")
diff --git a/test/distributed/test_dynamo_distributed.py b/test/distributed/test_dynamo_distributed.py
index 9383598ac6..22ce528590 100644
--- a/test/distributed/test_dynamo_distributed.py
+++ b/test/distributed/test_dynamo_distributed.py
@@ -1,54 +1,62 @@
# Owner(s): ["module: dynamo"]
+import contextlib
import copy
import functools
-from io import StringIO
-from typing import List
import random
import unittest
+from contextlib import contextmanager
+from io import StringIO
+from typing import List
from unittest.mock import patch
-import contextlib
+
import numpy as np
import torch
-from torch._C import FileCheck
import torch._dynamo
-from torch._dynamo.backends.distributed import DDPOptimizer
+import torch._dynamo.logging
import torch._dynamo.test_case
-from contextlib import contextmanager
from torch import nn
+from torch._C import FileCheck
from torch._dynamo import config
-from torch._dynamo.utils import same
+from torch._dynamo.backends.distributed import DDPOptimizer
+from torch._dynamo.comptime import comptime
from torch._dynamo.testing import collect_results
-from torch.utils._triton import has_triton
-from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy, lambda_auto_wrap_policy
+from torch._dynamo.utils import same
from torch._higher_order_ops.wrap import tag_activation_checkpoint
-from torch.nn.parallel import DistributedDataParallel as DDP
+from torch.distributed._functional_collectives import _maybe_wrap_tensor
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
+from torch.distributed.fsdp.wrap import (
+ lambda_auto_wrap_policy,
+ transformer_auto_wrap_policy,
+)
+from torch.nn.parallel import DistributedDataParallel as DDP
+from torch.testing._internal.common_cuda import (
+ PLATFORM_SUPPORTS_FLASH_ATTENTION,
+ PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
+)
from torch.testing._internal.common_distributed import (
- DynamoDistributedSingleProcTestCase,
+ _dynamo_dist_per_rank_init,
DynamoDistributedMultiProcTestCase,
+ DynamoDistributedSingleProcTestCase,
import_transformers_or_skip,
- skip_if_lt_x_gpu,
requires_nccl,
- _dynamo_dist_per_rank_init,
+ skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import requires_cuda
-import torch._dynamo.logging
-from torch.testing._internal.common_cuda import (
- PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION
-)
-from torch._dynamo.comptime import comptime
-from torch.distributed._functional_collectives import _maybe_wrap_tensor
+from torch.utils._triton import has_triton
+
def reset_rng_state():
torch.manual_seed(1337)
random.seed(1337)
np.random.seed(1337)
+
def init_weights(m):
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
m.bias.data.fill_(0.01)
+
class ToyModel(nn.Module):
def __init__(self, in_feat=10, hidden_feat=5000, out_feat=5, ctx_manager=None):
super().__init__()
@@ -67,15 +75,22 @@ class ToyModel(nn.Module):
else:
return self.net(inputs)
-def get_model(device, bsz=20, in_feat=10, hidden_feat=5000, out_feat=5, ctx_manager=None):
- m = ToyModel(in_feat=in_feat, hidden_feat=hidden_feat, out_feat=out_feat, ctx_manager=ctx_manager).to(device)
+
+def get_model(
+ device, bsz=20, in_feat=10, hidden_feat=5000, out_feat=5, ctx_manager=None
+):
+ m = ToyModel(
+ in_feat=in_feat,
+ hidden_feat=hidden_feat,
+ out_feat=out_feat,
+ ctx_manager=ctx_manager,
+ ).to(device)
m.apply(init_weights)
inputs = torch.rand(bsz, in_feat).to(device)
outputs = m(inputs)
return m, inputs, outputs
-
class ToyInnerModel(nn.Module):
def __init__(self):
super().__init__()
@@ -85,11 +100,14 @@ class ToyInnerModel(nn.Module):
def forward(self, inputs):
return self.layers(inputs)
+
class ToyOuterModel(nn.Module):
def __init__(self, device):
super().__init__()
self.layers = [ToyInnerModel().to(device) for _ in range(2)]
- self.layers = nn.Sequential(self.layers[0], nn.ReLU(), self.layers[1], nn.ReLU())
+ self.layers = nn.Sequential(
+ self.layers[0], nn.ReLU(), self.layers[1], nn.ReLU()
+ )
def forward(self, inputs):
return self.layers(inputs)
@@ -109,16 +127,17 @@ def find_first_node(gm, func):
return None
-def apply_fsdp_with_checkpointing(model, wrap_policy, checkpoint_policy, use_activation_checkpointing=True):
+def apply_fsdp_with_checkpointing(
+ model, wrap_policy, checkpoint_policy, use_activation_checkpointing=True
+):
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
apply_activation_checkpointing,
checkpoint_wrapper,
CheckpointImpl,
)
+
model = FSDP(
- copy.deepcopy(model),
- auto_wrap_policy=wrap_policy,
- use_orig_params=True
+ copy.deepcopy(model), auto_wrap_policy=wrap_policy, use_orig_params=True
)
if use_activation_checkpointing:
checkpoint_wrapper_fn = functools.partial(
@@ -126,12 +145,13 @@ def apply_fsdp_with_checkpointing(model, wrap_policy, checkpoint_policy, use_act
checkpoint_impl=CheckpointImpl.NO_REENTRANT,
)
apply_activation_checkpointing(
- model, checkpoint_wrapper_fn=checkpoint_wrapper_fn, check_fn=checkpoint_policy,
+ model,
+ checkpoint_wrapper_fn=checkpoint_wrapper_fn,
+ check_fn=checkpoint_policy,
)
return model
-
def get_custom_model(device):
class MyCustomLinear(torch.nn.Module):
def __init__(self):
@@ -177,22 +197,26 @@ def get_custom_model(device):
correct_outputs = m(*inputs)
return m, inputs, correct_outputs
+
def get_hf_bert(rank):
# Note: use @import_transformers_or_skip on your test case if you use this
# in a multiprocessing test
try:
- from transformers import BertConfig, AutoModelForMaskedLM
+ from transformers import AutoModelForMaskedLM, BertConfig
except ImportError as e:
raise unittest.SkipTest("Unable to import transformers") from e
batch_size, max_length, config, device = 4, 512, BertConfig(), f"cuda:{rank}"
model = AutoModelForMaskedLM.from_config(config).to(device)
input_ids = torch.randint(0, config.vocab_size, (batch_size, max_length)).to(device)
- decoder_ids = torch.randint(0, config.vocab_size, (batch_size, max_length)).to(device)
- inputs = {'input_ids': input_ids, 'labels': decoder_ids}
+ decoder_ids = torch.randint(0, config.vocab_size, (batch_size, max_length)).to(
+ device
+ )
+ inputs = {"input_ids": input_ids, "labels": decoder_ids}
model.train()
return model, inputs
+
class CheckSplitsCompiler:
def __init__(self):
self.compiler_called = 0
@@ -226,6 +250,7 @@ class FakeDDP(nn.Module):
with self._inside_ddp_forward():
return self.module.forward(*inputs, **kwargs)
+
def run_hf_bert_ddp(self, model, inputs, backend):
reset_rng_state()
correct_outputs = model(**inputs)
@@ -239,10 +264,13 @@ def run_hf_bert_ddp(self, model, inputs, backend):
opt_loss.backward()
inputs_flat = [inputs[k] for k in inputs]
- correct_results = collect_results(model, correct_outputs.logits, correct_loss, inputs_flat)
+ correct_results = collect_results(
+ model, correct_outputs.logits, correct_loss, inputs_flat
+ )
opt_results = collect_results(opt_model, opt_outputs.logits, opt_loss, inputs_flat)
self.assertTrue(same(correct_results, opt_results))
+
class TestFakeDistributedSingleProc(torch._dynamo.test_case.TestCase):
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@patch.object(config, "optimize_ddp", True)
@@ -270,7 +298,6 @@ class TestFakeDistributedSingleProc(torch._dynamo.test_case.TestCase):
opt_model = torch._dynamo.optimize("aot_eager")(model)
opt_model()
-
@patch.object(config, "optimize_ddp", True)
def test_symbol_splitting(self):
class Model(nn.Module):
@@ -291,15 +318,23 @@ class TestFakeDistributedSingleProc(torch._dynamo.test_case.TestCase):
opt_model = torch.compile(dynamic=True)(model)
opt_model(torch.randn(20, 512))
-
@patch.object(config, "optimize_ddp", True)
def test_call_method_forward(self):
class Model(nn.Module):
- def __init__(self,):
+ def __init__(
+ self,
+ ):
super().__init__()
layers = []
for l in range(2):
- layer = nn.ModuleList([nn.LayerNorm(96), nn.MultiheadAttention(embed_dim=96, num_heads=4, batch_first=True)])
+ layer = nn.ModuleList(
+ [
+ nn.LayerNorm(96),
+ nn.MultiheadAttention(
+ embed_dim=96, num_heads=4, batch_first=True
+ ),
+ ]
+ )
layers.append(layer)
self.layers = nn.ModuleList(layers)
@@ -329,6 +364,7 @@ class TestMultiProc(DynamoDistributedMultiProcTestCase):
Prefer MultiThreadedTestCase for most tests. Perhaps use this one
sparingly for integration tests.
"""
+
@skip_if_lt_x_gpu(2)
@patch.object(config, "optimize_ddp", False)
def test_ddp_baseline_aot_eager_multiprocess(self):
@@ -384,11 +420,10 @@ class TestMultiProc(DynamoDistributedMultiProcTestCase):
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@patch.object(config, "optimize_ddp", False)
def test_ddp_activation_checkpointing(self):
-
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
- CheckpointImpl,
apply_activation_checkpointing,
checkpoint_wrapper,
+ CheckpointImpl,
)
class MyModel(torch.nn.Module):
@@ -410,8 +445,12 @@ class TestMultiProc(DynamoDistributedMultiProcTestCase):
checkpoint_wrapper,
checkpoint_impl=CheckpointImpl.NO_REENTRANT,
)
- check_fn = lambda submodule: isinstance(submodule, torch.nn.Linear) # noqa: E731
- apply_activation_checkpointing(model, checkpoint_wrapper_fn=non_reentrant_wrapper, check_fn=check_fn)
+ check_fn = lambda submodule: isinstance( # noqa: E731
+ submodule, torch.nn.Linear
+ )
+ apply_activation_checkpointing(
+ model, checkpoint_wrapper_fn=non_reentrant_wrapper, check_fn=check_fn
+ )
model = DDP(model)
x = torch.randn(10, 64).cuda()
@@ -436,9 +475,9 @@ class TestMultiProc(DynamoDistributedMultiProcTestCase):
fsdp_m = FSDP(
m,
auto_wrap_policy=functools.partial(
- transformer_auto_wrap_policy, transformer_layer_cls=(nn.Linear, )
+ transformer_auto_wrap_policy, transformer_layer_cls=(nn.Linear,)
),
- use_orig_params=True
+ use_orig_params=True,
)
fsdp_m = torch._dynamo.optimize("aot_eager")(fsdp_m)
outputs = fsdp_m(inputs)
@@ -460,20 +499,21 @@ class TestMultiProc(DynamoDistributedMultiProcTestCase):
fsdp_m = FSDP(
m,
auto_wrap_policy=functools.partial(
- transformer_auto_wrap_policy, transformer_layer_cls=(nn.Linear, )
+ transformer_auto_wrap_policy, transformer_layer_cls=(nn.Linear,)
),
- use_orig_params=True
+ use_orig_params=True,
)
fsdp_m = torch._dynamo.optimize("inductor")(fsdp_m)
outputs = fsdp_m(inputs)
self.assertTrue(same(correct_outputs, outputs))
-
@skip_if_lt_x_gpu(1)
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
def test_fsdp_activation_checkpointing(self):
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
- model, inputs = get_toy_model_for_activation_checkpointing(f"cuda:{self.rank}")
+ model, inputs = get_toy_model_for_activation_checkpointing(
+ f"cuda:{self.rank}"
+ )
is_inner = lambda module: isinstance(module, ToyInnerModel) # noqa: E731
wrap_policy = functools.partial(lambda_auto_wrap_policy, lambda_fn=is_inner)
model = apply_fsdp_with_checkpointing(model, wrap_policy, is_inner)
@@ -484,9 +524,9 @@ class TestMultiProc(DynamoDistributedMultiProcTestCase):
self.assertTrue(same(correct_outputs, outputs))
# Each FSDP module is a separate graph
self.assertEqual(cnt.frame_count, 2)
- self.assertTrue(find_first_node(cnt.graphs[0], tag_activation_checkpoint) is not None)
-
-
+ self.assertTrue(
+ find_first_node(cnt.graphs[0], tag_activation_checkpoint) is not None
+ )
@import_transformers_or_skip()
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@@ -495,24 +535,18 @@ class TestMultiProc(DynamoDistributedMultiProcTestCase):
@patch.object(torch._inductor.config, "fallback_random", True)
@unittest.skipIf(
PLATFORM_SUPPORTS_FLASH_ATTENTION or PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
- "Inaccurate results with fused SDPA kernels"
+ "Inaccurate results with fused SDPA kernels",
)
def test_hf_bert_fsdp(self):
-
def apply_fsdp(model, wrap_policy):
model = FSDP(
- copy.deepcopy(model),
- auto_wrap_policy=wrap_policy,
- use_orig_params=True
+ copy.deepcopy(model), auto_wrap_policy=wrap_policy, use_orig_params=True
)
return model
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
- for (wrap_policy, test_instance) in (
- (
- None,
- "FSDP without recursive wrapping"
- ),
+ for wrap_policy, test_instance in (
+ (None, "FSDP without recursive wrapping"),
):
print(f"Running hf_bert test for {test_instance}")
model, inputs = get_hf_bert(self.rank)
@@ -530,12 +564,14 @@ class TestMultiProc(DynamoDistributedMultiProcTestCase):
opt_loss.backward()
inputs_flat = [inputs[k] for k in inputs]
- correct_results = collect_results(eager_model, correct_outputs.logits, correct_loss, inputs_flat)
- opt_results = collect_results(opt_model, opt_outputs.logits, opt_loss, inputs_flat)
+ correct_results = collect_results(
+ eager_model, correct_outputs.logits, correct_loss, inputs_flat
+ )
+ opt_results = collect_results(
+ opt_model, opt_outputs.logits, opt_loss, inputs_flat
+ )
self.assertTrue(same(correct_results, opt_results))
-
-
@import_transformers_or_skip()
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
# TODO(whc) Investigate why cudagraphs breaks inductor+fsdp for hf_bert
@@ -543,20 +579,27 @@ class TestMultiProc(DynamoDistributedMultiProcTestCase):
@patch.object(torch._inductor.config, "fallback_random", True)
def test_hf_bert_fsdp_activation_checkpointing(self):
from transformers.models.bert.modeling_bert import BertLayer
+
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
- for (wrap_policy, test_instance) in (
+ for wrap_policy, test_instance in (
(
functools.partial(
- transformer_auto_wrap_policy, transformer_layer_cls=(BertLayer, )
+ transformer_auto_wrap_policy, transformer_layer_cls=(BertLayer,)
),
- "FSDP with recursive wrapping BertLayer instances"
+ "FSDP with recursive wrapping BertLayer instances",
),
):
- print(f"Running hf_bert_activation_checkpointing test for {test_instance}")
+ print(
+ f"Running hf_bert_activation_checkpointing test for {test_instance}"
+ )
model, inputs = get_hf_bert(self.rank)
- check_fn = lambda submodule: isinstance(submodule, BertLayer) # noqa: E731
+ check_fn = lambda submodule: isinstance( # noqa: E731
+ submodule, BertLayer
+ )
reset_rng_state()
- eager_model = apply_fsdp_with_checkpointing(model, wrap_policy, check_fn)
+ eager_model = apply_fsdp_with_checkpointing(
+ model, wrap_policy, check_fn
+ )
correct_outputs = eager_model(**inputs)
correct_loss = correct_outputs.loss
correct_loss.backward()
@@ -569,8 +612,12 @@ class TestMultiProc(DynamoDistributedMultiProcTestCase):
opt_loss.backward()
inputs_flat = [inputs[k] for k in inputs]
- correct_results = collect_results(eager_model, correct_outputs.logits, correct_loss, inputs_flat)
- opt_results = collect_results(opt_model, opt_outputs.logits, opt_loss, inputs_flat)
+ correct_results = collect_results(
+ eager_model, correct_outputs.logits, correct_loss, inputs_flat
+ )
+ opt_results = collect_results(
+ opt_model, opt_outputs.logits, opt_loss, inputs_flat
+ )
self.assertTrue(same(correct_results, opt_results))
@@ -584,8 +631,15 @@ class TestSingleProc(DynamoDistributedSingleProcTestCase):
Use TestMultiProc for things that really need to run on multiple nodes
"""
- def get_model(self, bsz=20, in_feat=10, hidden_feat=5000, out_feat=5, ctx_manager=None):
- m = ToyModel(in_feat=in_feat, hidden_feat=hidden_feat, out_feat=out_feat, ctx_manager=ctx_manager).to(self.device)
+ def get_model(
+ self, bsz=20, in_feat=10, hidden_feat=5000, out_feat=5, ctx_manager=None
+ ):
+ m = ToyModel(
+ in_feat=in_feat,
+ hidden_feat=hidden_feat,
+ out_feat=out_feat,
+ ctx_manager=ctx_manager,
+ ).to(self.device)
m.apply(init_weights)
inputs = torch.rand(bsz, in_feat).to(self.device)
outputs = m(inputs)
@@ -655,19 +709,20 @@ class TestSingleProc(DynamoDistributedSingleProcTestCase):
]:
for ctx_manager, output_test in [
(
- lambda: torch.autocast(torch.device(self.device).type, torch.float16),
+ lambda: torch.autocast(
+ torch.device(self.device).type, torch.float16
+ ),
lambda out: self.assertEqual(out.dtype, torch.float16),
),
- (
- torch.enable_grad,
- lambda out: self.assertTrue(out.requires_grad)
- ),
- (
- torch.no_grad,
- lambda out: self.assertTrue(not out.requires_grad)
- ),
+ (torch.enable_grad, lambda out: self.assertTrue(out.requires_grad)),
+ (torch.no_grad, lambda out: self.assertTrue(not out.requires_grad)),
]:
- m, inputs, correct_outputs = self.get_model(out_feat=1000, hidden_feat=1000, in_feat=1000, ctx_manager=ctx_manager)
+ m, inputs, correct_outputs = self.get_model(
+ out_feat=1000,
+ hidden_feat=1000,
+ in_feat=1000,
+ ctx_manager=ctx_manager,
+ )
# inp - 1000 * 1000 matrix of float32 (4 bytes) = 4MB
# hidden - 1000 * 1000 matrix of float32 (4 bytes) = 4MB
bucket_cap_mb = 3.5 # 4MB
@@ -675,7 +730,9 @@ class TestSingleProc(DynamoDistributedSingleProcTestCase):
compiler = get_compiler()
- @torch._dynamo.optimize(compiler.compile_fn if compiler else "aot_eager")
+ @torch._dynamo.optimize(
+ compiler.compile_fn if compiler else "aot_eager"
+ )
def opt_fn(inputs):
return ddp_m(inputs)
@@ -711,7 +768,9 @@ class TestSingleProc(DynamoDistributedSingleProcTestCase):
opt_outputs = opt_fn(inputs)
self.assertTrue(same(correct_outputs, opt_outputs))
- @torch._inductor.config.patch({"layout_optimization": True, "keep_output_stride": False})
+ @torch._inductor.config.patch(
+ {"layout_optimization": True, "keep_output_stride": False}
+ )
@patch.object(config, "optimize_ddp", True)
def _test_graph_split_inductor_layout_optimizations_impl(self, context):
assert config.optimize_ddp
@@ -722,10 +781,22 @@ class TestSingleProc(DynamoDistributedSingleProcTestCase):
def __init__(self):
super().__init__()
self.net = nn.Sequential(
- *[nn.Conv2d(channel_dim, channel_dim, 1, stride=1, bias=False), nn.ReLU()]
- + [nn.Conv2d(channel_dim, channel_dim, 1, stride=1, bias=False), nn.ReLU()]
- + [nn.Conv2d(channel_dim, channel_dim, 1, stride=1, bias=False), nn.ReLU()]
- + [nn.Conv2d(channel_dim, channel_dim, 1, stride=1, bias=False), nn.ReLU()]
+ *[
+ nn.Conv2d(channel_dim, channel_dim, 1, stride=1, bias=False),
+ nn.ReLU(),
+ ]
+ + [
+ nn.Conv2d(channel_dim, channel_dim, 1, stride=1, bias=False),
+ nn.ReLU(),
+ ]
+ + [
+ nn.Conv2d(channel_dim, channel_dim, 1, stride=1, bias=False),
+ nn.ReLU(),
+ ]
+ + [
+ nn.Conv2d(channel_dim, channel_dim, 1, stride=1, bias=False),
+ nn.ReLU(),
+ ]
)
def forward(self, inputs):
@@ -751,7 +822,9 @@ class TestSingleProc(DynamoDistributedSingleProcTestCase):
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
def test_graph_split_inductor_layout_optimizations_training(self):
- self._test_graph_split_inductor_layout_optimizations_impl(contextlib.nullcontext)
+ self._test_graph_split_inductor_layout_optimizations_impl(
+ contextlib.nullcontext
+ )
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
def test_graph_split_inductor_layout_optimizations_inference(self):
@@ -874,14 +947,13 @@ class TestSingleProc(DynamoDistributedSingleProcTestCase):
DDP._set_params_and_buffers_to_ignore_for_model(m, parameters_to_ignore)
ddp_m = DDP(m, device_ids=self.device_ids, bucket_cap_mb=25)
parameter_ids_to_ignore = [
- id(ddp_m.module.get_parameter(p))
- for p in ddp_m.parameters_to_ignore
+ id(ddp_m.module.get_parameter(p)) for p in ddp_m.parameters_to_ignore
]
check_splits_compiler = CheckSplitsCompiler()
ddp_optimizer = DDPOptimizer(
bucket_bytes_cap=ddp_m.bucket_bytes_cap,
- backend_compile_fn=check_splits_compiler.compile_fn
+ backend_compile_fn=check_splits_compiler.compile_fn,
)
@torch._dynamo.optimize(ddp_optimizer.compile_fn)
@@ -935,17 +1007,21 @@ class TestSingleProc(DynamoDistributedSingleProcTestCase):
with self.assertRaisesRegex(
torch._dynamo.exc.BackendCompilerFailed,
- "DDPOptimizer backend: Found a higher order op in the graph"
+ "DDPOptimizer backend: Found a higher order op in the graph",
):
torch.compile(mod, backend=cnt)(*args)
-
def test_fsdp_orig_params_assert(self):
# Test with basic FSDP wrapping (outer wrap around whole model)
m, inputs, correct_outputs = get_model(f"cuda:{self.rank}")
fsdp_m = FSDP(m, use_orig_params=False)
fsdp_m = torch._dynamo.optimize()(fsdp_m)
- self.assertRaisesRegex(AssertionError, "Dynamo only supports FSDP with use_orig_params=True", fsdp_m, inputs)
+ self.assertRaisesRegex(
+ AssertionError,
+ "Dynamo only supports FSDP with use_orig_params=True",
+ fsdp_m,
+ inputs,
+ )
def test_fsdp_skip_guards(self):
"""
@@ -965,7 +1041,7 @@ class TestSingleProc(DynamoDistributedSingleProcTestCase):
for skip_guards, expected_guard_source in (
(True, "local_fsdp_module"),
- (False, "local")
+ (False, "local"),
):
torch._dynamo.reset()
@@ -987,8 +1063,13 @@ class TestSingleProc(DynamoDistributedSingleProcTestCase):
ctx.print_guards(file=GUARDS_FILE)
return out
+
device = f"cuda:{self.rank}"
- m = ToyModel(in_feat=10, hidden_feat=5000, out_feat=5,).to(device)
+ m = ToyModel(
+ in_feat=10,
+ hidden_feat=5000,
+ out_feat=5,
+ ).to(device)
inputs = torch.rand(20, 10).to(device)
m.apply(init_weights)
correct_outputs = m(inputs)
@@ -999,14 +1080,17 @@ class TestSingleProc(DynamoDistributedSingleProcTestCase):
outputs = opt_m(inputs)
# far from an exhaustive check of all the expected guards, just check a couple of them.
- FileCheck() \
- .check("""local "L['self']" TYPE_MATCH""") \
- .check("""local "L['self']" ID_MATCH""") \
- .check(f"""{expected_guard_source} "L['self'].net" TYPE_MATCH""") \
- .check(f"""{expected_guard_source} "L['self'].net" ID_MATCH""") \
- .check(f"""{expected_guard_source} "L['self'].net[0]" TYPE_MATCH""") \
- .check(f"""{expected_guard_source} "L['self'].net[0]" ID_MATCH""") \
- .run(GUARDS_FILE.getvalue())
+ FileCheck().check("""local "L['self']" TYPE_MATCH""").check(
+ """local "L['self']" ID_MATCH"""
+ ).check(f"""{expected_guard_source} "L['self'].net" TYPE_MATCH""").check(
+ f"""{expected_guard_source} "L['self'].net" ID_MATCH"""
+ ).check(
+ f"""{expected_guard_source} "L['self'].net[0]" TYPE_MATCH"""
+ ).check(
+ f"""{expected_guard_source} "L['self'].net[0]" ID_MATCH"""
+ ).run(
+ GUARDS_FILE.getvalue()
+ )
self.assertTrue(same(correct_outputs, outputs))
def test_fsdp_skip_register_attr_or_module(self):
@@ -1017,6 +1101,7 @@ class TestSingleProc(DynamoDistributedSingleProcTestCase):
before calling `register_attr_or_module`
in variables/builder.py
"""
+
class ToyModel(nn.Module):
def __init__(self, in_feat=10, hidden_feat=5000, out_feat=5):
super().__init__()
@@ -1032,7 +1117,11 @@ class TestSingleProc(DynamoDistributedSingleProcTestCase):
torch._dynamo.reset()
device = f"cuda:{self.rank}"
- m = ToyModel(in_feat=10, hidden_feat=5000, out_feat=5,).to(device)
+ m = ToyModel(
+ in_feat=10,
+ hidden_feat=5000,
+ out_feat=5,
+ ).to(device)
inputs = torch.rand(20, 10).to(device)
m.apply(init_weights)
correct_outputs = m(inputs)
@@ -1045,9 +1134,12 @@ class TestSingleProc(DynamoDistributedSingleProcTestCase):
"l__self___net_0_weight",
"l__self___net_0_bias",
"l__self___net_2_weight",
- "l__self___net_2_bias"
+ "l__self___net_2_bias",
]:
- self.assertFalse(name in node.name, f"FSDP module {name} should not be registered as attributes")
+ self.assertFalse(
+ name in node.name,
+ f"FSDP module {name} should not be registered as attributes",
+ )
return gm
opt_m = torch._dynamo.optimize(backend=debug_compiler)(fsdp_m)
@@ -1061,6 +1153,7 @@ class TestSingleProc(DynamoDistributedSingleProcTestCase):
source are de-duplicated, meaning that they are each only passed once
as a graph input.
"""
+
class DuplicateModule(nn.Module):
def __init__(self) -> None:
super().__init__()
@@ -1091,6 +1184,7 @@ class TestSingleProc(DynamoDistributedSingleProcTestCase):
``a is b``, where ``a`` and ``b`` are certainly not the same. We check
this by checking for per-invocation recompiles.
"""
+
class BufModule(nn.Module):
def __init__(self) -> None:
super().__init__()
@@ -1134,6 +1228,7 @@ class TestSingleProc(DynamoDistributedSingleProcTestCase):
correctly both when the staticmethod is invoked from the class and from
the object itself.
"""
+
class ModuleWithStaticMethod(nn.Module):
def __init__(self, use_self: bool):
super().__init__()
@@ -1187,4 +1282,5 @@ class TestSingleProc(DynamoDistributedSingleProcTestCase):
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
+
run_tests()
diff --git a/test/distributed/test_fake_pg.py b/test/distributed/test_fake_pg.py
index 195034e06e..238b866601 100644
--- a/test/distributed/test_fake_pg.py
+++ b/test/distributed/test_fake_pg.py
@@ -1,28 +1,24 @@
# Owner(s): ["oncall: distributed"]
import sys
+import unittest
+
import torch
import torch.distributed as dist
-import torch.nn as nn
-import unittest
import torch.distributed._functional_collectives as funcol
-from torch.fx.experimental.proxy_tensor import make_fx
-from torch.testing._internal.distributed.fake_pg import FakeStore
-from torch.testing import FileCheck
-from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
+import torch.nn as nn
from torch.distributed._tensor import DeviceMesh, init_device_mesh, Shard
-from torch.testing._internal.common_utils import (
- TestCase,
- run_tests,
-)
+from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
-from torch.testing._internal.distributed._tensor.common_dtensor import (
- MLPModule,
-)
+from torch.fx.experimental.proxy_tensor import make_fx
+from torch.testing import FileCheck
+from torch.testing._internal.common_utils import run_tests, TestCase
+from torch.testing._internal.distributed._tensor.common_dtensor import MLPModule
+from torch.testing._internal.distributed.fake_pg import FakeStore
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
@@ -30,6 +26,7 @@ if not dist.is_available():
HAS_CUDA = torch.cuda.is_available()
+
class TestFakePG(TestCase):
def tearDown(self):
super().tearDown()
@@ -37,9 +34,7 @@ class TestFakePG(TestCase):
def test_all_reduce(self):
store = FakeStore()
- dist.init_process_group(
- backend="fake", rank=1, world_size=2, store=store
- )
+ dist.init_process_group(backend="fake", rank=1, world_size=2, store=store)
output = torch.ones(3, 3) * dist.get_rank()
dist.all_reduce(output)
@@ -47,9 +42,7 @@ class TestFakePG(TestCase):
def test_allgather(self):
store = FakeStore()
- dist.init_process_group(
- backend="fake", rank=1, world_size=2, store=store
- )
+ dist.init_process_group(backend="fake", rank=1, world_size=2, store=store)
input_tensor = torch.ones(3, 3) * dist.get_rank()
output_tensors = [torch.empty_like(input_tensor) for _ in range(2)]
@@ -59,9 +52,7 @@ class TestFakePG(TestCase):
def test_reduce_scatter(self):
store = FakeStore()
- dist.init_process_group(
- backend="fake", rank=1, world_size=2, store=store
- )
+ dist.init_process_group(backend="fake", rank=1, world_size=2, store=store)
to_reduce_scatter = [torch.ones(3, 3) * rank for rank in range(2)]
output_tensor = torch.empty(3, 3)
@@ -69,24 +60,20 @@ class TestFakePG(TestCase):
dist.reduce_scatter(output_tensor, to_reduce_scatter)
self.assertEqual(tuple(output_tensor.shape), (3, 3))
- @unittest.skipIf(not HAS_CUDA, 'No CUDA')
+ @unittest.skipIf(not HAS_CUDA, "No CUDA")
def test_construct_fsdp(self):
store = FakeStore()
- dist.init_process_group(
- backend="fake", rank=0, world_size=2, store=store
- )
- FSDP(nn.Linear(2, 3, device='cuda'))
+ dist.init_process_group(backend="fake", rank=0, world_size=2, store=store)
+ FSDP(nn.Linear(2, 3, device="cuda"))
- @unittest.skipIf(not HAS_CUDA, 'No CUDA')
+ @unittest.skipIf(not HAS_CUDA, "No CUDA")
def test_fsdp_fake_e2e(self):
store = dist.HashStore()
- dist.init_process_group(
- backend="fake", rank=0, world_size=2, store=store
- )
+ dist.init_process_group(backend="fake", rank=0, world_size=2, store=store)
my_module = nn.Sequential(
- nn.Linear(2, 3, device='cuda'),
+ nn.Linear(2, 3, device="cuda"),
nn.ReLU(),
- nn.Linear(3, 2, device='cuda'),
+ nn.Linear(3, 2, device="cuda"),
)
sharded_module = FSDP(my_module, use_orig_params=True)
optim = torch.optim.Adam(sharded_module.parameters(), lr=0.0001)
@@ -96,19 +83,17 @@ class TestFakePG(TestCase):
loss.backward()
optim.step()
- @unittest.skipIf(not HAS_CUDA, 'No CUDA')
+ @unittest.skipIf(not HAS_CUDA, "No CUDA")
def test_fake_pg_tracing(self):
store = dist.HashStore()
- dist.init_process_group(
- backend="fake", rank=0, world_size=2, store=store
- )
+ dist.init_process_group(backend="fake", rank=0, world_size=2, store=store)
default_pg = dist.distributed_c10d._get_default_group()
def allgather_fn(tensor):
return funcol.all_gather_tensor(tensor, 0, default_pg)
- gm = make_fx(allgather_fn)(torch.randn(2, 2, device='cuda'))
+ gm = make_fx(allgather_fn)(torch.randn(2, 2, device="cuda"))
FileCheck().check("all_gather").check("wait_tensor").run(str(gm.graph))
def test_broadcast(self):
@@ -184,15 +169,13 @@ class TestFakePG(TestCase):
tp_size = 2
store = dist.HashStore()
- dist.init_process_group(backend="fake", rank=0, world_size=world_size, store=store)
-
- device_mesh = DeviceMesh(
- "cuda", torch.arange(0, world_size).view(-1, tp_size)
+ dist.init_process_group(
+ backend="fake", rank=0, world_size=world_size, store=store
)
+
+ device_mesh = DeviceMesh("cuda", torch.arange(0, world_size).view(-1, tp_size))
device_mesh = init_device_mesh(
- "cuda",
- (world_size // tp_size, tp_size),
- mesh_dim_names=["dp", "tp"]
+ "cuda", (world_size // tp_size, tp_size), mesh_dim_names=["dp", "tp"]
)
sequence_parallelize_plan = {
@@ -204,7 +187,6 @@ class TestFakePG(TestCase):
"net2": RowwiseParallel(),
}
for parallel_plan in [sequence_parallelize_plan, pairwise_parallelize_plan]:
-
my_module = parallelize_module(
MLPModule(device="cuda"),
device_mesh["tp"],
@@ -212,9 +194,7 @@ class TestFakePG(TestCase):
)
sharded_module = FSDP(
- my_module,
- use_orig_params=True,
- device_mesh=device_mesh["dp"]
+ my_module, use_orig_params=True, device_mesh=device_mesh["dp"]
)
optim = torch.optim.Adam(sharded_module.parameters(), lr=0.0001)
diff --git a/test/distributed/test_functional_api.py b/test/distributed/test_functional_api.py
index 491da6551f..90f750d400 100644
--- a/test/distributed/test_functional_api.py
+++ b/test/distributed/test_functional_api.py
@@ -12,10 +12,10 @@ import torch.distributed._tensor as dt
import torch.distributed.distributed_c10d as c10d
from functorch import make_fx
+from torch._inductor.utils import run_and_get_code
from torch.testing import FileCheck
from torch.testing._internal.distributed.fake_pg import FakeStore
from torch.utils._triton import has_triton
-from torch._inductor.utils import run_and_get_code
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
@@ -24,8 +24,8 @@ if not dist.is_available():
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
MultiThreadedTestCase,
- TEST_SKIPS,
requires_nccl,
+ TEST_SKIPS,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
@@ -401,7 +401,6 @@ class TestMakeFx(MultiThreadedTestCase):
self.assertFalse(torch.fx._symbolic_trace.is_fx_tracing())
def test_all_reduce_tracing(self):
-
def allred(input):
return ft_c.all_reduce(input, "sum", group=dist.group.WORLD) + 1
@@ -585,7 +584,6 @@ class TestCollectivesWithNCCL(MultiProcessTestCase):
class TestNCCLCollectivesWithWorldSize4(TestCollectivesWithNCCL):
-
@property
def world_size(self):
return 4
@@ -608,23 +606,17 @@ class TestNCCLCollectivesWithWorldSize4(TestCollectivesWithNCCL):
# rank0: [0., 1.], rank1: [2., 3.]
send_tensor = torch.arange(2, dtype=torch.float32, device=device) + 2 * rank
- recvd_tensor = ft_c.permute_tensor(
- send_tensor,
- [1, 0],
- group=mesh
- )
+ recvd_tensor = ft_c.permute_tensor(send_tensor, [1, 0], group=mesh)
# rank0: [2., 3.], rank1: [0., 1.]
- expected = torch.arange(
- 2,
- dtype=torch.float32,
- device=device
- ) + 2 * ((rank - 1 + 2) % 2)
+ expected = torch.arange(2, dtype=torch.float32, device=device) + 2 * (
+ (rank - 1 + 2) % 2
+ )
self.assertEqual(
recvd_tensor,
expected,
msg=f"Expected {expected} on {self.rank=} (local_rank={rank}), "
- f"but received {recvd_tensor} instead."
+ f"but received {recvd_tensor} instead.",
)
@@ -648,12 +640,7 @@ class TestFunctionalAutograd(MultiThreadedTestCase):
sizes = [1] * world_size
t = t * 10
assert t.requires_grad
- out = ft_c.all_to_all_single_autograd(
- t,
- sizes,
- sizes,
- group
- )
+ out = ft_c.all_to_all_single_autograd(t, sizes, sizes, group)
out = out + 2
return out
@@ -678,12 +665,7 @@ class TestFunctionalAutograd(MultiThreadedTestCase):
sizes = [1] * world_size
t = t * 10
assert t.requires_grad
- out = ft_c.all_to_all_single_autograd(
- t,
- sizes,
- sizes,
- group
- )
+ out = ft_c.all_to_all_single_autograd(t, sizes, sizes, group)
out = out + 2
return out.sum()
@@ -697,9 +679,9 @@ class TestFunctionalAutograd(MultiThreadedTestCase):
for code in codes:
FileCheck().check_count(
"_c10d_functional.all_to_all_single.default", 1, exactly=True
- ).check_count(
- "_c10d_functional.wait_tensor.default", 1, exactly=True
- ).run(code)
+ ).check_count("_c10d_functional.wait_tensor.default", 1, exactly=True).run(
+ code
+ )
self.assertIsNotNone(t.grad)
diff --git a/test/distributed/test_inductor_collectives.py b/test/distributed/test_inductor_collectives.py
index 5662ae964c..84db55d055 100644
--- a/test/distributed/test_inductor_collectives.py
+++ b/test/distributed/test_inductor_collectives.py
@@ -2,20 +2,25 @@
import functools
import unittest
from unittest.mock import patch
+
import torch
-from torch._C import FileCheck
-# for some reason importing functional collectives after dynamo breaks collectives handling!
-import torch.distributed._functional_collectives as _functional_collectives
import torch._dynamo
+import torch._dynamo.logging
import torch._dynamo.test_case
-from torch._dynamo.utils import same
+
+# for some reason importing functional collectives after dynamo breaks collectives handling!
+import torch.distributed._functional_collectives as _functional_collectives
+from torch._C import FileCheck
from torch._dynamo.testing import CompileCounter
+from torch._dynamo.utils import same
+from torch._inductor.compile_fx import compile_fx as inductor_compile_fx
+from torch._inductor.utils import run_and_get_triton_code
from torch.distributed.distributed_c10d import GroupMember
from torch.fx.experimental.proxy_tensor import make_fx
from torch.testing._internal.common_distributed import (
- DynamoDistributedSingleProcTestCase,
- DynamoDistributedMultiProcTestCase,
_dynamo_dist_per_rank_init,
+ DynamoDistributedMultiProcTestCase,
+ DynamoDistributedSingleProcTestCase,
requires_nccl,
skip_if_lt_x_gpu,
)
@@ -24,10 +29,8 @@ from torch.testing._internal.common_utils import (
parametrize,
requires_cuda,
)
-from torch._inductor.compile_fx import compile_fx as inductor_compile_fx
from torch.utils._triton import has_triton
-from torch._inductor.utils import run_and_get_triton_code
-import torch._dynamo.logging
+
def _tolist_with_constrain_as_size(tensor):
lst = tensor.tolist()
@@ -35,11 +38,13 @@ def _tolist_with_constrain_as_size(tensor):
torch._constrain_as_size(elem)
return lst
+
@requires_nccl()
class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
"""
Run correctness checks in multi-proc runner, mark with minimum # GPUs to run under
"""
+
def get_world_trs(self):
return {
"tag": "",
@@ -63,7 +68,9 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
"""
def example(tensor, src, *, tag, ranks, group_size):
- res = torch.ops.c10d_functional.broadcast(tensor, src, tag, ranks, group_size)
+ res = torch.ops.c10d_functional.broadcast(
+ tensor, src, tag, ranks, group_size
+ )
res = torch.ops.c10d_functional.wait_tensor(res)
return res
@@ -72,16 +79,12 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
return inductor_compile_fx(graph, example_inputs)
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
-
example = functools.partial(
example,
**self.get_world_trs(),
)
t = torch.randn(4, 4, device="cuda")
- inputs = (
- t if self.rank == 0 else torch.zeros(4, 4, device="cuda"),
- 0
- )
+ inputs = (t if self.rank == 0 else torch.zeros(4, 4, device="cuda"), 0)
eager_out = example(*inputs)
self.assertTrue(same(t, eager_out))
@@ -106,14 +109,13 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
g = torch.matmul(e, f)
ar = torch.ops.c10d_functional.wait_tensor(ar)
out = torch.add(ar, g.repeat(2, 1))
- return (out, )
+ return (out,)
def compile(func, example_inputs):
graph = make_fx(func)(*example_inputs)
return inductor_compile_fx(graph, example_inputs)
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
-
matmul_cat_col = functools.partial(
matmul_cat_col,
**self.get_world_trs(),
@@ -136,7 +138,6 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
# TODO: somehow inductor bg compile threads are causing hangs at exit with distributed work dtor
@patch.object(torch._inductor.config, "compile_threads", 1)
def test_eager_allreduce_inductor_wait(self):
-
def eager_func(a, b, c, d, *, tag, ranks, group_size):
x = torch.matmul(a, b)
y = torch.matmul(c, d)
@@ -148,14 +149,13 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
g = torch.matmul(e, f)
ar = torch.ops.c10d_functional.wait_tensor(ar)
out = torch.add(ar, g.repeat(2, 1))
- return (out, )
+ return (out,)
def compile(func, example_inputs):
graph = make_fx(func)(*example_inputs)
return inductor_compile_fx(graph, example_inputs)
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
-
eager_func = functools.partial(
eager_func,
**self.get_world_trs(),
@@ -164,8 +164,12 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
inductor_inputs = (torch.ones(4, 4, device="cuda") + self.rank,) * 2
eager_out = inductor_func(eager_func(*eager_inputs), *inductor_inputs)
- compiled_inductor_func = compile(inductor_func, [eager_func(*eager_inputs)] + list(inductor_inputs))
- inductor_out = compiled_inductor_func(eager_func(*eager_inputs), *inductor_inputs)
+ compiled_inductor_func = compile(
+ inductor_func, [eager_func(*eager_inputs)] + list(inductor_inputs)
+ )
+ inductor_out = compiled_inductor_func(
+ eager_func(*eager_inputs), *inductor_inputs
+ )
print(f"eager_out, {eager_out}")
print(f"inductor_out, {inductor_out}")
self.assertTrue(same(eager_out, inductor_out, tol=0.001))
@@ -175,7 +179,6 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
# TODO: somehow inductor bg compile threads are causing hangs at exit with distributed work dtor
@patch.object(torch._inductor.config, "compile_threads", 1)
def test_inductor_allreduce_eager_wait(self):
-
def inductor_func(a, b, c, d, *, tag, ranks, group_size):
x = torch.matmul(a, b)
y = torch.matmul(c, d)
@@ -187,14 +190,13 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
g = torch.matmul(e, f)
ar = torch.ops.c10d_functional.wait_tensor(ar)
out = torch.add(ar, g.repeat(2, 1))
- return (out, )
+ return (out,)
def compile(func, example_inputs):
graph = make_fx(func)(*example_inputs)
return inductor_compile_fx(graph, example_inputs)
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
-
inductor_func = functools.partial(
inductor_func,
**self.get_world_trs(),
@@ -204,7 +206,9 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
eager_out = eager_func(inductor_func(*inductor_inputs), *eager_inputs)
compiled_inductor_func = compile(inductor_func, inductor_inputs)
- inductor_out = eager_func(compiled_inductor_func(*inductor_inputs), *eager_inputs)
+ inductor_out = eager_func(
+ compiled_inductor_func(*inductor_inputs), *eager_inputs
+ )
self.assertTrue(same(eager_out, inductor_out, tol=0.001))
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@@ -233,7 +237,9 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
@patch.object(torch._inductor.config, "compile_threads", 1)
def test_permute_tensor(self):
def func(tensor, src_dst_pairs, *, tag, ranks, group_size):
- return _functional_collectives.permute_tensor(tensor, src_dst_pairs, ranks, tag)
+ return _functional_collectives.permute_tensor(
+ tensor, src_dst_pairs, ranks, tag
+ )
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
inputs = (
@@ -247,11 +253,9 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
self.assertTrue(same(out, correct))
# rank0: [2., 3.], rank1: [0., 1.]
- expected = torch.arange(
- 2,
- dtype=torch.float32,
- device="cuda"
- ) + 2 * ((self.rank - 1 + self.world_size) % self.world_size)
+ expected = torch.arange(2, dtype=torch.float32, device="cuda") + 2 * (
+ (self.rank - 1 + self.world_size) % self.world_size
+ )
self.assertEqual(out, expected)
self.assertEqual(correct, expected)
@@ -318,16 +322,17 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
def example(a, b, *, tag, ranks, group_size):
c = torch.matmul(a, b)
- ag = torch.ops.c10d_functional.all_gather_into_tensor(c, tag, ranks, group_size)
+ ag = torch.ops.c10d_functional.all_gather_into_tensor(
+ c, tag, ranks, group_size
+ )
ag = torch.ops.c10d_functional.wait_tensor(ag)
- return (ag, )
+ return (ag,)
def compile(func, example_inputs):
graph = make_fx(func)(*example_inputs)
return inductor_compile_fx(graph, example_inputs)
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
-
example = functools.partial(
example,
**self.get_world_trs(),
@@ -374,9 +379,19 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
# TODO: somehow inductor bg compile threads are causing hangs at exit with distributed work dtor
@patch.object(torch._inductor.config, "compile_threads", 1)
def test_all_to_all_single_inductor(self):
- def example(inp, input_split_sizes_tensor, output_split_sizes_tensor, *, tag, ranks, group_size):
+ def example(
+ inp,
+ input_split_sizes_tensor,
+ output_split_sizes_tensor,
+ *,
+ tag,
+ ranks,
+ group_size,
+ ):
input_split_sizes = _tolist_with_constrain_as_size(input_split_sizes_tensor)
- output_split_sizes = _tolist_with_constrain_as_size(output_split_sizes_tensor)
+ output_split_sizes = _tolist_with_constrain_as_size(
+ output_split_sizes_tensor
+ )
a2a = torch.ops.c10d_functional.all_to_all_single(
inp,
output_split_sizes,
@@ -389,14 +404,22 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
out = a2a / a2a.sum(dim=0)
return out
- with _dynamo_dist_per_rank_init(self.rank, self.world_size), torch._dynamo.config.patch(
+ with _dynamo_dist_per_rank_init(
+ self.rank, self.world_size
+ ), torch._dynamo.config.patch(
dynamic_shapes=True,
capture_dynamic_output_shape_ops=True,
capture_scalar_outputs=True,
):
row = self.world_size * (self.rank + 1) * (self.world_size + 1) / 2
- input_split_sizes_tensor = torch.tensor([(i + 1) * (self.rank + 1) for i in range(self.world_size)], dtype=torch.int64)
- output_split_sizes_tensor = torch.tensor([(i + 1) * (self.rank + 1) for i in range(self.world_size)], dtype=torch.int64)
+ input_split_sizes_tensor = torch.tensor(
+ [(i + 1) * (self.rank + 1) for i in range(self.world_size)],
+ dtype=torch.int64,
+ )
+ output_split_sizes_tensor = torch.tensor(
+ [(i + 1) * (self.rank + 1) for i in range(self.world_size)],
+ dtype=torch.int64,
+ )
inputs = (
torch.ones(int(row), 5, device="cuda") * (self.rank + 1),
input_split_sizes_tensor,
@@ -407,9 +430,9 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
compiled_fn = torch.compile(example, fullgraph=True, dynamic=True)
code = run_and_get_triton_code(compiled_fn, *inputs, **trs)
- FileCheck() \
- .check_regex("all_to_all_single\\(buf\\d+\\[0\\], buf\\d+_inputs\\[0\\], output_split_sizes=\\[u\\d+, u\\d+\\], input_split_sizes=\\[u\\d+, u\\d+\\]") \
- .run(code) # noqa: B950
+ FileCheck().check_regex(
+ "all_to_all_single\\(buf\\d+\\[0\\], buf\\d+_inputs\\[0\\], output_split_sizes=\\[u\\d+, u\\d+\\], input_split_sizes=\\[u\\d+, u\\d+\\]" # noqa: B950
+ ).run(code)
eager_out = example(*inputs, **trs)
inductor_out = compiled_fn(*inputs, **trs)
@@ -436,18 +459,21 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
return out
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
- input_split_sizes_tensor = torch.tensor([1] * self.world_size, dtype=torch.int64)
+ input_split_sizes_tensor = torch.tensor(
+ [1] * self.world_size, dtype=torch.int64
+ )
inputs = (
- torch.ones(self.world_size, self.world_size, device="cuda") * (self.rank + 1),
+ torch.ones(self.world_size, self.world_size, device="cuda")
+ * (self.rank + 1),
input_split_sizes_tensor,
)
trs = self.get_world_trs()
compiled_fn = torch.compile(example, fullgraph=True, dynamic=True)
code = run_and_get_triton_code(compiled_fn, *inputs, **trs)
- FileCheck() \
- .check_regex("all_to_all_single\\(buf\\d+\\[0\\], buf\\d+_inputs\\[0\\], output_split_sizes=None, input_split_sizes=\\[u\\d+, u\\d+\\]") \
- .run(code) # noqa: B950
+ FileCheck().check_regex(
+ "all_to_all_single\\(buf\\d+\\[0\\], buf\\d+_inputs\\[0\\], output_split_sizes=None, input_split_sizes=\\[u\\d+, u\\d+\\]" # noqa: B950
+ ).run(code)
eager_out = example(*inputs, **trs)
inductor_out = compiled_fn(*inputs, **trs)
@@ -460,7 +486,9 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
@patch.object(torch._inductor.config, "compile_threads", 1)
def test_all_to_all_single_inductor_input_split_sizes_none(self):
def example(inp, output_split_sizes_tensor, *, tag, ranks, group_size):
- output_split_sizes = _tolist_with_constrain_as_size(output_split_sizes_tensor)
+ output_split_sizes = _tolist_with_constrain_as_size(
+ output_split_sizes_tensor
+ )
a2a = torch.ops.c10d_functional.all_to_all_single(
inp,
output_split_sizes,
@@ -473,23 +501,28 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
out = a2a / a2a.sum(dim=0)
return out
- with _dynamo_dist_per_rank_init(self.rank, self.world_size), torch._dynamo.config.patch(
+ with _dynamo_dist_per_rank_init(
+ self.rank, self.world_size
+ ), torch._dynamo.config.patch(
dynamic_shapes=True,
capture_dynamic_output_shape_ops=True,
capture_scalar_outputs=True,
):
- output_split_sizes_tensor = torch.tensor([1] * self.world_size, dtype=torch.int64)
+ output_split_sizes_tensor = torch.tensor(
+ [1] * self.world_size, dtype=torch.int64
+ )
inputs = (
- torch.ones(self.world_size, self.world_size, device="cuda") * (self.rank + 1),
+ torch.ones(self.world_size, self.world_size, device="cuda")
+ * (self.rank + 1),
output_split_sizes_tensor,
)
trs = self.get_world_trs()
compiled_fn = torch.compile(example, fullgraph=True, dynamic=True)
code = run_and_get_triton_code(compiled_fn, *inputs, **trs)
- FileCheck() \
- .check_regex("all_to_all_single\\(buf\\d+\\[0\\], buf\\d+_inputs\\[0\\], output_split_sizes=\\[u\\d+, u\\d+\\], input_split_sizes=None") \
- .run(code) # noqa: B950
+ FileCheck().check_regex(
+ "all_to_all_single\\(buf\\d+\\[0\\], buf\\d+_inputs\\[0\\], output_split_sizes=\\[u\\d+, u\\d+\\], input_split_sizes=None" # noqa: B950
+ ).run(code)
eager_out = example(*inputs, **trs)
inductor_out = compiled_fn(*inputs, **trs)
@@ -514,14 +547,19 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
return out
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
- inputs = (torch.ones(self.world_size, self.world_size, device="cuda") * (self.rank + 1),)
+ inputs = (
+ torch.ones(self.world_size, self.world_size, device="cuda")
+ * (self.rank + 1),
+ )
trs = self.get_world_trs()
compiled_fn = torch.compile(example, fullgraph=True, dynamic=True)
code = run_and_get_triton_code(compiled_fn, *inputs, **trs)
- FileCheck() \
- .check_regex("all_to_all_single\\(buf\\d+\\[0\\], buf\\d+_inputs\\[0\\], output_split_sizes=None, input_split_sizes=None") \
- .run(code) # noqa: B950
+ FileCheck().check_regex(
+ "all_to_all_single\\(buf\\d+\\[0\\], buf\\d+_inputs\\[0\\], output_split_sizes=None, input_split_sizes=None"
+ ).run(
+ code
+ ) # noqa: B950
eager_out = example(*inputs, **trs)
inductor_out = compiled_fn(*inputs, **trs)
@@ -535,6 +573,7 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
"""
Prefer single-proc test runner for basic tests as it is easier to work with.
"""
+
def get_world_trs(self, world_size=1):
return {
"tag": "",
@@ -545,9 +584,10 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@torch._inductor.config.patch(debug=True)
def test_inductor_single_op(self):
-
def func(inp, *, tag, ranks, group_size):
- ar = torch.ops.c10d_functional.all_reduce(inp, "sum", tag, ranks, group_size)
+ ar = torch.ops.c10d_functional.all_reduce(
+ inp, "sum", tag, ranks, group_size
+ )
ar = torch.ops.c10d_functional.wait_tensor(ar)
return ar
@@ -558,15 +598,17 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
code = run_and_get_triton_code(compiled, inputs, **self.get_world_trs())
# NOTE: Make sure we are not unneccessarily copying the outputs of
# wait_tensors before they are returned from the graph.
- FileCheck() \
- .check("buf0 = empty") \
- .check("buf0.copy_(arg0_1)") \
- .check("buf1 = buf0") \
- .check("buf1_work = dist.all_reduce(buf1") \
- .check("fun_col_impl._register_tensor_work(buf1, buf1_work)") \
- .check("buf0 = _wait_tensor(buf0)") \
- .check("return (buf0, )") \
- .run(code)
+ FileCheck().check("buf0 = empty").check("buf0.copy_(arg0_1)").check(
+ "buf1 = buf0"
+ ).check("buf1_work = dist.all_reduce(buf1").check(
+ "fun_col_impl._register_tensor_work(buf1, buf1_work)"
+ ).check(
+ "buf0 = _wait_tensor(buf0)"
+ ).check(
+ "return (buf0, )"
+ ).run(
+ code
+ )
correct = func(inputs, **self.get_world_trs())
self.assertTrue(same(out, correct))
@@ -592,17 +634,21 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
code = run_and_get_triton_code(compiled, inputs, **self.get_world_trs())
# NOTE: Make sure we are not unneccessarily copying the outputs of
# wait_tensors before they are returned from the graph.
- FileCheck() \
- .check("buf1 = buf0; del buf0 # reuse") \
- .check_not("buf1.copy_(") \
- .check("buf2 = buf1") \
- .check("buf2_work = dist.all_reduce(buf2") \
- .check("fun_col_impl._register_tensor_work(buf2, buf2_work)") \
- .check("buf1 = _wait_tensor(buf1)") \
- .check("buf4 = buf1") \
- .check("buf5 = empty") \
- .check("return (buf1, buf5") \
- .run(code)
+ FileCheck().check("buf1 = buf0; del buf0 # reuse").check_not(
+ "buf1.copy_("
+ ).check("buf2 = buf1").check("buf2_work = dist.all_reduce(buf2").check(
+ "fun_col_impl._register_tensor_work(buf2, buf2_work)"
+ ).check(
+ "buf1 = _wait_tensor(buf1)"
+ ).check(
+ "buf4 = buf1"
+ ).check(
+ "buf5 = empty"
+ ).check(
+ "return (buf1, buf5"
+ ).run(
+ code
+ )
out = compiled(inputs, **self.get_world_trs())
correct = func(inputs, **self.get_world_trs())
self.assertTrue(same(out, correct))
@@ -629,25 +675,28 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
code = run_and_get_triton_code(compiled, inputs, **self.get_world_trs())
# NOTE: Make sure we are not unneccessarily copying the outputs of
# wait_tensors before they are returned from the graph.
- FileCheck() \
- .check("buf0 = empty") \
- .check("buf5 = empty") \
- .check("triton_poi__0.run(arg0_1, buf0, buf5") \
- .check_not("copy_(") \
- .check("buf1 = buf0; del buf0 # reuse") \
- .check("buf2 = buf1") \
- .check("buf2_work = dist.all_reduce(buf2") \
- .check("fun_col_impl._register_tensor_work(buf2, buf2_work)") \
- .check("buf1 = _wait_tensor(buf1)") \
- .check("buf4 = buf1") \
- .check("return (buf1, buf5, buf6") \
- .run(code)
+ FileCheck().check("buf0 = empty").check("buf5 = empty").check(
+ "triton_poi__0.run(arg0_1, buf0, buf5"
+ ).check_not("copy_(").check("buf1 = buf0; del buf0 # reuse").check(
+ "buf2 = buf1"
+ ).check(
+ "buf2_work = dist.all_reduce(buf2"
+ ).check(
+ "fun_col_impl._register_tensor_work(buf2, buf2_work)"
+ ).check(
+ "buf1 = _wait_tensor(buf1)"
+ ).check(
+ "buf4 = buf1"
+ ).check(
+ "return (buf1, buf5, buf6"
+ ).run(
+ code
+ )
out = compiled(inputs, **self.get_world_trs())
correct = func(inputs, **self.get_world_trs())
self.assertTrue(same(out, correct))
def test_dynamo_trace_allreduce(self):
-
def func(inp):
ar = _functional_collectives.all_reduce(inp, "sum", "0")
return ar
@@ -664,7 +713,6 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
self.assertTrue(same(out, correct))
def test_dynamo_trace_all_gather_tensor(self):
-
def func(inp):
ar = _functional_collectives.all_gather_tensor(inp, 0, "0")
return ar
@@ -681,7 +729,6 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
self.assertTrue(same(out, correct))
def test_dynamo_trace_all_gather_tensor_pg(self):
-
def func(inp, *, pg):
ar = _functional_collectives.all_gather_tensor(inp, 0, pg)
return ar
@@ -698,13 +745,13 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
self.assertTrue(same(out, correct))
def test_dynamo_rewrite_dist_all_gather(self):
-
def func(inp, out, *, pg):
torch.distributed.all_gather_into_tensor(
out,
inp,
pg,
)
+
local_size = [4, 4]
# single-proc test
global_size = local_size
@@ -723,13 +770,13 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
assert same(outputs, correct_outputs)
def test_dynamo_rewrite_dist_all_gather_list(self):
-
def func(inp, out, *, pg):
torch.distributed.all_gather(
out,
inp,
pg,
)
+
local_size = [4, 4]
# single-proc test
global_size = local_size
@@ -754,6 +801,7 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
group=pg,
async_op=False,
)
+
local_size = [4, 4]
# single-proc test
global_size = local_size
@@ -772,13 +820,13 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
assert same(outputs, correct_outputs)
def test_dynamo_rewrite_dist_reduce_scatter(self):
-
def func(inp, out, *, pg):
torch.distributed.reduce_scatter_tensor(
out,
inp,
group=pg,
)
+
local_size = [4, 4]
# single-proc test
global_size = local_size
@@ -804,10 +852,9 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
"kwargs",
"kwargs_none",
"unspecified",
- ]
+ ],
)
def test_dynamo_rewrite_dist_allreduce(self, pg_mode):
-
def func(tensor, *args, **kwargs):
torch.distributed.all_reduce(
tensor,
@@ -846,13 +893,8 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
assert same(inputs_compiled, inputs_eager)
def test_dynamo_rewrite_dist_all_to_all_single(self):
-
def func(output, input, pg):
- torch.distributed.all_to_all_single(
- output,
- input,
- group=pg
- )
+ torch.distributed.all_to_all_single(output, input, group=pg)
counter = CompileCounter()
compiled = torch.compile(func, backend=counter, fullgraph=True)
@@ -876,7 +918,7 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
torch.distributed.ReduceOp.PRODUCT,
torch.distributed.ReduceOp.MIN,
torch.distributed.ReduceOp.MAX,
- ]
+ ],
)
def test_dynamo_rewrite_dist_allreduce_reduce_op(self, reduce_op):
from torch.distributed._functional_collectives import REDUCE_OP_TO_STR
@@ -885,8 +927,9 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
ar_nodes = []
for node in gm.graph.nodes:
if node.target in [
- torch.ops.c10d_functional.all_reduce,
- torch.ops._c10d_functional.all_reduce]:
+ torch.ops.c10d_functional.all_reduce,
+ torch.ops._c10d_functional.all_reduce,
+ ]:
ar_nodes.append(node)
self.assertEqual(len(ar_nodes), 1)
reduce_op_str = ar_nodes[0].args[1]
@@ -906,14 +949,14 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
compiled(*inputs)
@parametrize(
- "source", [
+ "source",
+ [
"GroupMember.WORLD",
"group.WORLD",
"_get_default_group",
- ]
+ ],
)
def test_dynamo_get_world_group(self, source):
-
def func(tensor):
if source == "GroupMember.WORLD":
group = torch.distributed.GroupMember.WORLD
@@ -932,8 +975,9 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
ar_nodes = []
for node in gm.graph.nodes:
if node.target in [
- torch.ops.c10d_functional.all_reduce,
- torch.ops._c10d_functional.all_reduce]:
+ torch.ops.c10d_functional.all_reduce,
+ torch.ops._c10d_functional.all_reduce,
+ ]:
ar_nodes.append(node)
self.assertEqual(len(ar_nodes), 1)
return gm
@@ -942,18 +986,12 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
input = torch.ones(2, device=self.device)
compiled(input)
-
def test_dynamo_support_collective_op_with_async_op_False(self):
-
def func(inp, out, *, pg):
# user explicitly set the attribute `async_op` to False,
# there should be no graph break
- torch.distributed.reduce_scatter_tensor(
- out,
- inp,
- group=pg,
- async_op=False
- )
+ torch.distributed.reduce_scatter_tensor(out, inp, group=pg, async_op=False)
+
local_size = [4, 4]
# single-proc test
global_size = local_size
@@ -970,15 +1008,12 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
assert same(outputs, correct_outputs)
def test_dynamo_graphbreaks_unsupported_async_op(self):
-
def func(inp, out, *, pg):
work = torch.distributed.reduce_scatter_tensor(
- out,
- inp,
- group=pg,
- async_op=True
+ out, inp, group=pg, async_op=True
)
work.wait()
+
local_size = [4, 4]
# single-proc test
global_size = local_size
@@ -1011,7 +1046,6 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
assert same(outputs, correct_outputs)
def test_dynamo_trace_reduce_scatter_tensor(self):
-
def func(inp):
ar = _functional_collectives.reduce_scatter_tensor(inp, "sum", 0, "0")
return ar
@@ -1029,7 +1063,9 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
def test_dynamo_trace_allgather_coalesced(self):
def func(inp, *, tag, ranks, group_size):
- ar = torch.ops.c10d_functional.all_gather_into_tensor_coalesced(inp, tag, ranks, group_size)
+ ar = torch.ops.c10d_functional.all_gather_into_tensor_coalesced(
+ inp, tag, ranks, group_size
+ )
return ar
inputs = [torch.ones(4, 4, device="cuda"), torch.ones(6, 6, device="cuda")]
@@ -1041,21 +1077,26 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
assert counter.op_count == 3 # It generates 2 getattr to unpack the array
assert same(out, correct)
-
def test_backwards(self):
"""
It's probably not that common to need backwards support for collectives.
However, I wanted to at least see if it was possible to support it as a design goal.
"""
+
def func(inp):
ar = _functional_collectives.all_reduce(inp, "sum", "0")
return ar
input = torch.ones(4, 4, device="cuda", requires_grad=True)
# TODO implement backwards
- with self.assertRaisesRegex(RuntimeError, "element 0 of tensors does not require grad and does not have a grad_fn"):
- compiled = torch.compile(func, backend="aot_eager") # inductor bug with single-op allreduce graph
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "element 0 of tensors does not require grad and does not have a grad_fn",
+ ):
+ compiled = torch.compile(
+ func, backend="aot_eager"
+ ) # inductor bug with single-op allreduce graph
out = compiled(input)
out.sum().backward()
@@ -1079,7 +1120,9 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
def func(inp, *, tag, ranks, group_size):
x = inp + 1
- tensor_list = torch.ops.c10d_functional.all_gather_into_tensor_coalesced([x, inp], tag, ranks, group_size)
+ tensor_list = torch.ops.c10d_functional.all_gather_into_tensor_coalesced(
+ [x, inp], tag, ranks, group_size
+ )
y = x + 2
ar0 = torch.ops.c10d_functional.wait_tensor(tensor_list[0])
ar1 = torch.ops.c10d_functional.wait_tensor(tensor_list[1])
@@ -1093,25 +1136,32 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
code = run_and_get_triton_code(compiled, inputs, **self.get_world_trs())
# NOTE: Make sure we are not unneccessarily copying the outputs of
# wait_tensors before they are returned from the graph.
- FileCheck() \
- .check("buf0 = empty") \
- .check("buf5 = empty") \
- .check("triton_poi__0.run(arg0_1, buf0, buf5") \
- .check("buf1 = empty") \
- .check("buf2 = empty") \
- .check_not("copy_(") \
- .check("buf3_inputs = [buf0,arg0_1]") \
- .check("buf3 = [buf1,buf2]") \
- .check("buf3_work = fun_col_impl._all_gather_into_tensor_coalesced_fallback("
- "output_tensors=buf3, input_tensors=buf3_inputs") \
- .check("fun_col_impl._register_tensor_work(buf3, buf3_work)") \
- .check("buf1 = _wait_tensor(buf1)") \
- .check("buf4 = buf1") \
- .check("buf6 = buf0; del buf0 # reuse") \
- .check("buf2 = _wait_tensor(buf2)") \
- .check("buf7 = buf2") \
- .check("return (buf1, buf5, buf6, buf2") \
- .run(code)
+ FileCheck().check("buf0 = empty").check("buf5 = empty").check(
+ "triton_poi__0.run(arg0_1, buf0, buf5"
+ ).check("buf1 = empty").check("buf2 = empty").check_not("copy_(").check(
+ "buf3_inputs = [buf0,arg0_1]"
+ ).check(
+ "buf3 = [buf1,buf2]"
+ ).check(
+ "buf3_work = fun_col_impl._all_gather_into_tensor_coalesced_fallback("
+ "output_tensors=buf3, input_tensors=buf3_inputs"
+ ).check(
+ "fun_col_impl._register_tensor_work(buf3, buf3_work)"
+ ).check(
+ "buf1 = _wait_tensor(buf1)"
+ ).check(
+ "buf4 = buf1"
+ ).check(
+ "buf6 = buf0; del buf0 # reuse"
+ ).check(
+ "buf2 = _wait_tensor(buf2)"
+ ).check(
+ "buf7 = buf2"
+ ).check(
+ "return (buf1, buf5, buf6, buf2"
+ ).run(
+ code
+ )
out = compiled(inputs, **self.get_world_trs())
correct = func(inputs, **self.get_world_trs())
assert same(out, correct), f"{out} va {correct}"
@@ -1125,7 +1175,9 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
def func(inp, *, tag, ranks, group_size):
x = inp + 1
- tensor_list = torch.ops.c10d_functional.reduce_scatter_tensor_coalesced([x, inp], "sum", tag, ranks, group_size)
+ tensor_list = torch.ops.c10d_functional.reduce_scatter_tensor_coalesced(
+ [x, inp], "sum", tag, ranks, group_size
+ )
y = x + 2
ar0 = torch.ops.c10d_functional.wait_tensor(tensor_list[0])
ar1 = torch.ops.c10d_functional.wait_tensor(tensor_list[1])
@@ -1139,24 +1191,30 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
code = run_and_get_triton_code(compiled, inputs, **self.get_world_trs())
# NOTE: The first return value should be the output of the first wait_tensor.
# We want to make sure no unneccessary copy is made.
- FileCheck() \
- .check("buf0 = empty") \
- .check("buf5 = empty") \
- .check("triton_poi__0.run(arg0_1, buf0, buf5") \
- .check("buf1 = empty") \
- .check("buf2 = empty") \
- .check_not("copy_(") \
- .check("buf3 = [buf1,buf2]") \
- .check("buf3_work = fun_col_impl._reduce_scatter_tensor_coalesced_fallback("
- "output_tensors=buf3, input_tensors=buf3_inputs") \
- .check("fun_col_impl._register_tensor_work(buf3, buf3_work)") \
- .check("buf1 = _wait_tensor(buf1)") \
- .check("buf4 = buf1") \
- .check("buf6 = buf0; del buf0 # reuse") \
- .check("buf2 = _wait_tensor(buf2)") \
- .check("buf7 = buf2") \
- .check("return (buf1, buf5, buf6, buf2") \
- .run(code)
+ FileCheck().check("buf0 = empty").check("buf5 = empty").check(
+ "triton_poi__0.run(arg0_1, buf0, buf5"
+ ).check("buf1 = empty").check("buf2 = empty").check_not("copy_(").check(
+ "buf3 = [buf1,buf2]"
+ ).check(
+ "buf3_work = fun_col_impl._reduce_scatter_tensor_coalesced_fallback("
+ "output_tensors=buf3, input_tensors=buf3_inputs"
+ ).check(
+ "fun_col_impl._register_tensor_work(buf3, buf3_work)"
+ ).check(
+ "buf1 = _wait_tensor(buf1)"
+ ).check(
+ "buf4 = buf1"
+ ).check(
+ "buf6 = buf0; del buf0 # reuse"
+ ).check(
+ "buf2 = _wait_tensor(buf2)"
+ ).check(
+ "buf7 = buf2"
+ ).check(
+ "return (buf1, buf5, buf6, buf2"
+ ).run(
+ code
+ )
out = compiled(inputs, **self.get_world_trs())
correct = func(inputs, **self.get_world_trs())
assert same(out, correct), f"{out} va {correct}"
@@ -1164,4 +1222,5 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
+
run_tests()
diff --git a/test/distributed/test_launcher.py b/test/distributed/test_launcher.py
index 178d98ffdc..58c5de168e 100644
--- a/test/distributed/test_launcher.py
+++ b/test/distributed/test_launcher.py
@@ -13,9 +13,9 @@ if not dist.is_available():
sys.exit(0)
from torch.testing._internal.common_utils import (
+ run_tests,
TEST_WITH_DEV_DBG_ASAN,
TestCase,
- run_tests,
)
diff --git a/test/distributed/test_multi_threaded_pg.py b/test/distributed/test_multi_threaded_pg.py
index 1109c731b5..5bc9700e24 100644
--- a/test/distributed/test_multi_threaded_pg.py
+++ b/test/distributed/test_multi_threaded_pg.py
@@ -1,34 +1,32 @@
# Owner(s): ["oncall: distributed"]
+import operator
import os
import sys
+import threading
+from functools import reduce
+from unittest import skip, SkipTest
+
import torch
+import torch.autograd
import torch.distributed as dist
from torch._C._distributed_c10d import ReduceOp
-from unittest import skip, SkipTest
-import operator
-from functools import reduce
-import threading
-import torch.autograd
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
from torch.testing._internal.common_distributed import (
- spawn_threads_and_init_comms,
MultiThreadedTestCase,
skip_if_lt_x_gpu,
+ spawn_threads_and_init_comms,
)
-from torch.testing._internal.common_utils import (
- TestCase,
- run_tests,
- IS_SANDCASTLE,
-)
+from torch.testing._internal.common_utils import IS_SANDCASTLE, run_tests, TestCase
DEFAULT_WORLD_SIZE = 4
+
class TestCollectivesWithWrapper(TestCase):
@spawn_threads_and_init_comms(world_size=4)
def test_broadcast_object_list(self):
@@ -42,7 +40,9 @@ class TestCollectivesWithWrapper(TestCase):
@spawn_threads_and_init_comms(world_size=4)
def _test_method(self):
input_tensor = torch.ones(3, 3) * dist.get_rank() # perform 1st all gather
- output_tensors = [torch.empty_like(input_tensor) for _ in range(dist.get_world_size())]
+ output_tensors = [
+ torch.empty_like(input_tensor) for _ in range(dist.get_world_size())
+ ]
dist.all_gather(output_tensors, input_tensor)
if dist.get_rank() == 0:
@@ -57,7 +57,9 @@ class TestCollectivesWithWrapper(TestCase):
@spawn_threads_and_init_comms(world_size=4)
def _test_method(self):
input_tensor = torch.ones(3, 3) * dist.get_rank() # perform 1st all gather
- output_tensors = [torch.empty_like(input_tensor) for _ in range(dist.get_world_size())]
+ output_tensors = [
+ torch.empty_like(input_tensor) for _ in range(dist.get_world_size())
+ ]
dist.all_gather(output_tensors, input_tensor)
if dist.get_rank() == 1:
@@ -72,11 +74,15 @@ class TestCollectivesWithWrapper(TestCase):
@spawn_threads_and_init_comms(world_size=4)
def _test_method(self):
input_tensor = torch.ones(3, 3) * dist.get_rank() # perform 1st all gather
- output_tensors = [torch.empty_like(input_tensor) for _ in range(dist.get_world_size())]
+ output_tensors = [
+ torch.empty_like(input_tensor) for _ in range(dist.get_world_size())
+ ]
dist.all_gather(output_tensors, input_tensor)
if dist.get_rank() > 0:
- raise AssertionError("Mimic real test failure.") # fail on all non-zero rank
+ raise AssertionError(
+ "Mimic real test failure."
+ ) # fail on all non-zero rank
dist.all_gather(output_tensors, input_tensor) # perform 2nd all gather
@@ -125,6 +131,7 @@ class TestCollectivesWithWrapper(TestCase):
dist.all_to_all_single(out, send)
self.assertEqual(out.tolist(), list(zip(range(world_size), range(world_size))))
+
class TestCollectivesWithBaseClass(MultiThreadedTestCase):
@property
def world_size(self):
@@ -141,7 +148,9 @@ class TestCollectivesWithBaseClass(MultiThreadedTestCase):
def test_allgather(self):
input_tensor = torch.ones(3, 3) * dist.get_rank()
- output_tensors = [torch.empty_like(input_tensor) for _ in range(self.world_size)]
+ output_tensors = [
+ torch.empty_like(input_tensor) for _ in range(self.world_size)
+ ]
dist.all_gather(output_tensors, input_tensor)
for rank, out_tensor in enumerate(output_tensors):
self.assertEqual(out_tensor, torch.ones(3, 3) * rank)
@@ -307,7 +316,11 @@ class TestCollectivesWithBaseClass(MultiThreadedTestCase):
result, rank = ctx.saved_tensors
bwd_tid = threading.current_thread().ident
- self.assertEqual(fwd_tid, bwd_tid, f"bwd not running in the same thread a fwd for rank {rank.item()}")
+ self.assertEqual(
+ fwd_tid,
+ bwd_tid,
+ f"bwd not running in the same thread a fwd for rank {rank.item()}",
+ )
self.assertTrue(dist.is_initialized())
self.assertEqual(int(rank.item()), dist.get_rank())
dist.all_reduce(result)
@@ -315,9 +328,12 @@ class TestCollectivesWithBaseClass(MultiThreadedTestCase):
return grad_output * result
- x = torch.tensor([dist.get_rank()], dtype=torch.float, device="cuda", requires_grad=True)
+ x = torch.tensor(
+ [dist.get_rank()], dtype=torch.float, device="cuda", requires_grad=True
+ )
x = MyFunc.apply(x)
x.sum().backward()
+
if __name__ == "__main__":
run_tests()
diff --git a/test/distributed/test_nccl.py b/test/distributed/test_nccl.py
index 27997256ea..7da2c9af9a 100644
--- a/test/distributed/test_nccl.py
+++ b/test/distributed/test_nccl.py
@@ -1,26 +1,27 @@
# Owner(s): ["oncall: distributed"]
+import re
import sys
+
import torch
-import torch.cuda.nccl as nccl
import torch.cuda
+import torch.cuda.nccl as nccl
import torch.distributed as c10d
+from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU
+from torch.testing._internal.common_device_type import (
+ dtypes,
+ instantiate_device_type_tests,
+)
from torch.testing._internal.common_utils import (
- TestCase,
- run_tests,
IS_WINDOWS,
load_tests,
- TEST_WITH_ROCM,
- skip_but_pass_in_sandcastle_if,
NoTest,
+ run_tests,
+ skip_but_pass_in_sandcastle_if,
+ TEST_WITH_ROCM,
+ TestCase,
)
-from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU
-from torch.testing._internal.common_device_type import (
- instantiate_device_type_tests,
- dtypes,
-)
-import re
HIP_VERSION = (
0.0
diff --git a/test/distributed/test_store.py b/test/distributed/test_store.py
index da76d7b6a1..8383101d20 100644
--- a/test/distributed/test_store.py
+++ b/test/distributed/test_store.py
@@ -5,8 +5,8 @@ import os
import socket
import sys
import tempfile
-import time
import threading
+import time
from datetime import timedelta
from sys import platform
@@ -14,9 +14,12 @@ import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as c10d
import torch.distributed.rpc as rpc
-from torch.distributed import DistNetworkError, DistError, DistStoreError
+from torch.distributed import DistError, DistNetworkError, DistStoreError
from torch.testing._internal.common_distributed import MultiThreadedTestCase
-from torch.testing._internal.common_utils import instantiate_parametrized_tests, parametrize
+from torch.testing._internal.common_utils import (
+ instantiate_parametrized_tests,
+ parametrize,
+)
if not dist.is_available():
print("torch.distributed not available, skipping tests", file=sys.stderr)
@@ -24,17 +27,17 @@ if not dist.is_available():
import torch.testing._internal.common_utils as common
from torch.testing._internal.common_distributed import (
- skip_if_win32,
create_tcp_store,
- tp_transports
+ skip_if_win32,
+ tp_transports,
)
from torch.testing._internal.common_utils import (
- TestCase,
- load_tests,
- run_tests,
- retry_on_connect_failures,
ADDRESS_IN_USE,
CONNECT_TIMEOUT,
+ load_tests,
+ retry_on_connect_failures,
+ run_tests,
+ TestCase,
)
# load_tests from common_utils is used to automatically filter tests for
@@ -62,7 +65,7 @@ def gpus_for_rank(world_size):
gpus_for_rank = []
for rank in range(world_size):
gpus_for_rank.append(
- visible_devices[rank * gpus_per_process: (rank + 1) * gpus_per_process]
+ visible_devices[rank * gpus_per_process : (rank + 1) * gpus_per_process]
)
return gpus_for_rank
@@ -102,7 +105,9 @@ class StoreTestBase:
self._test_set_get_check(self._create_store())
def _test_compare_set(self, store):
- missing_key_result = store.compare_set("cs_key0", "wrong_old_value", "new_value0")
+ missing_key_result = store.compare_set(
+ "cs_key0", "wrong_old_value", "new_value0"
+ )
self.assertEqual(b"wrong_old_value", missing_key_result)
store.set("cs_key0", "value0")
@@ -189,10 +194,14 @@ class FileStoreTest(TestCase, StoreTestBase):
rpc_backend_options = rpc.TensorPipeRpcBackendOptions()
rpc_backend_options.init_method = f"file://{file.name}"
rpc_backend_options._transports = tp_transports()
- rpc.init_rpc("worker", rank=0, world_size=1, rpc_backend_options=rpc_backend_options)
+ rpc.init_rpc(
+ "worker", rank=0, world_size=1, rpc_backend_options=rpc_backend_options
+ )
# Init PG using file
- dist.init_process_group("gloo", rank=0, world_size=1, init_method=f"file://{file.name}")
+ dist.init_process_group(
+ "gloo", rank=0, world_size=1, init_method=f"file://{file.name}"
+ )
dist.destroy_process_group()
assert os.path.exists(file.name)
@@ -228,7 +237,9 @@ class PrefixStoreTest(TestCase):
self.file = tempfile.NamedTemporaryFile(delete=False)
def test_get_underlying_store(self):
- tcp_store = dist.TCPStore(host_name=DEFAULT_HOSTNAME, port=0, world_size=1, is_master=True)
+ tcp_store = dist.TCPStore(
+ host_name=DEFAULT_HOSTNAME, port=0, world_size=1, is_master=True
+ )
hash_store = dist.HashStore()
file_store = dist.FileStore(self.file.name, world_size=1)
for store in [tcp_store, hash_store, file_store]:
@@ -305,8 +316,7 @@ class TCPStoreTest(TestCase, StoreTestBase):
)
backend_opts = rpc.TensorPipeRpcBackendOptions(
- init_method=f"tcp://{addr}:{port}",
- _transports=tp_transports()
+ init_method=f"tcp://{addr}:{port}", _transports=tp_transports()
)
rpc.init_rpc(
name="worker0",
@@ -362,11 +372,17 @@ class TCPStoreTest(TestCase, StoreTestBase):
self._test_numkeys_delkeys(self._create_store())
def _create_client(self, index, addr, port, world_size):
- client_store = dist.TCPStore(addr, port, world_size=world_size, timeout=timedelta(seconds=10))
+ client_store = dist.TCPStore(
+ addr, port, world_size=world_size, timeout=timedelta(seconds=10)
+ )
self.assertEqual(b"value", client_store.get("key"))
client_store.set(f"new_key{index}", f"new_value{index}")
- self.assertEqual(f"next_value{index}".encode(),
- client_store.compare_set(f"new_key{index}", f"new_value{index}", f"next_value{index}"))
+ self.assertEqual(
+ f"next_value{index}".encode(),
+ client_store.compare_set(
+ f"new_key{index}", f"new_value{index}", f"next_value{index}"
+ ),
+ )
def _multi_worker_helper(self, world_size):
addr = DEFAULT_HOSTNAME
@@ -408,22 +424,34 @@ class TCPStoreTest(TestCase, StoreTestBase):
self.assertEqual(b"tato", v1)
def test_store_timeout_on_missing_clients(self):
- with self.assertRaisesRegex(DistStoreError, r"Timed out after \d+ seconds waiting for clients. \d+/\d+ clients joined."):
+ with self.assertRaisesRegex(
+ DistStoreError,
+ r"Timed out after \d+ seconds waiting for clients. \d+/\d+ clients joined.",
+ ):
# world_size is 2 so it should timeout
dist.TCPStore("localhost", 0, 2, True, timeout=timedelta(seconds=2))
# when wait_for_workers is not set, then there should be no exception raised
- dist.TCPStore("localhost", 0, 2, True, timeout=timedelta(seconds=2), wait_for_workers=False)
+ dist.TCPStore(
+ "localhost",
+ 0,
+ 2,
+ True,
+ timeout=timedelta(seconds=2),
+ wait_for_workers=False,
+ )
-class LibUvTCPStoreTest(TCPStoreTest):
+class LibUvTCPStoreTest(TCPStoreTest):
def _create_store(self):
store = create_tcp_store(use_libuv=True)
store.set_timeout(timedelta(seconds=300))
return store
def _create_store_with_ws(self, addr, world_size):
- return create_tcp_store(addr, world_size, wait_for_workers=False, use_libuv=True)
+ return create_tcp_store(
+ addr, world_size, wait_for_workers=False, use_libuv=True
+ )
class PrefixTCPStoreTest(TestCase, StoreTestBase):
@@ -445,10 +473,13 @@ class PrefixTCPStoreTest(TestCase, StoreTestBase):
def test_underlying_non_prefix_store(self):
store = self._create_store()
- wrapped_store = dist.PrefixStore(self.prefix, dist.PrefixStore(self.prefix, store))
+ wrapped_store = dist.PrefixStore(
+ self.prefix, dist.PrefixStore(self.prefix, store)
+ )
self.assertEqual(self.tcpstore, store._underlying_non_prefix_store)
self.assertEqual(self.tcpstore, wrapped_store._underlying_non_prefix_store)
+
class MyPythonStore(dist.Store):
def __init__(self):
super().__init__()
@@ -483,6 +514,7 @@ class MyPythonStore(dist.Store):
val = self.store[key] = newValue
return val
+
class PythonStoreTest(TestCase):
def test_set_get(self):
# If we were to inherit from StoreTestBase and try to use
@@ -578,7 +610,9 @@ class RendezvousTCPTest(TestCase):
next(gen)
def test_dns_timeout(self):
- with self.assertRaisesRegex(DistNetworkError, "client socket has timed out after.*dnsnotexist") as manager:
+ with self.assertRaisesRegex(
+ DistNetworkError, "client socket has timed out after.*dnsnotexist"
+ ) as manager:
gen = dist.rendezvous(
"tcp://dnsnotexist:23456?world_size=2&rank=0",
timeout=timedelta(seconds=1),
@@ -641,6 +675,7 @@ class RendezvousTCPTest(TestCase):
store0, rank0, size0 = next(gen0)
self.assertTrue(store0.libuvBackend)
+
class DummyStore(dist.Store):
def __init__(self):
self.appends = []
@@ -662,10 +697,12 @@ class DummyStore(dist.Store):
def has_extended_api(self):
return True
+
class TestPythonStore(TestCase):
def test_optional_methods_fail(self):
class TestStore(dist.Store):
pass
+
store = TestStore()
self.assertFalse(store.has_extended_api())
with self.assertRaisesRegex(RuntimeError, "Not implemented."):
@@ -678,6 +715,7 @@ class TestPythonStore(TestCase):
def test_has_extended_api_passthrough(self):
class TestStore(dist.Store):
pass
+
test_store = TestStore()
store = dist.PrefixStore("p", test_store)
self.assertFalse(store.has_extended_api())
@@ -712,10 +750,10 @@ class TestPythonStore(TestCase):
def test_multi_set_roundtrip(self):
store = DummyStore()
prefix = dist.PrefixStore("p", store)
- prefix.multi_set(["foo", "bar"], [b'x', b'y'])
+ prefix.multi_set(["foo", "bar"], [b"x", b"y"])
self.assertEqual(1, len(store.multi_sets))
self.assertEqual(["p/foo", "p/bar"], store.multi_sets[0][0])
- self.assertEqual([b'x', b'y'], store.multi_sets[0][1])
+ self.assertEqual([b"x", b"y"], store.multi_sets[0][1])
def test_extended_methods_fallbacks(self):
test_store = MyPythonStore()
@@ -735,7 +773,9 @@ class TestMultiThreadedWait(MultiThreadedTestCase):
stores = [
dist.FileStore(tempfile.NamedTemporaryFile(delete=False).name, 1),
dist.HashStore(),
- dist.PrefixStore("pre", dist.FileStore(tempfile.NamedTemporaryFile(delete=False).name, 1)),
+ dist.PrefixStore(
+ "pre", dist.FileStore(tempfile.NamedTemporaryFile(delete=False).name, 1)
+ ),
create_tcp_store(),
create_tcp_store(use_libuv=True),
dist.PrefixStore("pre", create_tcp_store()),
@@ -764,15 +804,18 @@ class TestMultiThreadedWait(MultiThreadedTestCase):
instantiate_parametrized_tests(TestMultiThreadedWait)
+
@skip_if_win32()
class TimeoutTest(TestCase):
def tearDown(self):
import signal
+
super().tearDown()
signal.signal(signal.SIGUSR1, signal.SIG_IGN)
def test_interrupt_doesnt_break_wait(self):
import signal
+
rank_res = [None, None]
def run(rank, my_store):
@@ -789,13 +832,29 @@ class TimeoutTest(TestCase):
time.sleep(1)
rank0_store = dist.TCPStore(
- host_name=DEFAULT_HOSTNAME, port=0, world_size=2, is_master=True, wait_for_workers=False)
+ host_name=DEFAULT_HOSTNAME,
+ port=0,
+ world_size=2,
+ is_master=True,
+ wait_for_workers=False,
+ )
rank1_store = dist.TCPStore(
- host_name=DEFAULT_HOSTNAME, port=rank0_store.port, world_size=2, is_master=False, wait_for_workers=False)
+ host_name=DEFAULT_HOSTNAME,
+ port=rank0_store.port,
+ world_size=2,
+ is_master=False,
+ wait_for_workers=False,
+ )
ths = []
for i in range(2):
- t = threading.Thread(target=run, args=(i, [rank0_store, rank1_store][i],))
+ t = threading.Thread(
+ target=run,
+ args=(
+ i,
+ [rank0_store, rank1_store][i],
+ ),
+ )
t.start()
ths.append(t)
@@ -821,7 +880,12 @@ class InitPgWithUvStore(TestCase):
def test_with_url_param(self):
port = common.find_free_port()
- dist.init_process_group("gloo", rank=0, world_size=1, init_method=f"tcp://{DEFAULT_HOSTNAME}:{port}?use_libuv=1")
+ dist.init_process_group(
+ "gloo",
+ rank=0,
+ world_size=1,
+ init_method=f"tcp://{DEFAULT_HOSTNAME}:{port}?use_libuv=1",
+ )
self._run_test()
def test_with_env_var(self):
@@ -843,6 +907,7 @@ class InitPgWithUvStore(TestCase):
self.assertTrue(store.libuvBackend)
dist.destroy_process_group()
+
if __name__ == "__main__":
assert (
not torch.cuda._initialized
|
2.41.0
|
0211e207c78fafac2edaf2e14954f668e898b4a
|
Tue, 16 Apr 2024 00:56:35 +0000
|
[PATCH 0241/1000] inductor cpp wrapper: add GIL release back (#123897)
|
Fixes https://github.com/pytorch/pytorch/issues/123517. This PR adds the GIL release (originally added in https://github.com/pytorch/pytorch/pull/111888) back following the suggestion here: https://github.com/pytorch/pytorch/pull/123897#discussion_r1562509705. We added a default constructor and an assignment operator for the `RAIIPyObject` class (https://github.com/pytorch/pytorch/pull/123897#discussion_r1566262575) in order to declare the `custom_op_wrapper` outside of the GIL acquisition scope. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123897 Approved by: https://github.com/peterbell10, https://github.com/jgong5
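As an illustration only (not part of the PR), the scenario this targets is several threads of one process running inference on separately compiled instances; with the generated C++ wrapper releasing the GIL, those calls can proceed concurrently instead of serializing on the interpreter lock. The model, thread count, and shapes below are assumptions made for the sketch.
```python
# Hedged sketch of the multi-threaded inference pattern the GIL release enables.
# Everything here (model, sizes, thread count) is illustrative, not from the PR.
import threading

import torch


def worker(compiled_model, x, out, idx):
    # With the GIL released inside the generated wrapper, these calls can overlap.
    with torch.no_grad():
        out[idx] = compiled_model(x)


model = torch.nn.Linear(16, 16).eval()
compiled = torch.compile(model)  # cpp wrapper mode would be enabled via inductor config
inputs = [torch.randn(8, 16) for _ in range(4)]
outputs = [None] * len(inputs)

threads = [
    threading.Thread(target=worker, args=(compiled, x, outputs, i))
    for i, x in enumerate(inputs)
]
for t in threads:
    t.start()
for t in threads:
    t.join()
```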
|
diff --git a/test/inductor/test_cpu_cpp_wrapper.py b/test/inductor/test_cpu_cpp_wrapper.py
index 1380de42af..bb7e6770a6 100644
--- a/test/inductor/test_cpu_cpp_wrapper.py
+++ b/test/inductor/test_cpu_cpp_wrapper.py
@@ -240,7 +240,10 @@ if RUN_CPU:
),
BaseTest("test_mm_views"),
BaseTest("test_multihead_attention", "cpu", test_cpu_repro.CPUReproTests()),
- BaseTest("test_multi_threading"),
+ BaseTest(
+ "test_multi_threading",
+ code_string_count={"py::gil_scoped_release release;": 1},
+ ),
BaseTest("test_profiler_mark_wrapper_call"),
BaseTest(
"test_qconv2d",
diff --git a/torch/_inductor/codegen/cpp_wrapper_cpu.py b/torch/_inductor/codegen/cpp_wrapper_cpu.py
index f8e2f30859..ac7cfd95d7 100644
--- a/torch/_inductor/codegen/cpp_wrapper_cpu.py
+++ b/torch/_inductor/codegen/cpp_wrapper_cpu.py
@@ -201,10 +201,19 @@ class CppWrapperCpu(WrapperCodeGen):
class RAIIPyObject {
public:
+ RAIIPyObject() : obj_(nullptr) {}
RAIIPyObject(PyObject* obj) : obj_(obj) {}
~RAIIPyObject() {
Py_XDECREF(obj_);
}
+ RAIIPyObject& operator=(const RAIIPyObject& other) {
+ if (this != &other) {
+ Py_XDECREF(obj_);
+ obj_ = other.obj_;
+ Py_XINCREF(obj_);
+ }
+ return *this;
+ }
operator PyObject*() {
return obj_;
}
@@ -512,6 +521,8 @@ class CppWrapperCpu(WrapperCodeGen):
else:
# Weights are promoted in the JIT mode
num_args = len(V.graph.graph_inputs) + len(V.graph.constants)
+ # release GIL to support multiple instances inference (in different threads of the same process)
+ self.prefix.splice("py::gil_scoped_release release;")
if config.abi_compatible:
self.prefix.splice(
@@ -2001,6 +2012,18 @@ class CppWrapperCpu(WrapperCodeGen):
output_args,
)
+ def generate_scoped_gil_acquire(self, declarations_before_scope, lines_in_scope):
+ scoped_lines = IndentedBuffer()
+ for declaration in declarations_before_scope:
+ scoped_lines.writeline(declaration)
+
+ scoped_lines.writeline("{")
+ with scoped_lines.indent():
+ scoped_lines.writeline("py::gil_scoped_acquire acquire;")
+ scoped_lines.writelines(lines_in_scope.split("\n"))
+ scoped_lines.writelines("}")
+ return scoped_lines._lines
+
def load_custom_op_wrapper(self):
# TODO: need to support control flow
if self.custom_op_wrapper_loaded:
@@ -2011,11 +2034,17 @@ RAIIPyObject codecache_module(PyImport_ImportModule("torch._inductor.codecache")
if (codecache_module.get() == NULL) {
throw std::runtime_error("Failed to load torch._inductor.codecache");
}
-RAIIPyObject custom_op_wrapper(PyObject_GetAttrString(codecache_module, "custom_op_wrapper"));
+custom_op_wrapper = PyObject_GetAttrString(codecache_module, "custom_op_wrapper");
if (custom_op_wrapper.get() == NULL) {
throw std::runtime_error("Failed to load torch._inductor.codecache.custom_op_wrapper");
}"""
- self.writelines(lines.split("\n"))
+
+ declarations_before_scope = ["RAIIPyObject custom_op_wrapper;"]
+ scope_gil_acquire = self.generate_scoped_gil_acquire(
+ declarations_before_scope, lines
+ )
+ self.writelines(scope_gil_acquire)
+
self.custom_op_wrapper_loaded = True
def generate_py_arg(self, py_args_var, idx, raw_arg, arg_type):
@@ -2116,15 +2145,22 @@ if (py_{buf_name}.get() == NULL) {{
if len(output_args) == 1:
# result is a single tensor
lines += f"""
-RAIIAtenTensorHandle {output_args[0]}(reinterpret_cast<AtenTensorHandle>(PyCapsule_GetPointer(py_{buf_name}.get(), NULL)));"""
+{output_args[0]} = reinterpret_cast<AtenTensorHandle>(PyCapsule_GetPointer(py_{buf_name}.get(), NULL));"""
else:
# result is a tuple of tensors
for idx, output_arg in enumerate(output_args):
lines += f"""
-RAIIAtenTensorHandle {output_arg}(
- reinterpret_cast<AtenTensorHandle>(PyCapsule_GetPointer(PyList_GET_ITEM(py_{buf_name}.get(), {idx}), NULL)));"""
+{output_arg} =
+ reinterpret_cast<AtenTensorHandle>(PyCapsule_GetPointer(PyList_GET_ITEM(py_{buf_name}.get(), {idx}), NULL));"""
- self.writelines(lines.split("\n"))
+ declarations_before_scope = [
+ f"RAIIAtenTensorHandle {output_arg};"
+ for idx, output_arg in enumerate(output_args)
+ ]
+ scope_gil_acquire = self.generate_scoped_gil_acquire(
+ declarations_before_scope, lines
+ )
+ self.writelines(scope_gil_acquire)
def generate_extern_kernel_alloc_and_find_schema_if_needed_fbcode(
self,
|
2.41.0
|
4878abab0c8be0f27eda6c991d0aa453493a2e7
|
Wed, 17 Apr 2024 09:24:59 +0000
|
[PATCH 0242/1000] Fix Setup Linux for ARC (#124171)
|
We can't get information about `ami-id`, `instance-id`, `instance-type` for the ARC runners: ``` 2024-04-16T11:10:17.0098276Z curl: (22) The requested URL returned error: 401 2024-04-16T11:10:17.0110775Z ami-id: 2024-04-16T11:10:17.0159131Z curl: (22) The requested URL returned error: 401 2024-04-16T11:10:17.0167378Z instance-id: 2024-04-16T11:10:17.0219464Z curl: (22) The requested URL returned error: 401 ``` Pull Request resolved: https://github.com/pytorch/pytorch/pull/124171 Approved by: https://github.com/malfet, https://github.com/ZainRizvi, https://github.com/zxiiro
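For illustration only, the guard added below amounts to the following logic (a rough, non-authoritative Python transcription of the shell function; the `/.inarc` marker file and the 169.254.169.254 metadata endpoint come from the patch, while the timeout value is an assumption):
```python
# Rough Python equivalent of the get_ec2_metadata guard in the setup-linux action.
import os
import urllib.request


def get_ec2_metadata(category: str, runner_name: str) -> str:
    if os.path.exists("/.inarc"):
        # ARC runners cannot reach the EC2 metadata service (requests return 401).
        return "ARC Runner, no info on ec2 metadata"
    if "gcp" in runner_name:
        return "Runner is from Google Cloud Platform, No info on ec2 metadata"
    url = f"http://169.254.169.254/latest/meta-data/{category}"
    with urllib.request.urlopen(url, timeout=2) as resp:  # timeout is illustrative
        return resp.read().decode()
```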
|
diff --git a/.github/actions/setup-linux/action.yml b/.github/actions/setup-linux/action.yml
index b1c9081fe5..98c796e0ca 100644
--- a/.github/actions/setup-linux/action.yml
+++ b/.github/actions/setup-linux/action.yml
@@ -15,10 +15,12 @@ runs:
category=$1
# If it is GCP runner (runner name contains gcp), do not run this
runner_name_str=${{ runner.name }}
- if [[ $runner_name_str != *"gcp"* ]]; then
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- else
+ if [[ -f /.inarc ]]; then
+ echo "ARC Runner, no info on ec2 metadata"
+ elif [[ $runner_name_str == *"gcp"* ]]; then
echo "Runner is from Google Cloud Platform, No info on ec2 metadata"
+ else
+ curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
fi
}
echo "ami-id: $(get_ec2_metadata ami-id)"
|
2.41.0
|
cc466751b2723eb913fd3148b4f054189bbf1ab
|
Wed, 17 Apr 2024 09:44:07 +0000
|
[PATCH 0243/1000] Add bfloat16 support to binary_cross_entropy for CPU (#123823)
|
Fixes #123715, as the title states. Note, though, that we should perhaps pay attention to https://github.com/pytorch/pytorch/pull/33206, which removed half support on CPU about 4 years ago. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123823 Approved by: https://github.com/Skylion007, https://github.com/malfet
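As a quick illustration (assuming a build that includes this change), the newly dispatched CPU kernels make the following work for bfloat16 (and half), forward and backward; shapes and values are arbitrary.
```python
# Hedged usage sketch: bfloat16 binary_cross_entropy on CPU, forward + backward.
import torch
import torch.nn.functional as F

probs = torch.rand(4, 3, dtype=torch.bfloat16, requires_grad=True)  # values in [0, 1)
target = torch.rand(4, 3, dtype=torch.bfloat16)

loss = F.binary_cross_entropy(probs, target)  # previously unsupported for bfloat16 on CPU
loss.backward()                               # backward kernel is dispatched as well
print(loss.dtype, probs.grad.dtype)
```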
|
diff --git a/aten/src/ATen/native/Loss.cpp b/aten/src/ATen/native/Loss.cpp
index ea0cb5419a..e21d9f6008 100644
--- a/aten/src/ATen/native/Loss.cpp
+++ b/aten/src/ATen/native/Loss.cpp
@@ -273,27 +273,30 @@ Tensor& binary_cross_entropy_out_cpu(const Tensor& input, const Tensor& target,
.add_owned_const_input(at::squeeze(target))
.build();
- AT_DISPATCH_FLOATING_TYPES(loss.scalar_type(), "binary_cross_entropy", [&] {
- at::native::cpu_kernel(
- iter,
- [] (scalar_t input_val, scalar_t target_val) {
+ AT_DISPATCH_FLOATING_TYPES_AND2(
+ ScalarType::Half,
+ ScalarType::BFloat16,
+ loss.scalar_type(),
+ "binary_cross_entropy",
+ [&] {
+ at::native::cpu_kernel(
+ iter, [](scalar_t input_val, scalar_t target_val) {
TORCH_CHECK(
(input_val >= 0) && (input_val <= 1),
- "all elements of input should be between 0 and 1"
- );
+ "all elements of input should be between 0 and 1");
TORCH_CHECK(
(target_val >= 0) && (target_val <= 1),
- "all elements of target should be between 0 and 1"
- );
+ "all elements of target should be between 0 and 1");
// Binary cross entropy tensor is defined by the equation:
// L = -w (y ln(x) + (1-y) ln(1-x))
- return (target_val - scalar_t(1))
- * std::max(scalar_t(std::log1p(-input_val)), scalar_t(-100))
- - target_val * std::max(scalar_t(std::log(input_val)), scalar_t(-100));
- }
- );
- });
+ return (target_val - scalar_t(1)) *
+ std::max(scalar_t(std::log1p(-input_val)), scalar_t(-100)) -
+ target_val *
+ std::max(scalar_t(std::log(input_val)), scalar_t(-100));
+ });
+ });
+
if (weight.defined()) {
loss.mul_(weight);
}
@@ -328,21 +331,25 @@ Tensor& binary_cross_entropy_backward_out_cpu(const Tensor& grad, const Tensor&
.add_owned_const_input(at::squeeze(target))
.build();
- AT_DISPATCH_FLOATING_TYPES(grad_input.scalar_type(), "binary_cross_entropy_backward", [&] {
- at::native::cpu_kernel(
- iter,
- [] (scalar_t grad_val, scalar_t input_val, scalar_t target_val) {
+ AT_DISPATCH_FLOATING_TYPES_AND2(
+ ScalarType::Half,
+ ScalarType::BFloat16,
+ grad_input.scalar_type(),
+ "binary_cross_entropy_backward",
+ [&] {
+ at::native::cpu_kernel(
+ iter,
+ [](scalar_t grad_val, scalar_t input_val, scalar_t target_val) {
// The gradient is the partial derivative of BCELoss
// with respect to x
// d(L)/d(x) = -w (y - x) / (x - x^2)
- return grad_val * (input_val - target_val)
- / (scalar_t(std::max(
+ return grad_val * (input_val - target_val) /
+ (scalar_t(std::max(
(scalar_t(1) - input_val) * input_val,
- scalar_t(EPSILON)
- )));
- }
- );
- });
+ scalar_t(EPSILON))));
+ });
+ });
+
if (weight.defined()) {
grad_input.mul_(weight);
}
diff --git a/test/test_mps.py b/test/test_mps.py
index 5cf03cefb6..fe73683fd7 100644
--- a/test/test_mps.py
+++ b/test/test_mps.py
@@ -982,6 +982,9 @@ def mps_ops_modifier(ops):
# Unsupported
# input types 'tensor<1x3x9x9xf16>' and 'tensor<1xf32>' are not broadcast compatible
'nn.functional.avg_pool2d': [torch.float16],
+ # input types 'tensor<f32>' and 'tensor<1xf16>' are not broadcast compatible
+ # Refer to the issue please: https://github.com/pytorch/pytorch/issues/124252
+ 'nn.functional.binary_cross_entropy': [torch.float16]
}
def addDecorator(op, d) -> None:
diff --git a/torch/testing/_internal/common_methods_invocations.py b/torch/testing/_internal/common_methods_invocations.py
index c8292207e6..54987ff26b 100644
--- a/torch/testing/_internal/common_methods_invocations.py
+++ b/torch/testing/_internal/common_methods_invocations.py
@@ -15058,7 +15058,7 @@ op_db: List[OpInfo] = [
"nn.functional.binary_cross_entropy",
aten_backward_name='binary_cross_entropy_backward',
sample_inputs_func=sample_inputs_binary_cross_entropy,
- dtypes=floating_types(),
+ dtypes=floating_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
gradcheck_fast_mode=False,
|
2.41.0
|
7ad630f5d404858dd19d6e7f79ab3573dbb7c0a
|
Wed, 17 Apr 2024 11:31:32 +0000
|
[PATCH 0244/1000] Revert "Dont precompile already seen keys, limit epilogue choices (#122642)"
|
This reverts commit 050051f412e50d98d506adf0d05aa6e4ceab54bd. Reverted https://github.com/pytorch/pytorch/pull/122642 on behalf of https://github.com/DanilBaibak due to Broken trunk ([comment](https://github.com/pytorch/pytorch/pull/124030#issuecomment-2061044960))
|
diff --git a/test/hi.py b/test/hi.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/inductor/test_max_autotune.py b/test/inductor/test_max_autotune.py
index 5745f4a405..37b5d84cad 100644
--- a/test/inductor/test_max_autotune.py
+++ b/test/inductor/test_max_autotune.py
@@ -445,22 +445,6 @@ class TestMaxAutotune(TestCase):
fn_c = torch.compile(mode="max-autotune-no-cudagraphs")(fn)
self.assertEqual(counters["inductor"]["select_algorithm_precompile"], 0)
- @config.patch(autotune_local_cache=False, autotune_remote_cache=False)
- def test_precompilations(self):
- def fn(a, b, c):
- a = (a @ b) @ c
- a, b, c = (t.to(torch.float16) for t in [a, b, c])
- return (a @ b) @ c
-
- fn_c = torch.compile(mode="max-autotune-no-cudagraphs")(fn)
- inputs = [torch.rand([256, 256], device="cuda") for _ in range(3)]
-
- self.assertEqual(fn(*inputs), fn_c(*inputs), atol=1e-2, rtol=1e-2)
-
- from torch._dynamo.utils import counters
-
- self.assertEqual(counters["inductor"]["select_algorithm_precompile"], 2)
-
def test_cat_addmm(self):
def fn(a: torch.Tensor, b: torch.Tensor, c: torch.Tensor):
return torch.cat(
diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py
index bb912bf16a..30c15bc64c 100644
--- a/torch/_inductor/codecache.py
+++ b/torch/_inductor/codecache.py
@@ -301,7 +301,7 @@ class PersistentCache(CacheBase):
return hit
if config.max_autotune or config.max_autotune_gemm:
- local_cache = self.get_local_cache() if config.autotune_local_cache else {}
+ local_cache = self.get_local_cache()
# check local cache first since it is data specific to the current machine
if (
not check_cache(local_cache)
diff --git a/torch/_inductor/config.py b/torch/_inductor/config.py
index ae83b9533f..d7141639d0 100644
--- a/torch/_inductor/config.py
+++ b/torch/_inductor/config.py
@@ -302,9 +302,6 @@ benchmark_multi_templates = (
os.environ.get("TORCHINDUCTOR_BENCHMARK_MULTI_TEMPLATES", "0") == "1"
)
-# Take how many of the top triton kernels to benchmark epilogue
-max_epilogue_benchmarked_choices = 3
-
# how many nodes to allow into a single fusion
max_fusion_size = 64
diff --git a/torch/_inductor/scheduler.py b/torch/_inductor/scheduler.py
index 4ea60b73f6..deb6ed05eb 100644
--- a/torch/_inductor/scheduler.py
+++ b/torch/_inductor/scheduler.py
@@ -1835,8 +1835,6 @@ class Scheduler:
min_ms_fused = float("inf")
ms_fused_choice = None
- triton_choices = 0
-
for choice, unfused_time in choice_timings.items():
if not isinstance(choice, torch._inductor.ir.TritonTemplateCallerBase):
continue
@@ -1844,10 +1842,6 @@ class Scheduler:
if unfused_time >= ms1 + ms2:
continue
- triton_choices += 1
- if triton_choices > config.max_epilogue_benchmarked_choices:
- break
-
# TODO - parallel compile triton templates
# TODO - should prune/skip choices that are not within certain % of best choice
with node1.node.swap_as_triton_caller(choice):
diff --git a/torch/_inductor/select_algorithm.py b/torch/_inductor/select_algorithm.py
index d7361225ee..eedb0995c3 100644
--- a/torch/_inductor/select_algorithm.py
+++ b/torch/_inductor/select_algorithm.py
@@ -866,15 +866,6 @@ class ErrorFromChoice(RuntimeError):
class AlgorithmSelectorCache(PersistentCache):
- def __init__(self, *args, **kwargs):
- super().__init__(*args, **kwargs)
-
- # the autotuning will get occur in the scheduler, so there is
- # no guarantee that the first lowering for a given key will also be the
- # first to benchmark it. share a single precompilation function for all lowerings
- # of a particular key
- self.precompile_cache: Dict[str, Callable[[], None]] = {}
-
def __call__(
self,
name,
@@ -911,8 +902,6 @@ class AlgorithmSelectorCache(PersistentCache):
def make_benchmark_fn():
return self.make_benchmark_fn(choices, input_nodes, layout, input_gen_fns)
- inputs_key = repr([self.key_of(x) for x in input_nodes])
-
def precompile(choices):
if (
precompilation_timeout_seconds is None
@@ -936,7 +925,7 @@ class AlgorithmSelectorCache(PersistentCache):
timings = self.lookup(
choices,
name,
- inputs_key,
+ repr([self.key_of(x) for x in input_nodes]),
benchmark=None,
)
@@ -946,12 +935,6 @@ class AlgorithmSelectorCache(PersistentCache):
if timings:
return no_op
- precompile_key = (
- f"{name}: {inputs_key} : {torch.get_float32_matmul_precision()}"
- )
- if precompile_func := self.precompile_cache.get(precompile_key):
- return precompile_func
-
executor = ThreadPoolExecutor(max_workers=num_workers)
futures = executor.map(
lambda c: c.precompile(),
@@ -959,9 +942,7 @@ class AlgorithmSelectorCache(PersistentCache):
timeout=precompilation_timeout_seconds,
)
- @functools.lru_cache(None)
def wait_on_futures():
- counters["inductor"]["select_algorithm_precompile"] += 1
try:
iterator = iter(futures)
while True:
@@ -977,11 +958,8 @@ class AlgorithmSelectorCache(PersistentCache):
)
except StopIteration:
pass
-
executor.shutdown(wait=True)
- self.precompile_cache[precompile_key] = wait_on_futures
-
return wait_on_futures
def autotune(choices):
@@ -1002,7 +980,7 @@ class AlgorithmSelectorCache(PersistentCache):
timings = self.lookup(
choices,
name,
- inputs_key,
+ repr([self.key_of(x) for x in input_nodes]),
autotune,
)
autotune_elapse = time.time() - autotune_start_ts
|
2.41.0
|
f89f565bb17bcb70cb6938540af0cd154c8344a
|
Wed, 17 Apr 2024 11:31:33 +0000
|
[PATCH 0245/1000] Revert "Re-land precompile triton templates (#124030)"
|
This reverts commit d68196e7ef5eb8f62064ef70c75032f4d8b4a4fa. Reverted https://github.com/pytorch/pytorch/pull/124030 on behalf of https://github.com/DanilBaibak due to Broken trunk ([comment](https://github.com/pytorch/pytorch/pull/124030#issuecomment-2061044960))
|
diff --git a/test/inductor/test_max_autotune.py b/test/inductor/test_max_autotune.py
index 37b5d84cad..d1f074de51 100644
--- a/test/inductor/test_max_autotune.py
+++ b/test/inductor/test_max_autotune.py
@@ -328,8 +328,7 @@ class TestMaxAutotune(TestCase):
inputs: str,
benchmark: Callable[[Any], Dict[ChoiceCaller, float]],
) -> Dict[ChoiceCaller, float]:
- if benchmark is not None:
- return benchmark(choices)
+ return benchmark(choices)
asc = AlgorithmSelectorCache()
@@ -427,24 +426,6 @@ class TestMaxAutotune(TestCase):
FileCheck().check_not("extern_kernels.convolution").run(code[0])
self.assertEqual(conv1x1(input_tensor), out, atol=1e-2, rtol=0)
- def test_filled_cache_precompile(self):
- def fn(a, b, c):
- a = (a @ b) @ c
- a, b, c = (t.to(torch.float16) for t in [a, b, c])
- return (a @ b) @ c
-
- fn_c = torch.compile(mode="max-autotune-no-cudagraphs")(fn)
- inputs = [torch.rand([256, 256], device="cuda") for _ in range(3)]
- from torch._dynamo.utils import counters
-
- self.assertEqual(fn(*inputs), fn_c(*inputs), atol=1e-2, rtol=1e-2)
-
- torch._dynamo.reset()
- counters.clear()
-
- fn_c = torch.compile(mode="max-autotune-no-cudagraphs")(fn)
- self.assertEqual(counters["inductor"]["select_algorithm_precompile"], 0)
-
def test_cat_addmm(self):
def fn(a: torch.Tensor, b: torch.Tensor, c: torch.Tensor):
return torch.cat(
diff --git a/test/inductor/test_select_algorithm.py b/test/inductor/test_select_algorithm.py
index 48713bb63e..3b76651fcc 100644
--- a/test/inductor/test_select_algorithm.py
+++ b/test/inductor/test_select_algorithm.py
@@ -19,10 +19,8 @@ aten = torch.ops.aten
def patches(fn):
- def skip_cache(self, choices, name, key, benchmark):
- if benchmark is None:
- return {}
- return benchmark(choices)
+ def skip_cache(self, choices, name, key, generate):
+ return generate(choices)
for patcher in [
dynamo_config.patch(verbose=True),
diff --git a/torch/_inductor/autotune_process.py b/torch/_inductor/autotune_process.py
index 8c44167bc3..790ec9d60e 100644
--- a/torch/_inductor/autotune_process.py
+++ b/torch/_inductor/autotune_process.py
@@ -502,6 +502,7 @@ class TestBenchmarkRequest(BenchmarkRequest):
class TritonBenchmarkRequest(BenchmarkRequest):
# Important: Instances of this class have to be serializable
# across process boundaries. Do not put CUDA Tensors in here!
+
def __init__(
self,
kernel_name: str,
@@ -544,8 +545,6 @@ class TritonBenchmarkRequest(BenchmarkRequest):
if "warmup" in inspect.signature(run_method).parameters:
warmup_arg["warmup"] = False
- from torch._C import _cuda_getCurrentRawStream as get_raw_stream
-
if torch.version.hip and self.matrix_instr_nonkdim != 0:
return functools.partial(
run_method,
@@ -554,7 +553,9 @@ class TritonBenchmarkRequest(BenchmarkRequest):
*self.extra_args,
grid=self.grid,
**warmup_arg,
- stream=get_raw_stream(self.output_tensor_meta.device.index),
+ num_stages=self.num_stages,
+ num_warps=self.num_warps,
+ matrix_instr_nonkdim=self.matrix_instr_nonkdim,
)
else:
return functools.partial(
@@ -564,13 +565,10 @@ class TritonBenchmarkRequest(BenchmarkRequest):
*self.extra_args,
grid=self.grid,
**warmup_arg,
- stream=get_raw_stream(self.output_tensor_meta.device.index),
+ num_stages=self.num_stages,
+ num_warps=self.num_warps,
)
- def precompile(self):
- mod = PyCodeCache.load_by_key_path(self.module_cache_key, self.module_path)
- getattr(mod, self.kernel_name).precompile()
-
def __str__(self) -> str:
return f"{self.kernel_name=}, {self.module_path=}, {self.module_cache_key=}"
diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py
index 30c15bc64c..baf5869b09 100644
--- a/torch/_inductor/codecache.py
+++ b/torch/_inductor/codecache.py
@@ -262,7 +262,7 @@ class PersistentCache(CacheBase):
choices: List[ChoiceCaller],
op: str,
inputs: str,
- benchmark: Optional[Callable[[Any], Dict[ChoiceCaller, float]]],
+ benchmark: Callable[[Any], Dict[ChoiceCaller, float]],
) -> Dict[ChoiceCaller, float]:
"""
Check to see if we have benchmarked the given choice callers. For each
@@ -270,7 +270,7 @@ class PersistentCache(CacheBase):
1. Check global_cache[op][inputs][choice][precision], return benchmark if cached.
2. Check local_cache[op][inputs][choice][precision], return benchmark if cached.
- 3. If benchmark is not None:
+ 3.
a. `max_autotune_gemm=True`: benchmark the choice, update
local_cache[op][inputs][choice], and return the benchmark.
b. `max_autotune_gemm=False`: don't benchmark the choice, return nothing.
@@ -303,13 +303,9 @@ class PersistentCache(CacheBase):
if config.max_autotune or config.max_autotune_gemm:
local_cache = self.get_local_cache()
# check local cache first since it is data specific to the current machine
- if (
- not check_cache(local_cache)
- and not (
- use_global_cache()
- and check_cache(self.get_global_cache(), callback=log_stats)
- )
- and benchmark is not None
+ if not check_cache(local_cache) and not (
+ use_global_cache()
+ and check_cache(self.get_global_cache(), callback=log_stats)
):
try:
# re-benchmark everything to try to get consistent numbers from the same machine
diff --git a/torch/_inductor/codegen/triton_utils.py b/torch/_inductor/codegen/triton_utils.py
index c8a7d92e3c..c95e699bcd 100644
--- a/torch/_inductor/codegen/triton_utils.py
+++ b/torch/_inductor/codegen/triton_utils.py
@@ -65,32 +65,6 @@ def signature_to_meta(
}
-def is_unaligned_buffer(arg: TensorArg):
- buf_name = arg.buffer
- if buf_name in V.graph.graph_inputs:
- return not config.assume_aligned_inputs
-
- if buf_name in V.graph.constants:
- # all constants are assumed to be aligned
- return False
-
- if V.graph.scheduler:
- layout = V.graph.scheduler.get_buffer_layout(buf_name)
- else:
- buffer = V.graph.get_buffer(buf_name)
- # output arg
- if not buffer:
- assert buf_name == V.kernel.output_node.name
- layout = V.kernel.output_node.layout
- else:
- layout = buffer.get_layout()
-
- if isinstance(layout, torch._inductor.ir.NonOwningLayout):
- return not layout.maybe_guard_aligned()
- else:
- return False
-
-
def config_of(
args: List[KernelArgType],
*,
@@ -109,7 +83,9 @@ def config_of(
offset_aligned = V.graph.sizevars.statically_known_multiple_of(
x.offset * x.dtype.itemsize, alignment # type: ignore[arg-type]
)
- return offset_aligned and not is_unaligned_buffer(x)
+ return offset_aligned and not V.graph.scheduler.is_unaligned_buffer(
+ x.buffer
+ )
else:
return False
if isinstance(x, SizeArg):
diff --git a/torch/_inductor/scheduler.py b/torch/_inductor/scheduler.py
index deb6ed05eb..f37580a59b 100644
--- a/torch/_inductor/scheduler.py
+++ b/torch/_inductor/scheduler.py
@@ -2495,9 +2495,18 @@ class Scheduler:
self.flush()
- def get_buffer_layout(self, buf_name: str) -> ir.Layout:
+ def is_unaligned_buffer(self, buf_name):
+ if buf_name in V.graph.graph_inputs:
+ return not config.assume_aligned_inputs
+ if buf_name in V.graph.constants:
+ # all constants are assumed to be aligned
+ return False
node = self.name_to_node[buf_name]
- return node.node.get_layout()
+ layout = node.node.get_layout()
+ if isinstance(layout, ir.NonOwningLayout):
+ return not layout.maybe_guard_aligned()
+ else:
+ return False
class BaseScheduling:
diff --git a/torch/_inductor/select_algorithm.py b/torch/_inductor/select_algorithm.py
index eedb0995c3..75deeaf5e3 100644
--- a/torch/_inductor/select_algorithm.py
+++ b/torch/_inductor/select_algorithm.py
@@ -94,7 +94,7 @@ class TritonTemplateKernel(TritonKernel):
grid_fn,
meta,
call_sizes,
- use_jit=False,
+ use_jit=True,
prefix_args=0,
suffix_args=0,
epilogue_fn=identity,
@@ -153,8 +153,8 @@ class TritonTemplateKernel(TritonKernel):
argdefs, _, signature = self.args.python_argdefs()
triton_meta = {
"signature": signature_to_meta(signature, size_dtype=self.index_dtype),
- "device": self.output_node.get_device().index,
- "device_type": self.output_node.get_device().type,
+ "device": V.graph.scheduler.current_device.index,
+ "device_type": V.graph.scheduler.current_device.type,
"constants": {},
}
triton_meta["configs"] = [config_of(signature)]
@@ -554,7 +554,7 @@ class TritonTemplate(KernelTemplate):
), TritonTemplateKernel(
kernel_name=kernel_name,
output_node=fake_out,
- use_jit=False,
+ use_jit=True,
**kernel_options,
) as kernel:
try:
@@ -740,10 +740,6 @@ class TritonTemplateCaller(ir.TritonTemplateCallerBase):
assert self.bmreq is not None
return self.bmreq.benchmark(*args, output_tensor=out)
- def precompile(self):
- assert self.bmreq is not None
- self.bmreq.precompile()
-
def __str__(self):
return f"TritonTemplateCaller({self.bmreq.module_path}, {self.debug_extra})"
@@ -885,7 +881,6 @@ class AlgorithmSelectorCache(PersistentCache):
# TODO(nmacchioni): remove once CI tests are fixed
choices = [choice for choice in choices if choice is not None]
-
if len(choices) == 0:
raise RuntimeError(
"No choices to select, please consider adding ATEN into max_autotune_gemm_backends "
@@ -921,20 +916,6 @@ class AlgorithmSelectorCache(PersistentCache):
num_workers,
)
- # check local and global cache before precompiling
- timings = self.lookup(
- choices,
- name,
- repr([self.key_of(x) for x in input_nodes]),
- benchmark=None,
- )
-
- def no_op(*args, **kwargs):
- return
-
- if timings:
- return no_op
-
executor = ThreadPoolExecutor(max_workers=num_workers)
futures = executor.map(
lambda c: c.precompile(),
|
2.41.0
|
dc15b684980c5b7f964a797dbee7915399a089c
|
Wed, 17 Apr 2024 11:47:02 +0000
|
[PATCH 0246/1000] Revert "[sparse] Add fast semi-structured spasification kernels (#122350)"
|
This reverts commit 14b2273b0c58b4000e10b2e441341eeafb7dd2f6. Reverted https://github.com/pytorch/pytorch/pull/122350 on behalf of https://github.com/DanilBaibak due to Broken trunk ([comment](https://github.com/pytorch/pytorch/pull/122350#issuecomment-2061070350))
|
diff --git a/aten/src/ATen/native/native_functions.yaml b/aten/src/ATen/native/native_functions.yaml
index 3a14d19ee5..6e96a8a6aa 100644
--- a/aten/src/ATen/native/native_functions.yaml
+++ b/aten/src/ATen/native/native_functions.yaml
@@ -3342,18 +3342,6 @@
dispatch:
CUDA: _cslt_sparse_mm_search
|
2.41.0
|
efcb6c718c6954df2157d93989ec7ed1821aafe
|
Wed, 17 Apr 2024 12:22:50 +0000
|
[PATCH 0247/1000] Fix wrong ufmt exclusions in `.lintrunner.toml` (#124135)
|
Part of: #123062. In pull request #123809, there were some exclusions that should have been removed, but weren't. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124135 Approved by: https://github.com/ezyang
|
diff --git a/.lintrunner.toml b/.lintrunner.toml
index 9e83a8b96e..0bbac77322 100644
--- a/.lintrunner.toml
+++ b/.lintrunner.toml
@@ -1016,118 +1016,6 @@ exclude_patterns = [
'test/_nvfuser/test_torchscript.py',
'test/delete.py',
'test/expect/__init__.py',
- 'test/jit/__init__.py',
- 'test/jit/_imported_class_test/__init__.py',
- 'test/jit/_imported_class_test/bar.py',
- 'test/jit/_imported_class_test/foo.py',
- 'test/jit/_imported_class_test/very/__init__.py',
- 'test/jit/_imported_class_test/very/very/__init__.py',
- 'test/jit/_imported_class_test/very/very/nested.py',
- 'test/jit/fixtures_srcs/__init__.py',
- 'test/jit/fixtures_srcs/fixtures_src.py',
- 'test/jit/fixtures_srcs/generate_models.py',
- 'test/jit/fixtures_srcs/test_upgrader_models_generation.py',
- 'test/jit/myexception.py',
- 'test/jit/test_alias_analysis.py',
- 'test/jit/test_async.py',
- 'test/jit/test_aten_pow.py',
- 'test/jit/test_attr.py',
- 'test/jit/test_autodiff.py',
- 'test/jit/test_autodiff_subgraph_slicing.py',
- 'test/jit/test_await.py',
- 'test/jit/test_backend_nnapi.py',
- 'test/jit/test_backends.py',
- 'test/jit/test_batch_mm.py',
- 'test/jit/test_builtins.py',
- 'test/jit/test_class_type.py',
- 'test/jit/test_complex.py',
- 'test/jit/test_complexity.py',
- 'test/jit/test_convert_activation.py',
- 'test/jit/test_cuda.py',
- 'test/jit/test_custom_operators.py',
- 'test/jit/test_data_parallel.py',
- 'test/jit/test_dataclasses.py',
- 'test/jit/test_dce.py',
- 'test/jit/test_device_analysis.py',
- 'test/jit/test_dtype_analysis.py',
- 'test/jit/test_enum.py',
- 'test/jit/test_exception.py',
- 'test/jit/test_freezing.py',
- 'test/jit/test_functional_blocks.py',
- 'test/jit/test_fuser_common.py',
- 'test/jit/test_graph_rewrite_passes.py',
- 'test/jit/test_hash.py',
- 'test/jit/test_hooks.py',
- 'test/jit/test_hooks_modules.py',
- 'test/jit/test_ignorable_args.py',
- 'test/jit/test_ignore_context_manager.py',
- 'test/jit/test_isinstance.py',
- 'test/jit/test_jit_utils.py',
- 'test/jit/test_list_dict.py',
- 'test/jit/test_logging.py',
- 'test/jit/test_misc.py',
- 'test/jit/test_models.py',
- 'test/jit/test_module_apis.py',
- 'test/jit/test_module_containers.py',
- 'test/jit/test_module_interface.py',
- 'test/jit/test_modules.py',
- 'test/jit/test_op_decompositions.py',
- 'test/jit/test_optimize_for_mobile_preserve_debug_info.py',
- 'test/jit/test_parametrization.py',
- 'test/jit/test_pdt.py',
- 'test/jit/test_peephole.py',
- 'test/jit/test_profiler.py',
- 'test/jit/test_python_bindings.py',
- 'test/jit/test_python_builtins.py',
- 'test/jit/test_python_ir.py',
- 'test/jit/test_recursive_script.py',
- 'test/jit/test_remove_mutation.py',
- 'test/jit/test_save_load.py',
- 'test/jit/test_save_load_for_op_version.py',
- 'test/jit/test_script_profile.py',
- 'test/jit/test_scriptmod_ann.py',
- 'test/jit/test_slice.py',
- 'test/jit/test_sparse.py',
- 'test/jit/test_string_formatting.py',
- 'test/jit/test_symbolic_shape_analysis.py',
- 'test/jit/test_tensor_creation_ops.py',
- 'test/jit/test_tensor_methods.py',
- 'test/jit/test_torchbind.py',
- 'test/jit/test_tracer.py',
- 'test/jit/test_type_sharing.py',
- 'test/jit/test_types.py',
- 'test/jit/test_typing.py',
- 'test/jit/test_union.py',
- 'test/jit/test_unsupported_ops.py',
- 'test/jit/test_upgraders.py',
- 'test/jit/test_warn.py',
- 'test/jit/test_with.py',
- 'test/jit/xnnpack/test_xnnpack_delegate.py',
- 'test/jit_hooks/model.py',
- 'test/lazy/__init__.py',
- 'test/lazy/test_bindings.py',
- 'test/lazy/test_debug_util.py',
- 'test/lazy/test_extract_compiled_graph.py',
- 'test/lazy/test_meta_kernel.py',
- 'test/lazy/test_reuse_ir.py',
- 'test/lazy/test_step_closures.py',
- 'test/lazy/test_ts_opinfo.py',
- 'test/linear.py',
- 'test/load_torchscript_model.py',
- 'test/mkl_verbose.py',
- 'test/mkldnn_verbose.py',
- 'test/nn/test_convolution.py',
- 'test/nn/test_dropout.py',
- 'test/nn/test_embedding.py',
- 'test/nn/test_init.py',
- 'test/nn/test_lazy_modules.py',
- 'test/nn/test_load_state_dict.py',
- 'test/nn/test_module_hooks.py',
- 'test/nn/test_multihead_attention.py',
- 'test/nn/test_packed_sequence.py',
- 'test/nn/test_parametrization.py',
- 'test/nn/test_pooling.py',
- 'test/nn/test_pruning.py',
'test/quantization/__init__.py',
'test/quantization/core/__init__.py',
'test/quantization/core/experimental/apot_fx_graph_mode_ptq.py',
diff --git a/test/nn/test_lazy_modules.py b/test/nn/test_lazy_modules.py
index 78031a73f0..2de0dc656b 100644
--- a/test/nn/test_lazy_modules.py
+++ b/test/nn/test_lazy_modules.py
@@ -165,9 +165,18 @@ class TestLazyModules(TestCase):
with self.assertRaisesRegex(RuntimeError, "shape of an uninitialized"):
module.load_state_dict(lazy_module.state_dict())
- def _check_lazy_conv(self, cls, lazy_cls, func, init_args, input_shape,
- expected_weight_shape, expected_bias_shape, *forward_args, **forward_kwargs):
-
+ def _check_lazy_conv(
+ self,
+ cls,
+ lazy_cls,
+ func,
+ init_args,
+ input_shape,
+ expected_weight_shape,
+ expected_bias_shape,
+ *forward_args,
+ **forward_kwargs,
+ ):
module = lazy_cls(*init_args)
self.assertIsInstance(module.weight, UninitializedParameter)
if module.bias is not None:
@@ -372,8 +381,16 @@ class TestLazyModules(TestCase):
@suppress_warnings
def test_lazy_conv_transpose1d_kwargs(self):
- self._check_lazy_conv(nn.ConvTranspose1d, nn.LazyConvTranspose1d, torch.nn.functional.conv_transpose1d,
- (32, 2), (192, 16, 50), (16, 32, 2), (32,), output_size=(51,))
+ self._check_lazy_conv(
+ nn.ConvTranspose1d,
+ nn.LazyConvTranspose1d,
+ torch.nn.functional.conv_transpose1d,
+ (32, 2),
+ (192, 16, 50),
+ (16, 32, 2),
+ (32,),
+ output_size=(51,),
+ )
@suppress_warnings
def test_lazy_conv_transpose1d_pickle(self):
@@ -409,8 +426,16 @@ class TestLazyModules(TestCase):
@suppress_warnings
def test_lazy_conv_transpose2d_kwargs(self):
- self._check_lazy_conv(nn.ConvTranspose2d, nn.LazyConvTranspose2d, torch.nn.functional.conv_transpose2d,
- (32, 2), (192, 16, 8, 6), (16, 32, 2, 2), (32,), output_size=(9, 7))
+ self._check_lazy_conv(
+ nn.ConvTranspose2d,
+ nn.LazyConvTranspose2d,
+ torch.nn.functional.conv_transpose2d,
+ (32, 2),
+ (192, 16, 8, 6),
+ (16, 32, 2, 2),
+ (32,),
+ output_size=(9, 7),
+ )
@suppress_warnings
def test_lazy_conv_transpose2d_pickle(self):
@@ -446,8 +471,16 @@ class TestLazyModules(TestCase):
@suppress_warnings
def test_lazy_conv_transpose3d_kwargs(self):
- self._check_lazy_conv(nn.ConvTranspose3d, nn.LazyConvTranspose3d, torch.nn.functional.conv_transpose3d,
- (32, 2), (192, 16, 8, 7, 6), (16, 32, 2, 2, 2), (32,), output_size=(9, 8, 7))
+ self._check_lazy_conv(
+ nn.ConvTranspose3d,
+ nn.LazyConvTranspose3d,
+ torch.nn.functional.conv_transpose3d,
+ (32, 2),
+ (192, 16, 8, 7, 6),
+ (16, 32, 2, 2, 2),
+ (32,),
+ output_size=(9, 8, 7),
+ )
@suppress_warnings
def test_lazy_conv_transpose3d_pickle(self):
|
2.41.0
|
7dbfecd374a5b4363dab27bc90aa563285ce50c
|
Tue, 16 Apr 2024 10:57:00 -0700
|
[PATCH 0248/1000] Rename impl_abstract to register_fake, part 1/2 (#123937)
|
This PR: - adds a new torch.library.register_fake and deprecates torch.library.impl_abstract. The motivation is that we have a lot of confusion around the naming so we are going to align the naming with the actual subsystem (FakeTensor). - renames `m.impl_abstract_pystub("fbgemm_gpu.sparse_ops")` to `m.set_python_module("fbgemm_gpu.sparse_ops")`. No deprecation here yet; I need to test how this works with static initialization. - Renames a bunch of internals to match (e.g. abstractimplpystub -> pystub). I'm scared to rename the Python-side internal APIs (e.g. torch._library.abstract_impl) because of torch.package concerns. I'll do that in its own isolated PR next just in case it causes problems. DEPRECATION NOTE: torch.library.impl_abstract was renamed to torch.library.register_fake. Please use register_fake. We'll delete impl_abstract in a future version of PyTorch. Test Plan: - existing tests Pull Request resolved: https://github.com/pytorch/pytorch/pull/123937 Approved by: https://github.com/albanD
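For reference, a minimal sketch of the renamed API, based on the docstring examples in the diff below; the `mylib::custom_linear` op and its fake impl are illustrative only, not part of this patch:
```
import torch

# Define a custom op, then register a FakeTensor ("fake") implementation for it.
torch.library.define(
    "mylib::custom_linear",
    "(Tensor x, Tensor weight, Tensor bias) -> Tensor",
)

# New spelling introduced by this PR.
@torch.library.register_fake("mylib::custom_linear")
def _(x, weight, bias):
    # A fake impl only describes output metadata; it must not touch real data.
    assert x.dim() == 2 and weight.dim() == 2 and bias.dim() == 1
    return x.new_empty(x.size(0), weight.size(0))

# The old spelling still works but now emits a DeprecationWarning:
# @torch.library.impl_abstract("mylib::custom_linear")
```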
|
diff --git a/aten/src/ATen/core/MetaFallbackKernel.cpp b/aten/src/ATen/core/MetaFallbackKernel.cpp
index fe56568bbb..8523a55878 100644
--- a/aten/src/ATen/core/MetaFallbackKernel.cpp
+++ b/aten/src/ATen/core/MetaFallbackKernel.cpp
@@ -8,14 +8,14 @@ static void metaFallback(
const c10::OperatorHandle& op,
c10::DispatchKeySet dispatch_keys,
torch::jit::Stack* stack) {
- c10::Dispatcher::singleton().throwIfHasAbstractImplPyStub(op.operator_name());
+ c10::Dispatcher::singleton().throwIfHasPythonModule(op.operator_name());
TORCH_CHECK_NOT_IMPLEMENTED(
false,
op.operator_name(),
": attempted to run this operator with Meta tensors, but there was no ",
- "abstract impl or Meta kernel registered. You may have run into this message "
+ "fake impl or Meta kernel registered. You may have run into this message "
"while using an operator with PT2 compilation APIs (torch.compile/torch.export); "
- "in order to use this operator with those APIs you'll need to add an abstract impl."
+ "in order to use this operator with those APIs you'll need to add a fake impl."
"Please see the following doc for next steps: "
"https://docs.google.com/document/d/1_W62p8WJOQQUzPsJYa7s701JXt0qf2OfLub2sbkHOaU/edit");
}
diff --git a/aten/src/ATen/core/dispatch/Dispatcher.cpp b/aten/src/ATen/core/dispatch/Dispatcher.cpp
index a355bbe92f..6077ac8e34 100644
--- a/aten/src/ATen/core/dispatch/Dispatcher.cpp
+++ b/aten/src/ATen/core/dispatch/Dispatcher.cpp
@@ -266,24 +266,25 @@ void Dispatcher::deregisterDef_(
namespace {
-using AbstractImplPyStubsType = std::unordered_map<at::OperatorName, std::pair<const char*, const char*>>;
-AbstractImplPyStubsType& abstractImplPyStubsSingleton() {
- static AbstractImplPyStubsType _data;
+// Maps OperatorName to (python module name, description) tuple.
+using PythonModuleMapType = std::unordered_map<at::OperatorName, std::pair<const char*, const char*>>;
+PythonModuleMapType& pythonModulesSingleton() {
+ static PythonModuleMapType _data;
return _data;
}
}
-c10::optional<std::pair<const char*, const char*>> Dispatcher::getAbstractImplPyStub(OperatorName op_name) {
+c10::optional<std::pair<const char*, const char*>> Dispatcher::getPyStub(OperatorName op_name) {
std::lock_guard<std::mutex> lock(guard_->mutex);
- auto found = abstractImplPyStubsSingleton().find(op_name);
- if (found == abstractImplPyStubsSingleton().end()) {
+ auto found = pythonModulesSingleton().find(op_name);
+ if (found == pythonModulesSingleton().end()) {
return c10::nullopt;
}
return found->second;
}
-RegistrationHandleRAII Dispatcher::registerAbstractImplPyStub(
+RegistrationHandleRAII Dispatcher::registerPythonModule(
const OperatorName& op_name,
const char* pymodule,
const char* context
@@ -292,28 +293,28 @@ RegistrationHandleRAII Dispatcher::registerAbstractImplPyStub(
// If there are duplicates, we just let it through and warn about it.
// Throwing an error during static initialization causes a crash that
// doesn't give any sign of what happened.
- auto found = abstractImplPyStubsSingleton().find(op_name);
- if (found != abstractImplPyStubsSingleton().end()) {
+ auto found = pythonModulesSingleton().find(op_name);
+ if (found != pythonModulesSingleton().end()) {
TORCH_WARN(
- "Tried to register an abstract impl pystub for ", op_name, " ",
+ "Tried to register an python registration stub (pystub) for ", op_name, " ",
"that specifies the Python module ", pymodule, " "
"but there already was a pystub that specifies the Python module ",
found->second.first, ". We will override the existing pystub.");
}
- abstractImplPyStubsSingleton()[op_name] = std::make_pair(pymodule, context);
+ pythonModulesSingleton()[op_name] = std::make_pair(pymodule, context);
return RegistrationHandleRAII([guard = this->guard_, op_name] {
std::lock_guard<std::mutex> lock(guard->mutex);
if (!guard->alive.load()) {
return;
}
- abstractImplPyStubsSingleton().erase(op_name);
+ pythonModulesSingleton().erase(op_name);
});
}
-void Dispatcher::throwIfHasAbstractImplPyStub(OperatorName op_name) {
+void Dispatcher::throwIfHasPythonModule(OperatorName op_name) {
std::lock_guard<std::mutex> lock(guard_->mutex);
- auto elt = abstractImplPyStubsSingleton().find(op_name);
- if (elt == abstractImplPyStubsSingleton().end()) {
+ auto elt = pythonModulesSingleton().find(op_name);
+ if (elt == pythonModulesSingleton().end()) {
return;
}
const char* pymodule = elt->second.first;
diff --git a/aten/src/ATen/core/dispatch/Dispatcher.h b/aten/src/ATen/core/dispatch/Dispatcher.h
index 020f9e8e6d..c6d336510c 100644
--- a/aten/src/ATen/core/dispatch/Dispatcher.h
+++ b/aten/src/ATen/core/dispatch/Dispatcher.h
@@ -224,17 +224,17 @@ public:
RegistrationHandleRAII registerImpl(OperatorName op_name, c10::optional<DispatchKey> dispatch_key, KernelFunction kernel, c10::optional<impl::CppSignature> cpp_signature, std::unique_ptr<FunctionSchema> inferred_function_schema, std::string debug);
/**
- * Given an operator, tells the Dispatcher that we have implemented an abstract impl
+ * Given an operator, tells the Dispatcher that we have implemented a fake impl
* for this op in the given Python module. Call this a "pystub".
*/
- RegistrationHandleRAII registerAbstractImplPyStub(const OperatorName& op_name, const char* pymodule, const char* context);
+ RegistrationHandleRAII registerPythonModule(const OperatorName& op_name, const char* pymodule, const char* context);
/**
- * Given an operator, throws if we have an abstract impl pystub.
+ * Given an operator, throws if we have a pystub.
*/
- void throwIfHasAbstractImplPyStub(OperatorName op_name);
+ void throwIfHasPythonModule(OperatorName op_name);
- c10::optional<std::pair<const char*, const char*>> getAbstractImplPyStub(OperatorName op_name);
+ c10::optional<std::pair<const char*, const char*>> getPyStub(OperatorName op_name);
/**
* Register a new operator by name.
diff --git a/aten/src/ATen/core/library.cpp b/aten/src/ATen/core/library.cpp
index 34412dd5fd..fd349da2f8 100644
--- a/aten/src/ATen/core/library.cpp
+++ b/aten/src/ATen/core/library.cpp
@@ -133,12 +133,12 @@ Library& Library::_def(c10::FunctionSchema&& schema, c10::OperatorName* out_name
}
switch (rv) {
case _RegisterOrVerify::REGISTER:
- if (impl_abstract_pystub_.has_value()) {
+ if (python_module_.has_value()) {
registrars_.emplace_back(
- c10::Dispatcher::singleton().registerAbstractImplPyStub(
+ c10::Dispatcher::singleton().registerPythonModule(
schema.operator_name(),
- impl_abstract_pystub_->first,
- impl_abstract_pystub_->second)
+ python_module_->first,
+ python_module_->second)
);
}
registrars_.emplace_back(
diff --git a/docs/source/library.rst b/docs/source/library.rst
index a17d318cc0..c5f991ca8e 100644
--- a/docs/source/library.rst
+++ b/docs/source/library.rst
@@ -18,8 +18,8 @@ Use :func:`torch.library.custom_op` to create new custom ops.
.. autofunction:: custom_op
-Extending custom ops created from C++
--------------------------------------
+Extending custom ops (created from Python or C++)
+-------------------------------------------------
Use the impl methods, such as :func:`torch.library.impl` and
func:`torch.library.impl_abstract`, to add implementations
@@ -27,6 +27,7 @@ for any operators (they may have been created using :func:`torch.library.custom_
via PyTorch's C++ operator registration APIs).
.. autofunction:: impl
+.. autofunction:: register_fake
.. autofunction:: impl_abstract
.. autofunction:: get_ctx
diff --git a/test/test_custom_ops.py b/test/test_custom_ops.py
index b794f189c2..03ca4a2f0e 100644
--- a/test/test_custom_ops.py
+++ b/test/test_custom_ops.py
@@ -1505,9 +1505,7 @@ class TestCustomOp(CustomOpTestCaseBase):
op(x)
x = torch.randn(3, device="meta")
- with self.assertRaisesRegex(
- NotImplementedError, "no abstract impl or Meta kernel"
- ):
+ with self.assertRaisesRegex(NotImplementedError, "no fake impl or Meta kernel"):
op(x)
@custom_ops.custom_op(f"{TestCustomOp.test_ns}::bar")
diff --git a/torch/_higher_order_ops/auto_functionalize.py b/torch/_higher_order_ops/auto_functionalize.py
index 55567ac4c9..89263bd65e 100644
--- a/torch/_higher_order_ops/auto_functionalize.py
+++ b/torch/_higher_order_ops/auto_functionalize.py
@@ -23,7 +23,7 @@ from torch.fx.experimental.proxy_tensor import (
# op. First, when FakeTensor sees this op:
# - If the schema says it returns nothing, we can generate a trivial
# FakeTensor rule for it (that returns nothing).
-# - Otherwise, the user needs to provide a FakeTensor rule (abstract impl)
+# - Otherwise, the user needs to provide a FakeTensor impl (fake impl)
#
# Next, when Python FunctionalTensor sees the op, it will functionalize
# it by emitting a call to an auto_functionalize(op, ["x"], {"x": ...})
diff --git a/torch/_library/abstract_impl.py b/torch/_library/abstract_impl.py
index f5eb7e3578..89634ec0eb 100644
--- a/torch/_library/abstract_impl.py
+++ b/torch/_library/abstract_impl.py
@@ -8,7 +8,7 @@ from torch._library.utils import Kernel, RegistrationHandle
class AbstractImplHolder:
- """A holder where one can register an abstract impl to."""
+ """A holder where one can register an fake impl to."""
def __init__(self, qualname: str):
self.qualname: str = qualname
@@ -16,45 +16,45 @@ class AbstractImplHolder:
self.lib: Optional[torch.library.Library] = None
def register(self, func: Callable, source: str) -> RegistrationHandle:
- """Register an abstract impl.
+ """Register an fake impl.
Returns a RegistrationHandle that one can use to de-register this
- abstract impl.
+ fake impl.
"""
if self.kernel is not None:
raise RuntimeError(
- f"impl_abstract(...): the operator {self.qualname} "
- f"already has an abstract impl registered at "
+ f"register_fake(...): the operator {self.qualname} "
+ f"already has an fake impl registered at "
f"{self.kernel.source}."
)
if torch._C._dispatch_has_kernel_for_dispatch_key(self.qualname, "Meta"):
raise RuntimeError(
- f"impl_abstract(...): the operator {self.qualname} "
+ f"register_fake(...): the operator {self.qualname} "
f"already has an DispatchKey::Meta implementation via a "
f"pre-existing torch.library or TORCH_LIBRARY registration. "
f"Please either remove that registration or don't call "
- f"impl_abstract."
+ f"register_fake."
)
if torch._C._dispatch_has_kernel_for_dispatch_key(
self.qualname, "CompositeImplicitAutograd"
):
raise RuntimeError(
- f"impl_abstract(...): the operator {self.qualname} "
+ f"register_fake(...): the operator {self.qualname} "
f"already has an implementation for this device type via a "
f"pre-existing registration to "
f"DispatchKey::CompositeImplicitAutograd."
- f"CompositeImplicitAutograd operators do not need an abstract "
+ f"CompositeImplicitAutograd operators do not need an fake "
f"impl; "
f"instead, the operator will decompose into its constituents "
f"and those "
- f"can have abstract impls defined on them."
+ f"can have fake impls defined on them."
)
# Store the kernel in this holder
self.kernel = Kernel(func, source)
- # Also register the abstract impl to Meta key
+ # Also register the fake impl to Meta key
if self.lib is None:
ns = self.qualname.split("::")[0]
self.lib = torch.library.Library(ns, "FRAGMENT")
@@ -116,7 +116,7 @@ def set_ctx_getter(ctx_getter):
class AbstractImplCtx:
"""
- Context object for writing abstract implementations for custom operators.
+ Context object for writing fake implementations for custom operators.
"""
def __init__(self, _fake_mode, _op):
@@ -133,7 +133,7 @@ class AbstractImplCtx:
def new_dynamic_size(self, *, min=0, max=None) -> torch.SymInt:
"""Constructs a new symint (symbolic int) representing a data-dependent value.
- This is useful for writing the abstract implementation (which is necessary
+ This is useful for writing the fake implementation (which is necessary
for torch.compile) for a CustomOp where an output Tensor has a size
that depends on the data of the input Tensors.
@@ -161,10 +161,10 @@ class AbstractImplCtx:
>>> lib = torch.library.Library("mymodule", "FRAGMENT")
>>> lib.define("mymodule::custom_nonzero(Tensor x) -> Tensor")
>>>
- >>> @torch.library.impl_abstract("mymodule::custom_nonzero")
- >>> def custom_nonzero_abstract(x):
+ >>> @torch.library.register_fake("mymodule::custom_nonzero")
+ >>> def _(x):
>>> # Number of nonzero-elements is data-dependent.
- >>> # Since we cannot peek at the data in an abstract impl,
+ >>> # Since we cannot peek at the data in an fake impl,
>>> # we use the ctx object to construct a new symint that
>>> # represents the data-dependent size.
>>> ctx = torch.library.get_ctx()
@@ -174,7 +174,7 @@ class AbstractImplCtx:
>>> return result
>>>
>>> @torch.library.impl(lib, "custom_nonzero", "CPU")
- >>> def custom_nonzero_cpu(x):
+ >>> def _(x):
>>> x_np = x.numpy()
>>> res = np.stack(np.nonzero(x_np), axis=1)
>>> return torch.tensor(res, device=x.device)
@@ -208,7 +208,7 @@ class AbstractImplCtx:
def to_fake_tensor(self, tensor: torch.Tensor):
"""
- Creates a fake tensor from a concrete tensor. Note: this is not needed for impl_abstract.
+ Creates a fake tensor from a concrete tensor. Note: this is not needed for register_fake.
This is useful for register_fake_class (which is necessary for torch.compile) for custom class.
Users need to implement a from_real method that takes a real custom object and creates a fake
diff --git a/torch/_library/simple_registry.py b/torch/_library/simple_registry.py
index 1213266096..64a543e99b 100644
--- a/torch/_library/simple_registry.py
+++ b/torch/_library/simple_registry.py
@@ -8,11 +8,11 @@ class SimpleLibraryRegistry:
The "simple" torch.library APIs are a higher-level API on top of the
raw PyTorch DispatchKey registration APIs that includes:
- - abstract impl
+ - fake impl
Registrations for these APIs do not go into the PyTorch dispatcher's
table because they may not directly involve a DispatchKey. For example,
- the abstract impl is a Python function that gets invoked by FakeTensor.
+ the fake impl is a Python function that gets invoked by FakeTensor.
Instead, we manage them here.
SimpleLibraryRegistry is a mapping from a fully qualified operator name
diff --git a/torch/_utils_internal.py b/torch/_utils_internal.py
index 8cf4b1826a..d12a499193 100644
--- a/torch/_utils_internal.py
+++ b/torch/_utils_internal.py
@@ -52,13 +52,13 @@ def resolve_library_path(path: str) -> str:
def throw_abstract_impl_not_imported_error(opname, module, context):
if module in sys.modules:
raise NotImplementedError(
- f"{opname}: We could not find the abstract impl for this operator. "
+ f"{opname}: We could not find the fake impl for this operator. "
)
else:
raise NotImplementedError(
- f"{opname}: We could not find the abstract impl for this operator. "
+ f"{opname}: We could not find the fake impl for this operator. "
f"The operator specified that you may need to import the '{module}' "
- f"Python module to load the abstract impl. {context}"
+ f"Python module to load the fake impl. {context}"
)
diff --git a/torch/csrc/utils/python_dispatch.cpp b/torch/csrc/utils/python_dispatch.cpp
index 2b905a96f6..5af8c438b1 100644
--- a/torch/csrc/utils/python_dispatch.cpp
+++ b/torch/csrc/utils/python_dispatch.cpp
@@ -794,7 +794,7 @@ void initDispatchBindings(PyObject* module) {
m.def(
"_dispatch_is_main_interpreter", []() { return isMainPyInterpreter(); });
m.def("_dispatch_pystub", [](const char* name, const char* overload) {
- return c10::Dispatcher::singleton().getAbstractImplPyStub(
+ return c10::Dispatcher::singleton().getPyStub(
c10::OperatorName(name, overload));
});
diff --git a/torch/library.h b/torch/library.h
index 793c875442..fcac0e8094 100644
--- a/torch/library.h
+++ b/torch/library.h
@@ -611,18 +611,23 @@ class TORCH_API Library final {
}
/// Declares that for all operators that are subsequently def'ed, their
- /// abstract impls may be found in the given Python module (pymodule).
- /// This registers some help text that is used if the abstract impl
+ /// fake impls may be found in the given Python module (pymodule).
+ /// This registers some help text that is used if the fake impl
/// cannot be found.
///
/// Args:
/// - pymodule: the python module
/// - context: We may include this in the error message.
- Library& impl_abstract_pystub(const char* pymodule, const char* context = "") {
- impl_abstract_pystub_ = {pymodule, context};
+ Library& set_python_module(const char* pymodule, const char* context = "") {
+ python_module_ = {pymodule, context};
return *this;
}
+ /// Deprecated; use set_python_module instead
+ Library& impl_abstract_pystub(const char* pymodule, const char* context = "") {
+ return set_python_module(pymodule, context);
+ }
+
/// Define an operator for a schema and then register an implementation for
/// it. This is typically what you would use if you aren't planning
/// on making use of the dispatcher to structure your operator
@@ -844,7 +849,7 @@ class TORCH_API Library final {
Kind kind_;
c10::optional<std::string> ns_;
c10::optional<c10::DispatchKey> dispatch_key_;
- c10::optional<std::pair<const char*, const char*>> impl_abstract_pystub_;
+ c10::optional<std::pair<const char*, const char*>> python_module_;
const char* file_;
uint32_t line_;
diff --git a/torch/library.py b/torch/library.py
index a7488c81d6..bf53fa874b 100644
--- a/torch/library.py
+++ b/torch/library.py
@@ -8,6 +8,7 @@ import inspect
import re
import contextlib
import sys
+import warnings
from torch._library.custom_ops import custom_op
@@ -17,6 +18,7 @@ __all__ = [
'define',
'fallthrough_kernel',
'impl_abstract',
+ 'register_fake',
'get_ctx',
'custom_op',
]
@@ -244,7 +246,7 @@ def define(qualname, schema, *, lib=None, tags=()):
This entrypoint defines the custom operator (the first step)
you must then perform the second step by calling various
``impl_*`` APIs, like :func:`torch.library.impl` or
- :func:`torch.library.impl_abstract`.
+ :func:`torch.library.register_fake`.
Args:
qualname (str): The qualified name for the operator. Should be
@@ -393,21 +395,35 @@ def _(lib: Library, name, dispatch_key=""):
return wrap
-
def impl_abstract(qualname, func=None, *, lib=None, _stacklevel=1):
- r"""Register an abstract implementation for this operator.
+ r"""This API was renamed to :func:`torch.library.register_fake` in PyTorch 2.4.
+ Please use that instead.
+ """
+ warnings.warn("torch.library.impl_abstract was renamed to "
+ "torch.library.register_fake. Please use that instead; "
+ "we will remove torch.library.impl_abstract in a future "
+ "version of PyTorch.",
+ DeprecationWarning, stacklevel=2)
+ return register_fake(qualname, func, lib=lib, _stacklevel=_stacklevel + 1)
+
+
+
+def register_fake(qualname, func=None, /, *, lib=None, _stacklevel=1):
+ r"""Register a FakeTensor implementation ("fake impl") for this operator.
+
+ Also sometimes known as a "meta kernel", "abstract impl".
- An "abstract implementation" specifies the behavior of this operator on
- Tensors that carry no data. Given some input Tensors with certain properties
- (sizes/strides/storage_offset/device), it specifies what the properties of
- the output Tensors are.
+ An "FakeTensor implementation" specifies the behavior of this operator on
+ Tensors that carry no data ("FakeTensor"). Given some input Tensors with
+ certain properties (sizes/strides/storage_offset/device), it specifies
+ what the properties of the output Tensors are.
- The abstract implementation has the same signature as the operator.
- It is run for both FakeTensors and meta tensors. To write an abstract
+ The FakeTensor implementation has the same signature as the operator.
+ It is run for both FakeTensors and meta tensors. To write a FakeTensor
implementation, assume that all Tensor inputs to the operator are
regular CPU/CUDA/Meta tensors, but they do not have storage, and
you are trying to return regular CPU/CUDA/Meta tensor(s) as output.
- The abstract implementation must consist of only PyTorch operations
+ The FakeTensor implementation must consist of only PyTorch operations
(and may not directly access the storage or data of any input or
intermediate Tensors).
@@ -426,8 +442,8 @@ def impl_abstract(qualname, func=None, *, lib=None, _stacklevel=1):
>>> "mylib::custom_linear",
>>> "(Tensor x, Tensor weight, Tensor bias) -> Tensor")
>>>
- >>> @torch.library.impl_abstract("mylib::custom_linear")
- >>> def custom_linear_abstract(x, weight, bias):
+ >>> @torch.library.register_fake("mylib::custom_linear")
+ >>> def _(x, weight, bias):
>>> assert x.dim() == 2
>>> assert weight.dim() == 2
>>> assert bias.dim() == 1
@@ -448,10 +464,10 @@ def impl_abstract(qualname, func=None, *, lib=None, _stacklevel=1):
>>> # Example 2: an operator with data-dependent output shape
>>> torch.library.define("mylib::custom_nonzero", "(Tensor x) -> Tensor")
>>>
- >>> @torch.library.impl_abstract("mylib::custom_nonzero")
- >>> def custom_nonzero_abstract(x):
+ >>> @torch.library.register_fake("mylib::custom_nonzero")
+ >>> def _(x):
>>> # Number of nonzero-elements is data-dependent.
- >>> # Since we cannot peek at the data in an abstract impl,
+ >>> # Since we cannot peek at the data in an fake impl,
>>> # we use the ctx object to construct a new symint that
>>> # represents the data-dependent size.
>>> ctx = torch.library.get_ctx()
@@ -478,7 +494,7 @@ def impl_abstract(qualname, func=None, *, lib=None, _stacklevel=1):
source = torch._library.utils.get_source(_stacklevel + 1)
frame = sys._getframe(_stacklevel)
caller_module = inspect.getmodule(frame)
- # Can be none if you call impl_abstract from somewhere there isn't a module
+ # Can be none if you call register_fake from somewhere there isn't a module
# (e.g. __main__)
caller_module_name = None if caller_module is None else caller_module.__name__
@@ -505,8 +521,8 @@ def impl_abstract(qualname, func=None, *, lib=None, _stacklevel=1):
# If the op was defined in C++, then we want to make sure there was an
-# m.impl_abstract_pystub(module, ...) call and that the module is the
-# same as the module that called torch.library.impl_abstract.
+# m.set_python_module(module, ...) call and that the module is the
+# same as the module that called torch.library.register_fake.
def _check_pystubs_once(func, qualname, actual_module_name):
checked = False
@@ -528,8 +544,8 @@ def _check_pystubs_once(func, qualname, actual_module_name):
cpp_filename = op._handle().debug()
raise RuntimeError(
f"Operator '{qualname}' was defined in C++ and has a Python "
- f"abstract impl. In this situation, we require there to also be a "
- f"companion C++ `m.impl_abstract_pystub(\"{actual_module_name}\")` "
+ f"fake impl. In this situation, we require there to also be a "
+ f"companion C++ `m.set_python_module(\"{actual_module_name}\")` "
f"call, but we could not find one. Please add that to "
f"to the top of the C++ TORCH_LIBRARY({namespace}, ...) block the "
f"operator was registered in ({cpp_filename})")
@@ -537,10 +553,10 @@ def _check_pystubs_once(func, qualname, actual_module_name):
if actual_module_name != pystub_module:
cpp_filename = op._handle().debug()
raise RuntimeError(
- f"Operator '{qualname}' specified that its python abstract impl "
+ f"Operator '{qualname}' specified that its python fake impl "
f"is in the Python module '{pystub_module}' but it was actually found "
- f"in '{actual_module_name}'. Please either move the abstract impl "
- f"or correct the m.impl_abstract_pystub call ({cpp_filename})")
+ f"in '{actual_module_name}'. Please either move the fake impl "
+ f"or correct the m.set_python_module call ({cpp_filename})")
checked = True
return func(*args, **kwargs)
return inner
@@ -556,7 +572,7 @@ def _check_pystubs_once(func, qualname, actual_module_name):
def get_ctx() -> "torch._library.abstract_impl.AbstractImplCtx":
"""get_ctx() returns the current AbstractImplCtx object.
- Calling ``get_ctx()`` is only valid inside of an abstract impl
- (see :func:`torch.library.impl_abstract` for more usage details.
+ Calling ``get_ctx()`` is only valid inside of an fake impl
+ (see :func:`torch.library.register_fake` for more usage details.
"""
return torch._library.abstract_impl.global_ctx_getter()
|
2.41.0
|
f378e1853558d603028c2062373869c6f192885
|
Tue, 16 Apr 2024 17:47:00 +0200
|
[PATCH 0249/1000] [Inductor cutlass backend] Fix flaky test ( CUDA IMA ) (#124106)
|
A unit test within test_cutlass_backend.py can fail with CUDA illegal memory accesses because some CUTLASS kernels contain bugs. With autotuning moved into subprocesses, such an illegal memory access simply causes the buggy CUTLASS kernels to be filtered out instead of bringing down the entire process. Test Plan: This is a change to a unit test. It's recommended to use autotune_in_subproc when using the Cutlass backend anyway. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124106 Approved by: https://github.com/eellison
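As a rough sketch of the pattern the test relies on (the backend string and CUTLASS directory below are placeholders for illustration, not values from this patch):
```
import torch
import torch._inductor.config as config

def matmul(a, b):
    return a @ b

# Tuning in a subprocess means a CUDA illegal memory access inside a buggy
# CUTLASS kernel only disqualifies that candidate instead of crashing the
# parent process.
with config.patch(
    {
        "max_autotune": True,
        "autotune_in_subproc": True,
        "max_autotune_gemm_backends": "CUTLASS,ATen",  # placeholder
        "cuda.cutlass_dir": "/path/to/cutlass",        # placeholder
        "cuda.cutlass_max_profiling_configs": 4,
    }
):
    compiled = torch.compile(matmul)
    # Calling compiled(x, y) on CUDA inputs would autotune GEMM choices
    # under the settings above.
```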
|
diff --git a/test/inductor/test_cutlass_backend.py b/test/inductor/test_cutlass_backend.py
index bc411ad9ce..0bcd895b00 100644
--- a/test/inductor/test_cutlass_backend.py
+++ b/test/inductor/test_cutlass_backend.py
@@ -385,10 +385,12 @@ class TestCutlassBackend(TestCase):
with config.patch(
{
"max_autotune": True,
- "autotune_in_subproc": False,
+ # Some Cutlass Kernels fail with IMA on this example, which leads to unrecoverable CUDA errors
+ # unless we tune in a subproc here.
+ "autotune_in_subproc": True,
"max_autotune_gemm_backends": max_autotune_gemm_backends,
"cuda.cutlass_dir": _CUTLASS_DIR,
- "cuda.cutlass_max_profiling_configs": 2,
+ "cuda.cutlass_max_profiling_configs": 4,
}
):
# No broadcast
|
2.41.0
|
880a71010acbe30e5e2a63f3b216aa635413df9
|
Wed, 17 Apr 2024 14:12:29 +0000
|
[PATCH 0250/1000] [BE] Add missing `std::` prefix to `Unique.mm` (#124232)
|
Follow-up to https://github.com/pytorch/pytorch/pull/124117; fixes the following warning ``` /Users/malfet/git/pytorch/pytorch/aten/src/ATen/native/mps/operations/Unique.mm:282:26: warning: use of function template name with no prior declaration in function call with explicit template arguments is a C++20 extension [-Wc++20-extensions] return std::make_tuple(get<0>(out).to("mps"), get<1>(out).to("mps"), get<2>(out).to("mps")); ^ ``` Pull Request resolved: https://github.com/pytorch/pytorch/pull/124232 Approved by: https://github.com/kit1980, https://github.com/Skylion007
|
diff --git a/aten/src/ATen/native/mps/operations/Unique.mm b/aten/src/ATen/native/mps/operations/Unique.mm
index a1241d5395..fc30c2d0b7 100644
--- a/aten/src/ATen/native/mps/operations/Unique.mm
+++ b/aten/src/ATen/native/mps/operations/Unique.mm
@@ -279,7 +279,7 @@ static std::tuple<Tensor, Tensor, Tensor> _unique_impl_mps(const Tensor& self,
}
static std::tuple<Tensor, Tensor, Tensor> castToMPS(std::tuple<Tensor, Tensor, Tensor> out) {
- return std::make_tuple(get<0>(out).to("mps"), get<1>(out).to("mps"), get<2>(out).to("mps"));
+ return std::make_tuple(std::get<0>(out).to("mps"), std::get<1>(out).to("mps"), std::get<2>(out).to("mps"));
}
std::tuple<Tensor, Tensor, Tensor> unique_consecutive_mps(const Tensor& self,
|
2.41.0
|
a735ece6b46248b6bb224bae4d0d7df24a335f0
|
Wed, 17 Apr 2024 14:13:51 +0000
|
[PATCH 0251/1000] Remove @abock from ONNX approvers/codeowners (#124259)
|
As he is no longer interested in the project. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124259 Approved by: https://github.com/kit1980, https://github.com/BowenBao
|
diff --git a/.github/merge_rules.yaml b/.github/merge_rules.yaml
index 8e9f05051c..cd1f5cb7c4 100644
--- a/.github/merge_rules.yaml
+++ b/.github/merge_rules.yaml
@@ -28,7 +28,6 @@
- caffe2/python/onnx/**
approved_by:
- BowenBao
- - abock
- justinchuby
- shubhambhokare1
- thiagocrepaldi
diff --git a/CODEOWNERS b/CODEOWNERS
index 9f0891b029..3d09c31c43 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -43,12 +43,12 @@ nn/qat/ @jerryzh168
/torch/csrc/distributed/rpc/tensorpipe_agent.h @jiayisuse @osalpekar @lw
# ONNX Export
-/torch/_dynamo/backends/onnxrt.py @bowenbao @abock @thiagocrepaldi @wschin
-/torch/csrc/jit/passes/onnx.h @bowenbao @abock @thiagocrepaldi
-/torch/csrc/jit/passes/onnx.cpp @bowenbao @abock @thiagocrepaldi
-/torch/csrc/jit/passes/onnx/ @bowenbao @abock @thiagocrepaldi
-/torch/onnx/ @bowenbao @abock @thiagocrepaldi @wschin
-/test/onnx/ @bowenbao @abock @thiagocrepaldi @wschin
+/torch/_dynamo/backends/onnxrt.py @bowenbao @thiagocrepaldi @wschin
+/torch/csrc/jit/passes/onnx.h @bowenbao @thiagocrepaldi
+/torch/csrc/jit/passes/onnx.cpp @bowenbao @thiagocrepaldi
+/torch/csrc/jit/passes/onnx/ @bowenbao @thiagocrepaldi
+/torch/onnx/ @bowenbao @thiagocrepaldi @wschin
+/test/onnx/ @bowenbao @thiagocrepaldi @wschin
# CI
/.ci @pytorch/pytorch-dev-infra
|
2.41.0
|
2b0c0a34e0c5daba1e7bb3b9e4dece5020c9414
|
Wed, 17 Apr 2024 14:30:26 +0300
|
[PATCH 0252/1000] Fix index_reduce sampler filter when op_info.variant_test_name is specified (#123375)
|
As in the title: the `index_reduce` samples must correspond to the reduction type specified by `variant_test_name`. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123375 Approved by: https://github.com/zou3519, https://github.com/peterbell10
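For context, a quick sketch of the four `index_reduce` reduction variants that the test matrices below now distinguish (the sample tensors are made up for illustration):
```
import torch

x = torch.ones(5, 3)
src = torch.arange(1.0, 10.0).reshape(3, 3)
index = torch.tensor([0, 4, 2])

# Each variant_test_name ("prod", "mean", "amax", "amin") maps to one reduce
# mode, so the sampler must only emit samples whose `reduce` argument matches.
for reduce in ("prod", "mean", "amax", "amin"):
    out = x.clone().index_reduce_(0, index, src, reduce, include_self=False)
```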
|
diff --git a/test/distributed/_tensor/test_dtensor_ops.py b/test/distributed/_tensor/test_dtensor_ops.py
index 9a43cf2e78..d14d2d851f 100644
--- a/test/distributed/_tensor/test_dtensor_ops.py
+++ b/test/distributed/_tensor/test_dtensor_ops.py
@@ -190,7 +190,10 @@ dtensor_fails = {
xfail("index_copy"),
xfail("index_fill"),
xfail("index_put"),
- xfail("index_reduce"),
+ xfail("index_reduce", "prod"),
+ xfail("index_reduce", "mean"),
+ xfail("index_reduce", "amax"),
+ xfail("index_reduce", "amin"),
xfail("index_select"),
xfail("isin"),
xfail("isinf"),
diff --git a/test/functorch/test_ops.py b/test/functorch/test_ops.py
index 18f8bf6ee8..e913d90dde 100644
--- a/test/functorch/test_ops.py
+++ b/test/functorch/test_ops.py
@@ -934,7 +934,7 @@ class TestOperators(TestCase):
# It looks like you're either (1) calling .item() on a Tensor or
# (2) attempting to use a Tensor in some data-dependent control flow or
# (3) encountering this error in PyTorch internals.
- xfail("index_reduce"),
+ xfail("index_reduce", "prod"),
decorate(
"linalg.householder_product", decorator=runOnRocm
), # works on ROCm
@@ -1154,7 +1154,7 @@ class TestOperators(TestCase):
xfail("sparse.sampled_addmm", ""),
xfail("sparse.mm", "reduce"),
xfail("as_strided_scatter", ""), # calls as_strided
- xfail("index_reduce", ""), # .item() call
+ xfail("index_reduce", "prod"), # .item() call
# ---------------------------------------------------------------------
}
)
@@ -1507,7 +1507,18 @@ class TestOperators(TestCase):
xfail("cdouble", ""),
xfail("cfloat", ""),
xfail("chalf", ""),
- xfail("index_reduce", ""),
+ xfail(
+ "index_reduce", "prod"
+ ), # aten::index_reduce hit the vmap fallback which is currently disabled
+ xfail(
+ "index_reduce", "mean"
+ ), # aten::index_reduce hit the vmap fallback which is currently disabled
+ xfail(
+ "index_reduce", "amax"
+ ), # aten::index_reduce hit the vmap fallback which is currently disabled
+ xfail(
+ "index_reduce", "amin"
+ ), # aten::index_reduce hit the vmap fallback which is currently disabled
xfail("nn.functional.dropout3d", ""),
xfail("as_strided_scatter", ""),
xfail("_segment_reduce", "offsets"),
@@ -1758,7 +1769,10 @@ class TestOperators(TestCase):
"_segment_reduce", "offsets"
), # NYI: forward-AD for _segment_reduce
xfail("sparse.mm", "reduce"), # Sparse tensors have no strides
- xfail("index_reduce", ""), # NYI: forward-AD for index_reduce
+ xfail("index_reduce", "prod"), # NYI: forward-AD for index_reduce
+ xfail("index_reduce", "mean"), # NYI: forward-AD for index_reduce
+ xfail("index_reduce", "amax"), # NYI: forward-AD for index_reduce
+ xfail("index_reduce", "amin"), # NYI: forward-AD for index_reduce
xfail(
"_segment_reduce", "lengths"
), # NYI: forward-AD for _segment_reduce
@@ -1896,9 +1910,10 @@ class TestOperators(TestCase):
xfail("double"), # required rank 4 tensor to use channels_last format
xfail("float"), # required rank 4 tensor to use channels_last format
xfail("half"), # required rank 4 tensor to use channels_last format
- xfail(
- "index_reduce"
- ), # Forward AD not implemented and no decomposition
+ xfail("index_reduce", "prod"), # NYI: forward AD for index_reduce
+ xfail("index_reduce", "mean"), # NYI: forward AD for index_reduce
+ xfail("index_reduce", "amax"), # NYI: forward AD for index_reduce
+ xfail("index_reduce", "amin"), # NYI: forward AD for index_reduce
xfail(
"mvlgamma", "mvlgamma_p_1"
), # vmap: inplace into a regular tensor
diff --git a/test/functorch/test_vmap.py b/test/functorch/test_vmap.py
index 5c6b98fd1f..a23b51da92 100644
--- a/test/functorch/test_vmap.py
+++ b/test/functorch/test_vmap.py
@@ -4357,7 +4357,10 @@ class TestVmapOperatorsOpInfo(TestCase):
xfail("sparse.mm", "reduce"),
xfail("special.chebyshev_polynomial_u"),
xfail("_segment_reduce", "offsets"),
- xfail("index_reduce", ""),
+ xfail("index_reduce", "prod"),
+ xfail("index_reduce", "mean"),
+ xfail("index_reduce", "amin"),
+ xfail("index_reduce", "amax"),
xfail("special.laguerre_polynomial_l"),
xfail("special.hermite_polynomial_h"),
xfail("jiterator_binary", device_type="cuda"),
diff --git a/test/inductor/test_torchinductor.py b/test/inductor/test_torchinductor.py
index 1bda97a3fe..ba1da01427 100644
--- a/test/inductor/test_torchinductor.py
+++ b/test/inductor/test_torchinductor.py
@@ -304,7 +304,9 @@ def compute_grads(args, kwrags, results, grads):
return leaf_tensors
flat_results = pytree.tree_leaves(results)
- flat_diff_results = [r for r in flat_results if r.requires_grad]
+ flat_diff_results = [
+ r for r in flat_results if isinstance(r, torch.Tensor) and r.requires_grad
+ ]
assert len(flat_diff_results) > 0
leaf_tensors = gather_leaf_tensors(args, kwrags)
@@ -517,7 +519,7 @@ def check_model(
grads = [
torch.rand(r.shape, device=r.device, dtype=r.dtype)
for r in correct_flat
- if r.requires_grad
+ if isinstance(r, torch.Tensor) and r.requires_grad
]
for g in grads:
g /= g.norm()
diff --git a/test/inductor/test_torchinductor_opinfo.py b/test/inductor/test_torchinductor_opinfo.py
index 1d27b44a23..75c4f61ca3 100644
--- a/test/inductor/test_torchinductor_opinfo.py
+++ b/test/inductor/test_torchinductor_opinfo.py
@@ -167,6 +167,8 @@ inductor_skips = defaultdict(dict)
inductor_skips["cpu"] = {
"linalg.ldl_factor": {f32, f64}, # flaky
"nn.functional.cosine_embedding_loss": {b8}, # flaky
+ ("index_reduce", "prod"): {f16}, # flaky
+ ("index_reduce", "mean"): {f16, f64}, # flaky
}
if IS_MACOS and IS_X86:
@@ -225,7 +227,10 @@ inductor_expected_failures_single_sample["cpu"] = {
("normal", "number_mean"): {f16, f32, f64},
("sparse.mm", "reduce"): {f32, f64},
"sparse.sampled_addmm": {f32, f64},
- "to_sparse": {f32, f64},
+ "to_sparse": {
+ f32,
+ f64,
+ }, # NYI: could not find kernel for aten.view.default at dispatch key DispatchKey.SparseCPU
"view_as_complex": {f16},
}
@@ -234,13 +239,16 @@ inductor_expected_failures_single_sample["cuda"] = {
"_upsample_bilinear2d_aa": {f16, f32, f64},
"cholesky": {f32, f64},
"multinomial": {f16, f32, f64},
- "nn.functional.normalize": {f16},
("normal", "in_place"): {f16, f32, f64},
("normal", "number_mean"): {f16, f32, f64},
"sparse.sampled_addmm": {f32, f64},
- "to_sparse": {f16, f32, f64},
- "torch.ops.aten._efficient_attention_forward": {f16, bf16, f32},
- "torch.ops.aten._flash_attention_forward": {f16, bf16, f32},
+ "torch.ops.aten._flash_attention_forward": {f16},
+ "torch.ops.aten._efficient_attention_forward": {f16, f32},
+ "to_sparse": {
+ f16,
+ f32,
+ f64,
+ }, # NYI: could not find kernel for aten.view.default at dispatch key DispatchKey.SparseCUDA
}
@@ -255,12 +263,7 @@ inductor_expected_failures_single_sample["cuda"].update(intentionally_not_handle
inductor_gradient_expected_failures_single_sample = defaultdict(dict)
-inductor_gradient_expected_failures_single_sample["cuda"] = {
- "nn.functional.normalize": {f16},
-}
-
-if not TEST_WITH_ROCM:
- inductor_gradient_expected_failures_single_sample["cuda"]["tanh"] = {f16}
+inductor_gradient_expected_failures_single_sample["cuda"] = {}
if not TEST_MKL:
inductor_expected_failures_single_sample["cpu"].update({})
@@ -337,6 +340,7 @@ inductor_override_kwargs = {
("nn.functional.cosine_similarity", "cuda", f16): {"reference_in_float": True},
("nn.functional.instance_norm", "cuda", f16): {"reference_in_float": True},
("nn.functional.local_response_norm", "cuda", f16): {"reference_in_float": True},
+ ("nn.functional.normalize", "cuda", f16): {"atol": 1e-3, "rtol": 0.05},
("nn.functional.rms_norm", "cuda", f16): {"reference_in_float": True},
("nn.functional.soft_margin_loss", "cuda", f16): {"reference_in_float": True},
("nn.functional.softmin", "cuda", f16): {"atol": 1e-4, "rtol": 0.01},
@@ -374,6 +378,18 @@ inductor_override_kwargs = {
("nn.functional.upsample_bilinear", "cuda", f64): {"atol": 5e-4, "rtol": 0},
("nn.functional.interpolate.bicubic", "cpu", f32): {"atol": 5e-3, "rtol": 0},
("nn.functional.interpolate.bicubic", "cuda", f64): {"atol": 1e-3, "rtol": 0},
+ # Unreasonably high atol requirement:
+ ("index_reduce.mean", "cuda", f16): {"check_gradient": False},
+ ("index_reduce.mean", "cuda", f32): {"check_gradient": False},
+ ("index_reduce.mean", "cuda", f64): {"check_gradient": False},
+ # Gradient contains non-finite entries:
+ ("index_reduce.amin", "cuda", f64): {"check_gradient": False},
+ ("index_reduce.amin", "cuda", f32): {"check_gradient": False},
+ ("index_reduce.amin", "cuda", f16): {"check_gradient": False},
+ ("index_reduce.amax", "cuda", f64): {"check_gradient": False},
+ ("index_reduce.amax", "cuda", f32): {"check_gradient": False},
+ ("index_reduce.amax", "cuda", f16): {"check_gradient": False},
+ ("tanh", "cuda", f16): {"atol": 1e-4, "rtol": 1e-2},
}
@@ -386,6 +402,10 @@ inductor_all_samples = {
"softmax.with_dtype",
"index_add",
"index_copy",
+ "index_reduce.prod",
+ "index_reduce.mean",
+ "index_reduce.amax",
+ "index_reduce.amin",
"scatter_reduce.sum",
"select_scatter",
"squeeze",
diff --git a/test/test_mps.py b/test/test_mps.py
index fe73683fd7..511a76a87d 100644
--- a/test/test_mps.py
+++ b/test/test_mps.py
@@ -668,7 +668,10 @@ def mps_ops_modifier(ops):
'igamma': None,
'igammac': None,
'index_copy': None,
- 'index_reduce': None,
+ 'index_reduceprod': None,
+ 'index_reducemean': None,
+ 'index_reduceamax': None,
+ 'index_reduceamin': None,
'isin': None,
'isneginf': None,
'isposinf': None,
diff --git a/test/test_proxy_tensor.py b/test/test_proxy_tensor.py
index 74eac04572..c92e220349 100644
--- a/test/test_proxy_tensor.py
+++ b/test/test_proxy_tensor.py
@@ -1981,6 +1981,12 @@ out_symbolic_tensor_failures = {
xfail('ones', ''),
xfail('randn', ''),
xfail('zeros', ''),
+
+ # RuntimeError: Cannot call numel() on tensor with symbolic sizes/strides
+ xfail('index_reduce', 'prod'),
+ xfail('index_reduce', 'mean'),
+ xfail('index_reduce', 'amax'),
+ xfail('index_reduce', 'amin'),
}
out_symbolic_tensor_segfaults = {
diff --git a/torch/testing/_internal/common_methods_invocations.py b/torch/testing/_internal/common_methods_invocations.py
index 54987ff26b..4b27ff2002 100644
--- a/torch/testing/_internal/common_methods_invocations.py
+++ b/torch/testing/_internal/common_methods_invocations.py
@@ -5229,9 +5229,10 @@ def sample_inputs_index_reduce(op_info, device, dtype, requires_grad, **kwargs):
shapes = [((), ()), ((1,), (1,)), ((S, S), (S, M)), ((S, S, S), (S, M, S))]
include_selfs = (True, False)
- reduces = ('prod', 'mean', 'amin', 'amax')
+ reduce = op_info.variant_test_name
+ assert reduce in ('prod', 'mean', 'amin', 'amax')
- for shape, include_self, reduce in product(shapes, include_selfs, reduces):
+ for shape, include_self in product(shapes, include_selfs):
self_shape, src_shape = shape
# dim. We handle the scalar case
dim = 1 if len(self_shape) >= 2 else 0
@@ -5243,7 +5244,7 @@ def sample_inputs_index_reduce(op_info, device, dtype, requires_grad, **kwargs):
kwargs={'include_self' : include_self})
# Sample inputs to test edge cases for backward
- if requires_grad:
+ if requires_grad and reduce == 'prod':
# Check that gradients are propagated correctly for prod when zeros in self/src are reduced
# This sample tests gradients for the following cases
# (a) 1 zero reduced (from source (self[0, 1]), from self (self[0, 0]))
@@ -5256,7 +5257,7 @@ def sample_inputs_index_reduce(op_info, device, dtype, requires_grad, **kwargs):
idx = torch.tensor([0, 1, 2, 0], dtype=torch.long, device=device)
yield SampleInput(input,
- args=(0, idx, src, 'prod'),
+ args=(0, idx, src, reduce),
kwargs={'include_self': True})
def sample_inputs_mode(op_info, device, dtype, requires_grad, **kwargs):
@@ -16751,10 +16752,16 @@ op_db: List[OpInfo] = [
dtypes=(torch.bool,)),
),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
- OpInfo('index_reduce',
- dtypes=all_types_and(torch.float16, torch.bfloat16),
- supports_out=True,
- sample_inputs_func=sample_inputs_index_reduce),
+ *(OpInfo('index_reduce',
+ variant_test_name=reduction_type,
+ dtypes=all_types_and(torch.float16, torch.bfloat16),
+ skips=(
+ DecorateInfo(toleranceOverride({torch.float16: tol(atol=2e-3, rtol=3e-3)}),
+ 'TestInductorOpInfo', 'test_comprehensive'),
+ ),
+ supports_out=True,
+ sample_inputs_func=sample_inputs_index_reduce,
+ ) for reduction_type in ('mean', 'prod', 'amin', 'amax')),
OpInfo('__getitem__',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
|
2.41.0
|
e1c98c171d3e7e8ea2ad5e740e3f9c29bd88b07
|
Wed, 17 Apr 2024 11:49:08 +0000
|
[PATCH 0254/1000] [dynamo] support `object.__setattr__(obj, name, value)` (#124068)
|
Resolves #114964. Resolves #114966. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124068 Approved by: https://github.com/jansel
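As a minimal sketch of the newly supported pattern (the `Point` class and `shift` function are hypothetical names, modeled on the test added below), Dynamo can now trace `object.__setattr__` used to mutate an attribute of a frozen dataclass inside a compiled function:

```python
# Hedged sketch, not code from this PR; the names are illustrative.
import dataclasses

import torch


@dataclasses.dataclass(frozen=True)
class Point:
    x: torch.Tensor


@torch.compile(backend="eager")
def shift(t):
    p = Point(t)
    # Frozen dataclasses reject normal attribute assignment, so the usual
    # workaround is object.__setattr__; Dynamo now traces this call.
    object.__setattr__(p, "x", t + 2)
    return p


t = torch.randn(10)
out = shift(t)
assert torch.allclose(out.x, t + 2)
```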
|
diff --git a/test/dynamo/test_misc.py b/test/dynamo/test_misc.py
index 14f43ec37d..8ee9037e4c 100644
--- a/test/dynamo/test_misc.py
+++ b/test/dynamo/test_misc.py
@@ -3058,6 +3058,66 @@ utils_device.CURRENT_DEVICE == None""".split(
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(cnts.op_count, 9)
+ def test_object_setattr(self):
+ @dataclasses.dataclass
+ class A:
+ x: torch.Tensor
+
+ def fn1(x) -> None:
+ a = A(x)
+ object.__setattr__(a, "x", x + 2)
+ return a
+
+ x1 = torch.randn(10)
+ obj11 = fn1(x1.clone())
+
+ cnts = torch._dynamo.testing.CompileCounter()
+ opt_fn1 = torch._dynamo.optimize(cnts)(fn1)
+ obj12 = opt_fn1(x1.clone())
+ self.assertTrue(same(obj11.x, x1 + 2))
+ self.assertTrue(same(obj12.x, x1 + 2))
+ self.assertTrue(same(obj11.x, obj12.x))
+ self.assertEqual(cnts.frame_count, 1)
+
+ @dataclasses.dataclass(frozen=True)
+ class B:
+ x: torch.Tensor
+
+ def fn2(x) -> None:
+ b = B(x)
+ return b
+
+ x2 = torch.randn(10)
+ obj21 = fn2(x2.clone())
+
+ cnts = torch._dynamo.testing.CompileCounter()
+ opt_fn2 = torch._dynamo.optimize(cnts)(fn2)
+ obj22 = opt_fn2(x2.clone())
+ self.assertTrue(same(obj21.x, x2))
+ self.assertTrue(same(obj22.x, x2))
+ self.assertTrue(same(obj21.x, obj22.x))
+ self.assertEqual(cnts.frame_count, 0)
+
+ @dataclasses.dataclass(frozen=True)
+ class C:
+ x: torch.Tensor
+
+ def fn3(x) -> None:
+ c = C(x)
+ object.__setattr__(c, "x", x + 2)
+ return c
+
+ x3 = torch.randn(10)
+ obj31 = fn3(x3.clone())
+
+ cnts = torch._dynamo.testing.CompileCounter()
+ opt_fn3 = torch._dynamo.optimize(cnts)(fn3)
+ obj32 = opt_fn3(x3.clone())
+ self.assertTrue(same(obj31.x, x3 + 2))
+ self.assertTrue(same(obj32.x, x3 + 2))
+ self.assertTrue(same(obj31.x, obj32.x))
+ self.assertEqual(cnts.frame_count, 1)
+
def test_user_defined_class_name(self):
class MyClassFoo:
pass
diff --git a/test/dynamo_expected_failures/TestAOTAutograd.test_default_partitioner_saves_symints_not_tensors_for_bw b/test/dynamo_expected_failures/TestAOTAutograd.test_default_partitioner_saves_symints_not_tensors_for_bw
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestAOTAutograd.test_dupe_arg_torture b/test/dynamo_expected_failures/TestAOTAutograd.test_dupe_arg_torture
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestAOTAutograd.test_input_aliased_with_mutation_output_alias b/test/dynamo_expected_failures/TestAOTAutograd.test_input_aliased_with_mutation_output_alias
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestAOTAutograd.test_input_data_and_metadata_mutation b/test/dynamo_expected_failures/TestAOTAutograd.test_input_data_and_metadata_mutation
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestAOTAutograd.test_input_mutation_alias_everything b/test/dynamo_expected_failures/TestAOTAutograd.test_input_mutation_alias_everything
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestAOTAutograd.test_input_mutation_aliases_and_output_alias b/test/dynamo_expected_failures/TestAOTAutograd.test_input_mutation_aliases_and_output_alias
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestAOTAutograd.test_input_mutation_and_output_view b/test/dynamo_expected_failures/TestAOTAutograd.test_input_mutation_and_output_view
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestAOTAutograd.test_input_mutation_is_output b/test/dynamo_expected_failures/TestAOTAutograd.test_input_mutation_is_output
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestAOTAutograd.test_input_mutation_metadata b/test/dynamo_expected_failures/TestAOTAutograd.test_input_mutation_metadata
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestAOTAutograd.test_input_mutation_output_view_multiple b/test/dynamo_expected_failures/TestAOTAutograd.test_input_mutation_output_view_multiple
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestAOTAutograd.test_input_output_view_metadata_mutate_multiple b/test/dynamo_expected_failures/TestAOTAutograd.test_input_output_view_metadata_mutate_multiple
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestAOTAutograd.test_input_output_view_mutate_multiple b/test/dynamo_expected_failures/TestAOTAutograd.test_input_output_view_mutate_multiple
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestAOTAutograd.test_input_output_view_simple b/test/dynamo_expected_failures/TestAOTAutograd.test_input_output_view_simple
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestAOTAutograd.test_invalid_dupe b/test/dynamo_expected_failures/TestAOTAutograd.test_invalid_dupe
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestAOTAutograd.test_invalid_dupe_fake b/test/dynamo_expected_failures/TestAOTAutograd.test_invalid_dupe_fake
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestAOTAutograd.test_invalid_dupe_left_bias b/test/dynamo_expected_failures/TestAOTAutograd.test_invalid_dupe_left_bias
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestAOTAutograd.test_new_inp_requires_grad_now b/test/dynamo_expected_failures/TestAOTAutograd.test_new_inp_requires_grad_now
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestAOTAutograd.test_output_aliases_input_multi_output_view b/test/dynamo_expected_failures/TestAOTAutograd.test_output_aliases_input_multi_output_view
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestAOTAutograd.test_output_aliases_input_view_meta_replay b/test/dynamo_expected_failures/TestAOTAutograd.test_output_aliases_input_view_meta_replay
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestAOTAutograd.test_output_aliases_intermediate_and_returned b/test/dynamo_expected_failures/TestAOTAutograd.test_output_aliases_intermediate_and_returned
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestAOTAutograd.test_output_aliases_intermediate_and_returned_different_grad b/test/dynamo_expected_failures/TestAOTAutograd.test_output_aliases_intermediate_and_returned_different_grad
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestAOTAutograd.test_output_aliases_intermediate_and_returned_flipped b/test/dynamo_expected_failures/TestAOTAutograd.test_output_aliases_intermediate_and_returned_flipped
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestAOTAutograd.test_output_aliases_intermediate_multi_output_view b/test/dynamo_expected_failures/TestAOTAutograd.test_output_aliases_intermediate_multi_output_view
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestAOTAutograd.test_output_aliases_intermediate_multiple b/test/dynamo_expected_failures/TestAOTAutograd.test_output_aliases_intermediate_multiple
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestAOTAutograd.test_output_aliases_intermediate_multiple_mixed b/test/dynamo_expected_failures/TestAOTAutograd.test_output_aliases_intermediate_multiple_mixed
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestAOTAutograd.test_output_aliases_intermediate_returned_multiple_times b/test/dynamo_expected_failures/TestAOTAutograd.test_output_aliases_intermediate_returned_multiple_times
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestAOTAutograd.test_output_aliases_intermediate_view_meta_replay b/test/dynamo_expected_failures/TestAOTAutograd.test_output_aliases_intermediate_view_meta_replay
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestAOTAutograd.test_output_aliases_output_view_meta_replay b/test/dynamo_expected_failures/TestAOTAutograd.test_output_aliases_output_view_meta_replay
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestAOTAutograd.test_output_all_alias_types b/test/dynamo_expected_failures/TestAOTAutograd.test_output_all_alias_types
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestAOTAutograd.test_output_dict b/test/dynamo_expected_failures/TestAOTAutograd.test_output_dict
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestAOTAutograd.test_output_op_depending_on_symint b/test/dynamo_expected_failures/TestAOTAutograd.test_output_op_depending_on_symint
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestAOTAutograd.test_outputs_are_aliased b/test/dynamo_expected_failures/TestAOTAutograd.test_outputs_are_aliased
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestGraph.test_traverse_circular_datapipe b/test/dynamo_expected_failures/TestGraph.test_traverse_circular_datapipe
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestGraph.test_traverse_unhashable_datapipe b/test/dynamo_expected_failures/TestGraph.test_traverse_unhashable_datapipe
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestImports.test_circular_dependencies b/test/dynamo_expected_failures/TestImports.test_circular_dependencies
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestPartitioning.test_contiguous b/test/dynamo_expected_failures/TestPartitioning.test_contiguous
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestPartitioning.test_default_partitioner_output_tensor_shape_tensor b/test/dynamo_expected_failures/TestPartitioning.test_default_partitioner_output_tensor_shape_tensor
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestPartitioning.test_min_cut_partitioner_output_tensor_shape_tensor b/test/dynamo_expected_failures/TestPartitioning.test_min_cut_partitioner_output_tensor_shape_tensor
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/torch/_dynamo/variables/builtin.py b/torch/_dynamo/variables/builtin.py
index 733d97d3ef..0cc73456fe 100644
--- a/torch/_dynamo/variables/builtin.py
+++ b/torch/_dynamo/variables/builtin.py
@@ -63,7 +63,7 @@ from .tensor import (
TensorVariable,
UnspecializedPythonVariable,
)
-from .user_defined import UserDefinedVariable
+from .user_defined import UserDefinedObjectVariable, UserDefinedVariable
log = logging.getLogger(__name__)
@@ -952,6 +952,17 @@ class BuiltinVariable(VariableTracker):
args: "List[VariableTracker]",
kwargs: "Dict[str, VariableTracker]",
) -> "VariableTracker":
+ if self.fn == object and name == "__setattr__":
+ assert len(args) == 3
+ assert len(kwargs) == 0
+ obj, name_var, val = args
+ obj = obj.realize()
+ if (
+ isinstance(obj, UserDefinedObjectVariable)
+ and tx.output.side_effects.is_attribute_mutation(obj)
+ and name_var.is_python_constant()
+ ):
+ return obj.method_setattr_standard(tx, name_var, val)
if self.fn == dict and name == "fromkeys":
return BuiltinVariable.call_custom_dict_fromkeys(tx, dict, *args, **kwargs)
if self.fn == itertools.chain and name == "from_iterable":
diff --git a/torch/_dynamo/variables/torch.py b/torch/_dynamo/variables/torch.py
index 9c1faa9878..47705cdc07 100644
--- a/torch/_dynamo/variables/torch.py
+++ b/torch/_dynamo/variables/torch.py
@@ -181,7 +181,14 @@ class TorchCtxManagerClassVariable(BaseTorchVariable):
# We can't do isinstance(value, type) check because some ctx managers
# are implemented as a function decorated by contextlib.contextmanager,
# E.g., torch._functorch.vmap.vmap_increment_nesting.
- return hashable(value) and value in supported_ctx_manager_classes
+ return (
+ # Context manager type or function with @contextmanager is callable
+ callable(value)
+ and (
+ hashable(value) # accesses value.__hash__()
+ and value in supported_ctx_manager_classes
+ )
+ )
def call_function(
self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]"
|
2.41.0
|
b1d6c8d98e1014ca5c9827b67d5a29eb2f14741
|
Wed, 17 Apr 2024 16:04:09 +0000
|
[PATCH 0255/1000] improve F.adaptive_avg_pool2d error messages on mps (#124143)
|
Gives better error messages on mps. Partially fixes #123725 in the case of `F.adaptive_avg_pool2d`. This also relates to #96056. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124143 Approved by: https://github.com/albanD, https://github.com/malfet
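A hedged illustration (not code from this PR) of the constraint the reworded message describes: on MPS, `F.adaptive_avg_pool2d` currently requires the input spatial sizes to be divisible by the output sizes, and the new message points at a manual CPU round-trip as the workaround.

```python
# Illustrative sketch only; assumes the divisibility rule checked in
# AdaptivePooling.mm and the CPU fallback suggested by the new message.
import torch
import torch.nn.functional as F

device = "mps" if torch.backends.mps.is_available() else "cpu"
x = torch.randn(1, 3, 32, 32, device=device)

# 32 -> 8 is divisible, so this path is expected to work on MPS.
pooled_ok = F.adaptive_avg_pool2d(x, (8, 8))

# 32 -> 7 is not divisible; on MPS this raises the clearer error, and the
# suggested workaround is to pool on CPU and move the result back.
try:
    pooled = F.adaptive_avg_pool2d(x, (7, 7))
except RuntimeError:
    pooled = F.adaptive_avg_pool2d(x.cpu(), (7, 7)).to(device)
```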
|
diff --git a/aten/src/ATen/native/mps/operations/AdaptivePooling.mm b/aten/src/ATen/native/mps/operations/AdaptivePooling.mm
index c88d468f7e..c38d5faec6 100644
--- a/aten/src/ATen/native/mps/operations/AdaptivePooling.mm
+++ b/aten/src/ATen/native/mps/operations/AdaptivePooling.mm
@@ -37,8 +37,9 @@ static void set_kernel_params(int64_t isizeH,
if (isizeH >= osizeH) {
if (check_avg_pooling) {
- TORCH_CHECK((isizeH % osizeH == 0 && isizeW % osizeW == 0),
- "Adaptive pool MPS: input sizes must be divisible by output sizes.");
+ TORCH_CHECK(
+ (isizeH % osizeH == 0 && isizeW % osizeW == 0),
+ "Adaptive pool MPS: input sizes must be divisible by output sizes. Non-divisible input sizes are not implemented on MPS device yet. For now, you can manually transfer tensor to cpu in this case. Please refer to [this issue](https://github.com/pytorch/pytorch/issues/96056)");
}
strideH = (int64_t)(isizeH / osizeH);
strideW = (int64_t)(isizeW / osizeW);
@@ -46,8 +47,9 @@ static void set_kernel_params(int64_t isizeH,
kernel_sizeW = isizeW - (osizeW - 1) * strideW;
} else {
if (check_avg_pooling) {
- TORCH_CHECK((osizeH % isizeH == 0 && osizeW % isizeW == 0),
- "Adaptive pool MPS: output sizes must be divisible by input sizes.");
+ TORCH_CHECK(
+ (osizeH % isizeH == 0 && osizeW % isizeW == 0),
+ "Adaptive pool MPS: output sizes must be divisible by input sizes. Non-divisible input sizes are not implemented on MPS device yet. For now, you can manually transfer tensor to cpu in this case. Please refer to [this issue](https://github.com/pytorch/pytorch/issues/96056)");
}
strideH = (int64_t)(osizeH / isizeH);
strideW = (int64_t)(osizeW / isizeW);
|
2.41.0
|
6324fe0732c58578c4623a84c49922ee76626d9
|
Wed, 17 Apr 2024 16:04:22 +0000
|
[PATCH 0256/1000] Speedup int4mm_kernel with NEON (#124257)
|
By unrolling the middle loop by 16 elements and using NEON to decode packed int4 to float32. Unrolling the entire `n` loop actually makes it a tad slower, probably because ARM has a smaller register file than x86. Before/after performance running stories110M on M2Pro:

| eager (before) | eager (after) | compile (before) | compile (after) |
| --- | --- | --- | --- |
| 28 | 57 | 31 | 104 |

Pull Request resolved: https://github.com/pytorch/pytorch/pull/124257 Approved by: https://github.com/mikekgfb
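For reference, a scalar Python sketch (an assumption based on reading the kernel in the diff below, not part of the patch) of the per-element decode the NEON path vectorizes: each `uint16_t` packs four int4 values, which are extracted by shift-and-mask, recentered by subtracting 8, and dequantized with their column's scale and zero point before the FMA.

```python
# Scalar sketch, assuming this reading of the NEON kernel: extract each
# nibble, recenter to [-8, 7], then apply the per-column scale and zero.
def decode_packed_int4(b_pack: int, scales, zeros):
    out = []
    for i, shift in enumerate((0, 4, 8, 12)):
        q = ((b_pack >> shift) & 0x0F) - 8  # nibble i, recentered int4
        out.append(zeros[i] + scales[i] * float(q))
    return out


# 0x1F80 packs the nibbles [0x0, 0x8, 0xF, 0x1] from low to high.
print(decode_packed_int4(0x1F80, scales=[1.0] * 4, zeros=[0.0] * 4))
# -> [-8.0, 0.0, 7.0, -7.0]
```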
|
diff --git a/aten/src/ATen/native/cpu/int4mm_kernel.cpp b/aten/src/ATen/native/cpu/int4mm_kernel.cpp
index 1e7a7c0bcb..28dfd84c0d 100644
--- a/aten/src/ATen/native/cpu/int4mm_kernel.cpp
+++ b/aten/src/ATen/native/cpu/int4mm_kernel.cpp
@@ -339,6 +339,56 @@ inline void tinygemm_kernel(
#endif
+#if !defined(C10_MOBILE) && defined(__aarch64__)
+#include <arm_neon.h>
+template <int BLOCK_M, int BLOCK_N>
+inline void tinygemm_kernel(
+ const Half* RESTRICT A,
+ const uint8_t* RESTRICT B,
+ const Half* RESTRICT ScaleAndZeros,
+ Half* RESTRICT C,
+ int lda,
+ int ldb,
+ int ldc,
+ int K,
+ int BLOCK_K) {
+ int16_t shift_vals[4] = {0, -4, -8, -12};
+ int16x4_t shifts = vld1_s16(shift_vals);
+ int16x4_t mask = vdup_n_s16(0x0F);
+ int16x4_t offs = vdup_n_s16(8);
+ for (const auto m : c10::irange(BLOCK_M)) {
+ for (int n = 0; n < BLOCK_N; n+= 16) {
+ float32x4_t c_val[4];
+ float32x4_t scales[4], zeros[4];
+ c10::ForcedUnroll<4>{}([&](auto i) {
+ c_val[i] = vdupq_n_f32(0.0);
+ });
+ for (const auto k : c10::irange(K)) {
+ const auto a_val = vdupq_n_f32(static_cast<float>(A[m * lda + k]));
+ if (is_block_start(k, BLOCK_K)) {
+ int kb = k / BLOCK_K;
+ c10::ForcedUnroll<4>{}([&](auto i) {
+ auto scales_and_zeros = vld2_f16(reinterpret_cast<const float16_t*>(ScaleAndZeros + kb * ldc * 2 + n * 2 + i * 8));
+ scales[i] = vcvt_f32_f16(scales_and_zeros.val[0]);
+ zeros[i] = vcvt_f32_f16(scales_and_zeros.val[1]);
+ });
+ }
+ c10::ForcedUnroll<4>{}([&](auto i) {
+ uint16_t b_pack = reinterpret_cast<const uint16_t*>(B + k * ldb + n / 2)[i];
+ int16x4_t b_ints = vsub_s16(vand_u16(vshl_u16(vdup_n_u16(b_pack), shifts), mask), offs);
+ float32x4_t b_vals = vcvtq_f32_s32(vmovl_s16(b_ints));
+ b_vals = vaddq_f32(zeros[i], vmulq_f32(scales[i], b_vals));
+ c_val[i] = vfmaq_f32(c_val[i], b_vals, a_val);
+ });
+ }
+ c10::ForcedUnroll<4>{}([&](auto i) {
+ vst1_f16(reinterpret_cast<float16_t*>(C + m * ldc + n + i * 4), vcvt_f16_f32(c_val[i]));
+ });
+ }
+ }
+}
+#endif
+
inline float convert_int4_to_float(uint8_t a, bool is_even) {
static constexpr float lut[16] = {
-8.0f, -7.0f, -6.0f, -5.0f,
|
2.41.0
|